mirror of https://github.com/outbackdingo/cozystack.git
synced 2026-01-28 18:18:41 +00:00

Compare commits: 16 commits, (platform) ... release-0.
| SHA1 |
|---|
| 28fca4ef12 |
| 7db92db23c |
| 2c167719b4 |
| d61ec3df04 |
| 9cfc07353f |
| bb985806a0 |
| 2179dbd05c |
| 7ba27f4857 |
| 1e47fc55ca |
| d481d83661 |
| 770089244e |
| 05de907054 |
| 34fd00e7cd |
| 5fb9cfe3da |
| bb2e948131 |
| bd10b18291 |
2 .github/CODEOWNERS vendored

@@ -1 +1 @@
* @kvaps @lllamnyp @klinch0
* @kvaps
55 .github/workflows/pre-commit.yml vendored

@@ -1,55 +0,0 @@
name: Pre-Commit Checks

on:
  push:
    branches:
      - main
  pull_request:
    paths-ignore:
      - '**.md'

jobs:
  pre-commit:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install pre-commit
        run: pip install pre-commit

      - name: Install generate
        run: |
          sudo apt update
          sudo apt install curl -y
          curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
          sudo apt install nodejs -y
          git clone https://github.com/bitnami/readme-generator-for-helm
          cd ./readme-generator-for-helm
          npm install
          npm install -g pkg
          pkg . -o /usr/local/bin/readme-generator

      - name: Run pre-commit hooks
        run: |
          git fetch origin main || git fetch origin master
          base_commit=$(git rev-parse --verify origin/main || git rev-parse --verify origin/master || echo "")

          if [ -z "$base_commit" ]; then
            files=$(git ls-files '*.yaml' '*.md')
          else
            files=$(git diff --name-only "$base_commit" -- '*.yaml' '*.md')
          fi

          if [ -n "$files" ]; then
            echo "$files" | xargs pre-commit run --files
          else
            echo "No YAML or Markdown files to lint"
          fi
96 .github/workflows/pull-requests-release.yaml vendored

@@ -1,96 +0,0 @@
name: Releasing PR

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened, closed]

jobs:
  verify:
    name: Test Release
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Run tests
        run: make test

  finalize:
    name: Finalize Release
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);

            if (!match) {
              core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
            } else {
              const tag = `v${match[1]}`;
              core.setOutput('tag', tag);
              console.log(`✅ Extracted tag: ${tag}`);
            }

      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create tag on merged commit
        run: |
          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }} --force
          git push origin ${{ steps.get_tag.outputs.tag }} --force

      - name: Publish draft release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            const release = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!release) {
              throw new Error(`Draft release with tag ${tag} not found`);
            }

            await github.rest.repos.updateRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: release.id,
              draft: false
            });

            console.log(`✅ Published release for ${tag}`);
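The branch-name regex in the "Extract tag from branch name" step is the contract the whole release flow hangs on: a PR head branch `release-X.Y.Z[-suffix]` yields tag `vX.Y.Z[-suffix]`. For clarity, here is the same rule re-expressed in Go; this is an illustrative sketch, not code from the repository.

```go
package main

import (
	"fmt"
	"regexp"
)

// releaseBranch mirrors the JavaScript regex in the workflow step above.
var releaseBranch = regexp.MustCompile(`^release-(\d+\.\d+\.\d+(?:[-\w.]+)?)$`)

// tagForBranch returns the v-prefixed tag for a release branch name,
// or false if the branch does not match the expected format.
func tagForBranch(branch string) (string, bool) {
	m := releaseBranch.FindStringSubmatch(branch)
	if m == nil {
		return "", false
	}
	return "v" + m[1], true
}

func main() {
	for _, b := range []string{"release-0.34.2", "release-1.2.3-rc.1", "feature-x"} {
		if tag, ok := tagForBranch(b); ok {
			fmt.Printf("%s -> %s\n", b, tag)
		} else {
			fmt.Printf("%s does not match release-X.Y.Z[-suffix]\n", b)
		}
	}
}
```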
39 .github/workflows/pull-requests.yaml vendored

@@ -1,39 +0,0 @@
name: Pull Request

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

jobs:
  e2e:
    name: Build and Test
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: make build
        run: |
          make build

      - name: make test
        run: |
          make test
195 .github/workflows/tags.yaml vendored

@@ -1,195 +0,0 @@
name: Versioned Tag

on:
  # Trigger on push if it includes a tag like vX.Y.Z
  push:
    tags:
      - 'v*.*.*'

jobs:
  prepare-release:
    name: Prepare Release
    runs-on: [self-hosted]
    permissions:
      contents: write
      packages: write
      pull-requests: write

    steps:
      # 1) Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
            if (existing) {
              core.setOutput('skip', 'true');
            } else {
              core.setOutput('skip', 'false');
            }

      # If a published release already exists, skip the rest of the workflow
      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      # 2) Determine the base branch from which the tag was pushed
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            /*
              For a push event with a tag, GitHub sets context.payload.base_ref
              if the tag was pushed from a branch.
              If it's empty, we can't determine the correct base branch and must fail.
            */
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
              core.setFailed(`❌ base_ref is empty. Make sure you push the tag from a branch (e.g. 'git push origin HEAD:refs/tags/vX.Y.Z').`);
              return;
            }

            const shortBranch = baseRef.replace("refs/heads/", "");
            const releasePattern = /^release-\d+\.\d+$/;
            if (shortBranch !== "main" && !releasePattern.test(shortBranch)) {
              core.setFailed(`❌ Tagged commit must belong to branch 'main' or 'release-X.Y'. Got '${shortBranch}'`);
              return;
            }

            core.setOutput('branch', shortBranch);

      # 3) Checkout full git history and tags
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      # 4) Login to GitHub Container Registry
      - name: Login to GitHub Container Registry
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      # 5) Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

      # 6) Optionally commit built artifacts to the repository
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        env:
          GIT_AUTHOR_NAME: ${{ github.actor }}
          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"

      # 7) Create a release branch like release-X.Y.Z
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH_NAME"
          git push origin "$BRANCH_NAME" --force

      # 8) Create a pull request from release-X.Y.Z to the original base branch
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });

            if (prs.data.length === 0) {
              const newPr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
                body:
                  `This PR prepares the release \`v${version}\`.\n` +
                  `(Please merge it before releasing draft)`,
                draft: false
              });

              console.log(`Created pull request #${newPr.data.number} from ${head} to ${base}`);
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: newPr.data.number,
                labels: ['release']
              });
            } else {
              console.log(`Pull request already exists from ${head} to ${base}`);
            }

      # 9) Create or reuse an existing draft GitHub release for this tag
      - name: Create or reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: create_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            let release = releases.data.find(r => r.tag_name === tag);
            if (!release) {
              // unwrap .data so both branches hold the release object,
              // not the API response wrapper
              release = (await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: `${tag}`,
                draft: true,
                prerelease: false
              })).data;
            }
            core.setOutput('upload_url', release.upload_url);

      # 10) Build additional assets for the release (if needed)
      - name: Build assets
        if: steps.check_release.outputs.skip == 'false'
        run: make assets
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # 11) Upload assets to the draft release
      - name: Upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # 12) Run tests
      - name: Run tests
        if: steps.check_release.outputs.skip == 'false'
        run: make test
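Step 2 above encodes the project's branch policy: tags may only be pushed from main or a maintenance branch named release-X.Y. The same check, sketched in Go for illustration (not repository code):

```go
package main

import (
	"fmt"
	"regexp"
)

// maintenanceBranch mirrors the releasePattern regex from step 2 above.
var maintenanceBranch = regexp.MustCompile(`^release-\d+\.\d+$`)

// validBase reports whether a tag may be pushed from the given branch,
// per the workflow's rule: 'main' or a maintenance branch 'release-X.Y'.
func validBase(branch string) bool {
	return branch == "main" || maintenanceBranch.MatchString(branch)
}

func main() {
	for _, b := range []string{"main", "release-0.34", "release-0.34.2", "dev"} {
		fmt.Printf("%-14s valid base: %v\n", b, validBase(b))
	}
}
```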
@@ -1,23 +0,0 @@
repos:
  - repo: local
    hooks:
      - id: gen-versions-map
        name: Generate versions map and check for changes
        entry: sh -c 'make -C packages/apps check-version-map && make -C packages/extra check-version-map'
        language: system
        types: [file]
        pass_filenames: false
        description: Run the script and fail if it generates changes
      - id: run-make-generate
        name: Run 'make generate' in all app directories
        entry: |
          /bin/bash -c '
          for dir in ./packages/apps/*/; do
            if [ -d "$dir" ]; then
              echo "Running make generate in $dir"
              (cd "$dir" && make generate)
            fi
          done
          '
        language: script
        files: ^.*$
@@ -13,8 +13,8 @@ but it means a lot to us.

To add your organization to this list, you can either:

- [open a pull request](https://github.com/cozystack/cozystack/pulls) to directly update this file, or
- [edit this file](https://github.com/cozystack/cozystack/blob/main/ADOPTERS.md) directly in GitHub
- [open a pull request](https://github.com/aenix-io/cozystack/pulls) to directly update this file, or
- [edit this file](https://github.com/aenix-io/cozystack/blob/main/ADOPTERS.md) directly in GitHub

Feel free to ask in the Slack chat if you have any questions and/or require
assistance with updating this list.

@@ -28,5 +28,4 @@ This list is sorted in chronological order, based on the submission date.

| [Ænix](https://aenix.io/) | @kvaps | 2024-02-14 | Ænix provides consulting services for cloud providers and uses Cozystack as the main tool for organizing managed services for them. |
| [Mediatech](https://mediatech.dev/) | @ugenk | 2024-05-01 | We're developing and hosting software for our and our customer services. We're using Cozystack as a Kubernetes distribution for that. |
| [Bootstack](https://bootstack.app/) | @mrkhachaturov | 2024-08-01 | At Bootstack, we utilize a Kubernetes operator specifically designed to simplify and streamline cloud infrastructure creation. |
| [gohost](https://gohost.kz/) | @karabass_off | 2024-02-01 | Our company has been working in the market of Kazakhstan for more than 15 years, providing clients with a standard set of services: VPS/VDC, IaaS, shared hosting, etc. Now we are expanding the lineup by introducing Bare Metal Kubernetes cluster under Cozystack management. |
| [Urmanac](https://urmanac.com) | @kingdonb | 2024-12-04 | Urmanac is the future home of a hosting platform for the knowledge base of a community of personal server enthusiasts. We use Cozystack to provide support services for web sites hosted using both conventional deployments and on SpinKube, with WASM. |
| [gohost](https://gohost.kz/) | @karabass_off | 2024-02-01 | Our company has been working in the market of Kazakhstan for more than 15 years, providing clients with a standard set of services: VPS/VDC, IaaS, shared hosting, etc. Now we are expanding the lineup by introducing Bare Metal Kubernetes cluster under Cozystack management. |
@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas o

* Problems found while setting up the development environment
* Gaps in our documentation
* Bugs in our GitHub actions
* Bugs in our Github actions

First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).

The guidelines below are a starting point. We don't want to limit your
creativity, passion, and initiative. If you think there's a better way, please
feel free to bring it up in a GitHub discussion, or open a pull request. We're
feel free to bring it up in a Github discussion, or open a pull request. We're
certain there are always better ways to do things, we just need to start some
constructive dialogue!

@@ -23,9 +23,9 @@ We welcome many types of contributions including:

* New features
* Builds, CI/CD
* Bug fixes
* [Documentation](https://github.com/cozystack/cozystack-website/tree/main)
* [Documentation](https://github.com/aenix-io/cozystack-website/tree/main)
* Issue Triage
* Answering questions on Slack or GitHub Discussions
* Answering questions on Slack or Github Discussions
* Web design
* Communications / Social Media / Blog Posts
* Events participation

@@ -34,7 +34,7 @@ We welcome many types of contributions including:

## Ask for Help

The best way to reach us with a question when contributing is to drop a line in
our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.
our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.

## Raising Issues
@@ -1,91 +0,0 @@
# Cozystack Governance

This document defines the governance structure of the Cozystack community, outlining how members collaborate to achieve shared goals.

## Overview

**Cozystack**, a Cloud Native Computing Foundation (CNCF) project, is committed
to building an open, inclusive, productive, and self-governing open source
community focused on building a high-quality open source PaaS and framework for building clouds.

## Code Repositories

The following code repositories are governed by the Cozystack community and
maintained under the `cozystack` namespace:

* **[Cozystack](https://github.com/cozystack/cozystack):** Main Cozystack codebase
* **[website](https://github.com/cozystack/website):** Cozystack website and documentation sources
* **[Talm](https://github.com/cozystack/talm):** Tool for managing Talos Linux the GitOps way
* **[cozy-proxy](https://github.com/cozystack/cozy-proxy):** A simple kube-proxy addon for 1:1 NAT services in Kubernetes with NFT backend
* **[cozystack-telemetry-server](https://github.com/cozystack/cozystack-telemetry-server):** Cozystack telemetry
* **[talos-bootstrap](https://github.com/cozystack/talos-bootstrap):** An interactive Talos Linux installer
* **[talos-meta-tool](https://github.com/cozystack/talos-meta-tool):** Tool for writing network metadata into META partition

## Community Roles

* **Users:** Members that engage with the Cozystack community via any medium, including Slack, Telegram, GitHub, and mailing lists.
* **Contributors:** Members contributing to the projects by contributing and reviewing code, writing documentation,
  responding to issues, participating in proposal discussions, and so on.
* **Directors:** Non-technical project leaders.
* **Maintainers:** Technical project leaders.

## Contributors

Cozystack is for everyone. Anyone can become a Cozystack contributor simply by
contributing to the project, whether through code, documentation, blog posts,
community management, or other means.
As with all Cozystack community members, contributors are expected to follow the
[Cozystack Code of Conduct](https://github.com/cozystack/cozystack/blob/main/CODE_OF_CONDUCT.md).

All contributions to Cozystack code, documentation, or other components in the
Cozystack GitHub organisation must follow the
[contributing guidelines](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md).
Whether these contributions are merged into the project is the prerogative of the maintainers.

## Directors

Directors are responsible for non-technical leadership functions within the project.
This includes representing Cozystack and its maintainers to the community, to the press,
and to the outside world; interfacing with CNCF and other governance entities;
and participating in project decision-making processes when appropriate.

Directors are elected by a majority vote of the maintainers.

## Maintainers

Maintainers have the right to merge code into the project.
Anyone can become a Cozystack maintainer (see "Becoming a Maintainer" below).

### Expectations

Cozystack maintainers are expected to:

* Review pull requests, triage issues, and fix bugs in their areas of
  expertise, ensuring that all changes go through the project's code review
  and integration processes.
* Monitor cncf-cozystack-* emails, the Cozystack Slack channels in Kubernetes
  and CNCF Slack workspaces, Telegram groups, and help out when possible.
* Rapidly respond to any time-sensitive security release processes.
* Attend Cozystack community meetings.

If a maintainer is no longer interested in or cannot perform the duties
listed above, they should move themselves to emeritus status.
If necessary, this can also occur through the decision-making process outlined below.

### Becoming a Maintainer

Anyone can become a Cozystack maintainer. Maintainers should be extremely
proficient in cloud native technologies and/or Go; have relevant domain expertise;
have the time and ability to meet the maintainer's expectations above;
and demonstrate the ability to work with the existing maintainers and project processes.

To become a maintainer, start by expressing interest to existing maintainers.
Existing maintainers will then ask you to demonstrate the qualifications above
by contributing PRs, doing code reviews, and other such tasks under their guidance.
After several months of working together, maintainers will decide whether to grant maintainer status.

## Project Decision-making Process

Ideally, all project decisions are resolved by consensus of maintainers and directors.
If this is not possible, a vote will be called.
The voting process is a simple majority in which each maintainer and director receives one vote.
@@ -1,12 +1,7 @@
# The Cozystack Maintainers

| Maintainer | GitHub Username | Company | Responsibility |
| ---------- | --------------- | ------- | --------------------------------- |
| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix | Core Maintainer |
| George Gaál | [@gecube](https://github.com/gecube) | Ænix | DevOps Practices in Platform, Developers Advocate |
| Kingdon Barrett | [@kingdonb](https://github.com/kingdonb) | Urmanac | FluxCD and flux-operator |
| Timofei Larkin | [@lllamnyp](https://github.com/lllamnyp) | 3commas | Etcd-operator Lead |
| Artem Bortnikov | [@aobort](https://github.com/aobort) | Timescale | Etcd-operator Lead |
| Andrei Gumilev | [@chumkaska](https://github.com/chumkaska) | Ænix | Platform Documentation |
| Timur Tukaev | [@tym83](https://github.com/tym83) | Ænix | Cozystack Website, Marketing, Community Management |
| Kirill Klinchenkov | [@klinch0](https://github.com/klinch0) | Ænix | Core Maintainer |

| Maintainer | GitHub Username | Company |
| ---------- | --------------- | ------- |
| Andrei Kvapil | [@kvaps](https://github.com/kvaps) | Ænix |
| George Gaál | [@gecube](https://github.com/gecube) | Ænix |
| Eduard Generalov | [@egeneralov](https://github.com/egeneralov) | Ænix |
31 Makefile

@@ -1,31 +1,23 @@
.PHONY: manifests repos assets

build-deps:
	@command -V find docker skopeo jq gh helm > /dev/null
	@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
	@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
	@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
	@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)

build: build-deps
build:
	make -C packages/apps/http-cache image
	make -C packages/apps/postgres image
	make -C packages/apps/mysql image
	make -C packages/apps/clickhouse image
	make -C packages/apps/kubernetes image
	make -C packages/extra/monitoring image
	make -C packages/system/cozystack-api image
	make -C packages/system/cozystack-controller image
	make -C packages/system/cilium image
	make -C packages/system/kubeovn image
	make -C packages/system/kubeovn-webhook image
	make -C packages/system/dashboard image
	make -C packages/system/kamaji image
	make -C packages/system/bucket image
	make -C packages/core/testing image
	make -C packages/core/installer image
	make manifests

manifests:
	(cd packages/core/installer/; helm template -n cozy-installer installer .) > manifests/cozystack-installer.yaml
	sed -i 's|@sha256:[^"]\+||' manifests/cozystack-installer.yaml

repos:
	rm -rf _out
	make -C packages/apps check-version-map

@@ -36,21 +28,10 @@ repos:
	mkdir -p _out/logos
	cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/

manifests:
	mkdir -p _out/assets
	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml

assets:
	make -C packages/core/installer/ assets

test:
	make -C packages/core/testing apply
	make -C packages/core/testing test
	#make -C packages/core/testing test-applications

generate:
	hack/update-codegen.sh

upload_assets: manifests
	hack/upload-assets.sh
	make -C packages/core/testing delete
53 README.md

@@ -2,31 +2,30 @@
![]()

[](https://opensource.org/)
[](https://opensource.org/licenses/)
[](https://cozystack.io/support/)
[](https://github.com/cozystack/cozystack)
[](https://github.com/cozystack/cozystack/releases/latest)
[](https://github.com/cozystack/cozystack/graphs/contributors)
[](https://opensource.org/licenses/)
[](https://aenix.io/contact-us/#meet)
[](https://aenix.io/cozystack/)
[](https://github.com/aenix-io/cozystack)
[](https://github.com/aenix-io/cozystack)

# Cozystack

**Cozystack** is a free PaaS platform and framework for building clouds.

With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.

Use Cozystack to build your own cloud or provide a cost-effective development environment.
You can use Cozystack to build your own cloud or to provide a cost-effective development environments.

## Use-Cases

* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
  You can use Cozystack as a backend for a public cloud
* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
  You can use Cozystack as backend for a public cloud

* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
  You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach
* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
  You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach

* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
  You can use Cozystack as a Kubernetes distribution for Bare Metal
* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
  You can use Cozystack as Kubernetes distribution for Bare Metal

## Screenshot

@@ -34,32 +33,32 @@ You can use Cozystack as a Kubernetes distribution for Bare Metal

## Documentation

The documentation is located on the [cozystack.io](https://cozystack.io) website.
The documentation is located on official [cozystack.io](https://cozystack.io) website.

Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.
Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.

If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.
If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.

## Versioning

Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/cozystack/cozystack/releases) section.
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.

- [Roadmap](https://cozystack.io/docs/roadmap/)
- [Roadmap](https://github.com/orgs/aenix-io/projects/2)

## Contributions

Contributions are highly appreciated and very welcomed!

In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.
In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/aenix-io/cozystack/issues) section.
In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.

You can express your intention to work on the fix on your own.
You can express your intention in working on the fix on your own.
Commits are used to generate the changelog, and their author will be referenced in it.

If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/aenix-io/cozystack/discussions/categories/feature-requests).

You are welcome to join our weekly community meetings (just add these events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
You can join our weekly community meetings (just add these events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).

## License

@@ -68,4 +67,8 @@ The code is provided as-is with no warranties.

## Commercial Support

A list of companies providing commercial support for this project can be found on [official site](https://cozystack.io/support/).
[**Ænix**](https://aenix.io) offers enterprise-grade support, available 24/7.

We provide all types of assistance, including consultations, development of missing features, design, assistance with installation, and integration.

[Contact us](https://aenix.io/contact/)
@@ -1,25 +0,0 @@
API rule violation: list_type_missing,github.com/cozystack/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XIntOrString
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XListMapKeys
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XListType
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XMapType
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XPreserveUnknownFields
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XValidations
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrArray,JSONSchemas
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrArray,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrBool,Allows
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrBool,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrStringArray,Property
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaPropsOrStringArray,Schema
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,APIResourceList,APIResources
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Duration,Duration
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Object
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Type
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,MicroTime,Time
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,StatusCause,Type
API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Time,Time
API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentEncoding
API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentType
@@ -1,36 +0,0 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1alpha1 contains API Schema definitions for the v1alpha1 API group.
// +kubebuilder:object:generate=true
// +groupName=cozystack.io
package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "cozystack.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
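For context, a consumer of this package would typically add these types to a runtime scheme before building a client. The following is a minimal sketch, not code from this repository: the import path matches the one used by cozystack-controller later in this compare, while the rest is illustrative controller-runtime boilerplate.

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme) // core Kubernetes types
	_ = cozyv1alpha1.AddToScheme(scheme)   // cozystack.io/v1alpha1 types

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// List the Workload objects defined by this API group.
	var workloads cozyv1alpha1.WorkloadList
	if err := c.List(context.Background(), &workloads); err != nil {
		panic(err)
	}
	for _, w := range workloads.Items {
		fmt.Println(w.Namespace, w.Name, w.Status.Kind)
	}
}
```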
@@ -1,70 +0,0 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WorkloadStatus defines the observed state of Workload
type WorkloadStatus struct {
	// Kind represents the type of workload (redis, postgres, etc.)
	// +required
	Kind string `json:"kind"`

	// Type represents the specific role of the workload (redis, sentinel, etc.)
	// If not specified, defaults to Kind
	// +optional
	Type string `json:"type,omitempty"`

	// Resources specifies the compute resources allocated to this workload
	// +required
	Resources map[string]resource.Quantity `json:"resources"`

	// Operational indicates if all pods of the workload are ready
	// +optional
	Operational bool `json:"operational"`
}

// +kubebuilder:object:root=true
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".status.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".status.type"
// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".status.resources.cpu"
// +kubebuilder:printcolumn:name="Memory",type="string",JSONPath=".status.resources.memory"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=`.status.operational`

// Workload is the Schema for the workloads API
type Workload struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Status WorkloadStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// WorkloadList contains a list of Workload
type WorkloadList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Workload `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Workload{}, &WorkloadList{})
}
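To make the shape of this API concrete, here is a hypothetical construction of a Workload with a populated status. All names and quantities are invented for illustration; only the field layout comes from the types above.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

func main() {
	w := cozyv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{Name: "redis-0", Namespace: "tenant-demo"},
		Status: cozyv1alpha1.WorkloadStatus{
			Kind: "redis",    // required: workload kind
			Type: "sentinel", // optional: role; defaults to Kind when empty
			Resources: map[string]resource.Quantity{
				"cpu":    resource.MustParse("500m"),
				"memory": resource.MustParse("512Mi"),
			},
			Operational: true, // all pods of the workload are ready
		},
	}

	// Quantity.String has a pointer receiver, so copy the map value
	// into an addressable variable before calling it.
	cpu := w.Status.Resources["cpu"]
	fmt.Printf("%s consumes %s CPU\n", w.Name, cpu.String())
}
```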
@@ -1,91 +0,0 @@
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// WorkloadMonitorSpec defines the desired state of WorkloadMonitor
type WorkloadMonitorSpec struct {
	// Selector is a label selector to find workloads to monitor
	// +required
	Selector map[string]string `json:"selector"`

	// Kind specifies the kind of the workload
	// +optional
	Kind string `json:"kind,omitempty"`

	// Type specifies the type of the workload
	// +optional
	Type string `json:"type,omitempty"`

	// Version specifies the version of the workload
	// +optional
	Version string `json:"version,omitempty"`

	// MinReplicas specifies the minimum number of replicas that should be available
	// +kubebuilder:validation:Minimum=0
	// +optional
	MinReplicas *int32 `json:"minReplicas,omitempty"`

	// Replicas is the desired number of replicas
	// If not specified, will use observedReplicas as the target
	// +kubebuilder:validation:Minimum=0
	// +optional
	Replicas *int32 `json:"replicas,omitempty"`
}

// WorkloadMonitorStatus defines the observed state of WorkloadMonitor
type WorkloadMonitorStatus struct {
	// Operational indicates if the workload meets all operational requirements
	// +optional
	Operational *bool `json:"operational,omitempty"`

	// AvailableReplicas is the number of ready replicas
	// +optional
	AvailableReplicas int32 `json:"availableReplicas"`

	// ObservedReplicas is the total number of pods observed
	// +optional
	ObservedReplicas int32 `json:"observedReplicas"`
}

// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Kind",type="string",JSONPath=".spec.kind"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
// +kubebuilder:printcolumn:name="MinReplicas",type="integer",JSONPath=".spec.minReplicas"
// +kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas"
// +kubebuilder:printcolumn:name="Observed",type="integer",JSONPath=".status.observedReplicas"
// +kubebuilder:printcolumn:name="Operational",type="boolean",JSONPath=".status.operational"

// WorkloadMonitor is the Schema for the workloadmonitors API
type WorkloadMonitor struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   WorkloadMonitorSpec   `json:"spec,omitempty"`
	Status WorkloadMonitorStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// WorkloadMonitorList contains a list of WorkloadMonitor
type WorkloadMonitorList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []WorkloadMonitor `json:"items"`
}

func init() {
	SchemeBuilder.Register(&WorkloadMonitor{}, &WorkloadMonitorList{})
}

// GetSelector returns the label selector from metadata
func (w *WorkloadMonitor) GetSelector() map[string]string {
	return w.Spec.Selector
}

// Selector specifies the label selector for workloads
type Selector map[string]string
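The replica fields interact: the comments above say that when Replicas is unset, observedReplicas serves as the target, and MinReplicas sets the floor for availability. Below is a hedged sketch of that logic as one plausible reading; the actual controller code is not part of this diff, and the defaulting shown here is an interpretation.

```go
package main

import (
	"fmt"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// computeStatus derives a WorkloadMonitorStatus from observed pod counts,
// following the semantics suggested by the spec comments: when Replicas is
// unset, the observed count is the target; MinReplicas, when set, lowers
// the bar for being considered operational.
func computeStatus(spec cozyv1alpha1.WorkloadMonitorSpec, observed, available int32) cozyv1alpha1.WorkloadMonitorStatus {
	target := observed
	if spec.Replicas != nil {
		target = *spec.Replicas
	}
	floor := target
	if spec.MinReplicas != nil {
		floor = *spec.MinReplicas
	}
	operational := available >= floor
	return cozyv1alpha1.WorkloadMonitorStatus{
		Operational:       &operational,
		AvailableReplicas: available,
		ObservedReplicas:  observed,
	}
}

func main() {
	two := int32(2)
	// 3 pods observed, 2 ready, minimum of 2 required: operational.
	status := computeStatus(cozyv1alpha1.WorkloadMonitorSpec{MinReplicas: &two}, 3, 2)
	fmt.Printf("operational=%v available=%d observed=%d\n",
		*status.Operational, status.AvailableReplicas, status.ObservedReplicas)
}
```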
@@ -1,238 +0,0 @@
//go:build !ignore_autogenerated

/*
Copyright 2025 The Cozystack Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/api/resource"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Selector) DeepCopyInto(out *Selector) {
	{
		in := &in
		*out = make(Selector, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
func (in Selector) DeepCopy() Selector {
	if in == nil {
		return nil
	}
	out := new(Selector)
	in.DeepCopyInto(out)
	return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Workload) DeepCopyInto(out *Workload) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload.
func (in *Workload) DeepCopy() *Workload {
	if in == nil {
		return nil
	}
	out := new(Workload)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Workload) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadList) DeepCopyInto(out *WorkloadList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Workload, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadList.
func (in *WorkloadList) DeepCopy() *WorkloadList {
	if in == nil {
		return nil
	}
	out := new(WorkloadList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitor) DeepCopyInto(out *WorkloadMonitor) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitor.
func (in *WorkloadMonitor) DeepCopy() *WorkloadMonitor {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitor)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitor) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorList) DeepCopyInto(out *WorkloadMonitorList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]WorkloadMonitor, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorList.
func (in *WorkloadMonitorList) DeepCopy() *WorkloadMonitorList {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitorList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *WorkloadMonitorList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorSpec) DeepCopyInto(out *WorkloadMonitorSpec) {
	*out = *in
	if in.Selector != nil {
		in, out := &in.Selector, &out.Selector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.MinReplicas != nil {
		in, out := &in.MinReplicas, &out.MinReplicas
		*out = new(int32)
		**out = **in
	}
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorSpec.
func (in *WorkloadMonitorSpec) DeepCopy() *WorkloadMonitorSpec {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitorSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadMonitorStatus) DeepCopyInto(out *WorkloadMonitorStatus) {
	*out = *in
	if in.Operational != nil {
		in, out := &in.Operational, &out.Operational
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadMonitorStatus.
func (in *WorkloadMonitorStatus) DeepCopy() *WorkloadMonitorStatus {
	if in == nil {
		return nil
	}
	out := new(WorkloadMonitorStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkloadStatus) DeepCopyInto(out *WorkloadStatus) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make(map[string]resource.Quantity, len(*in))
		for key, val := range *in {
			(*out)[key] = val.DeepCopy()
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadStatus.
func (in *WorkloadStatus) DeepCopy() *WorkloadStatus {
	if in == nil {
		return nil
	}
	out := new(WorkloadStatus)
	in.DeepCopyInto(out)
	return out
}
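One detail worth noting in the generated code: WorkloadStatus.Resources copies each resource.Quantity with val.DeepCopy(), and the map itself is re-allocated, so a copy never aliases the original. A small, hypothetical demonstration of that guarantee:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

func main() {
	orig := &cozyv1alpha1.WorkloadStatus{
		Kind:      "redis",
		Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")},
	}

	dup := orig.DeepCopy()
	dup.Resources["cpu"] = resource.MustParse("2") // mutate only the copy

	cpu := orig.Resources["cpu"]
	fmt.Println(cpu.String()) // prints "1": the maps do not alias each other
}
```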
@@ -1,33 +0,0 @@
/*
Copyright 2024 The Cozystack Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"os"

	"github.com/cozystack/cozystack/pkg/cmd/server"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/component-base/cli"
)

func main() {
	ctx := genericapiserver.SetupSignalContext()
	options := server.NewAppsServerOptions(os.Stdout, os.Stderr)
	cmd := server.NewCommandStartAppsServer(ctx, options)
	code := cli.Run(cmd)
	os.Exit(code)
}
@@ -1,29 +0,0 @@
package main

import (
	"flag"
	"log"
	"net/http"
	"path/filepath"
)

func main() {
	addr := flag.String("address", ":8123", "Address to listen on")
	dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
	flag.Parse()

	absDir, err := filepath.Abs(*dir)
	if err != nil {
		log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
	}

	fs := http.FileServer(http.Dir(absDir))
	http.Handle("/", fs)

	log.Printf("Server starting on %s, serving directory %s", *addr, absDir)

	err = http.ListenAndServe(*addr, nil)
	if err != nil {
		log.Fatalf("Server failed to start: %v", err)
	}
}
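The file server above is self-contained; for a quick smoke test, a client like the following could fetch an asset from it. The address and file name here are assumptions (the default :8123 listen address from the flags above, and the cozystack-installer.yaml asset that the Makefile's manifests target produces), not something this diff defines.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumed: the assets server runs locally on its default address
	// and the directory contains cozystack-installer.yaml.
	resp, err := http.Get("http://localhost:8123/cozystack-installer.yaml")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes (status %s)\n", len(body), resp.Status)
}
```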
@@ -1,219 +0,0 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"crypto/tls"
	"flag"
	"os"
	"time"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"

	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	"github.com/cozystack/cozystack/internal/controller"
	"github.com/cozystack/cozystack/internal/telemetry"
	// +kubebuilder:scaffold:imports
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
	// +kubebuilder:scaffold:scheme
}

func main() {
	var metricsAddr string
	var enableLeaderElection bool
	var probeAddr string
	var secureMetrics bool
	var enableHTTP2 bool
	var disableTelemetry bool
	var telemetryEndpoint string
	var telemetryInterval string
	var cozystackVersion string
	var tlsOpts []func(*tls.Config)
	flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
		"Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	flag.BoolVar(&secureMetrics, "metrics-secure", true,
		"If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
	flag.BoolVar(&enableHTTP2, "enable-http2", false,
		"If set, HTTP/2 will be enabled for the metrics and webhook servers")
	flag.BoolVar(&disableTelemetry, "disable-telemetry", false,
		"Disable telemetry collection")
	flag.StringVar(&telemetryEndpoint, "telemetry-endpoint", "https://telemetry.cozystack.io",
		"Endpoint for sending telemetry data")
	flag.StringVar(&telemetryInterval, "telemetry-interval", "15m",
		"Interval between telemetry data collection (e.g. 15m, 1h)")
	flag.StringVar(&cozystackVersion, "cozystack-version", "unknown",
		"Version of Cozystack")
	opts := zap.Options{
		Development: false,
	}
	opts.BindFlags(flag.CommandLine)
	flag.Parse()

	// Parse telemetry interval
	interval, err := time.ParseDuration(telemetryInterval)
	if err != nil {
		setupLog.Error(err, "invalid telemetry interval")
		os.Exit(1)
	}

	// Configure telemetry
	telemetryConfig := telemetry.Config{
		Disabled:         disableTelemetry,
		Endpoint:         telemetryEndpoint,
		Interval:         interval,
		CozystackVersion: cozystackVersion,
	}

	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))

	// if the enable-http2 flag is false (the default), http/2 should be disabled
	// due to its vulnerabilities. More specifically, disabling http/2 will
	// prevent from being vulnerable to the HTTP/2 Stream Cancellation and
	// Rapid Reset CVEs. For more information see:
	// - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
	// - https://github.com/advisories/GHSA-4374-p667-p6c8
	disableHTTP2 := func(c *tls.Config) {
		setupLog.Info("disabling http/2")
		c.NextProtos = []string{"http/1.1"}
	}

	if !enableHTTP2 {
		tlsOpts = append(tlsOpts, disableHTTP2)
	}

	webhookServer := webhook.NewServer(webhook.Options{
		TLSOpts: tlsOpts,
	})

	// Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
	// More info:
	// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/server
	// - https://book.kubebuilder.io/reference/metrics.html
	metricsServerOptions := metricsserver.Options{
		BindAddress:   metricsAddr,
		SecureServing: secureMetrics,
		TLSOpts:       tlsOpts,
	}

	if secureMetrics {
		// FilterProvider is used to protect the metrics endpoint with authn/authz.
		// These configurations ensure that only authorized users and service accounts
		// can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
		// https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/metrics/filters#WithAuthenticationAndAuthorization
		metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization

		// TODO(user): If CertDir, CertName, and KeyName are not specified, controller-runtime will automatically
		// generate self-signed certificates for the metrics server. While convenient for development and testing,
		// this setup is not recommended for production.
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:                 scheme,
		Metrics:                metricsServerOptions,
		WebhookServer:          webhookServer,
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "19a0338c.cozystack.io",
		// LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
		// when the Manager ends. This requires the binary to immediately end when the
		// Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
		// speeds up voluntary leader transitions as the new leader don't have to wait
		// LeaseDuration time first.
		//
		// In the default scaffold provided, the program ends immediately after
		// the manager stops, so would be fine to enable this option. However,
		// if you are doing or is intended to do any operation such as perform cleanups
		// after the manager stops then its usage might be unsafe.
		// LeaderElectionReleaseOnCancel: true,
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	if err = (&controller.WorkloadMonitorReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
		os.Exit(1)
	}

	if err = (&controller.WorkloadReconciler{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "Workload")
		os.Exit(1)
	}

	// +kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")
		os.Exit(1)
	}

	// Initialize telemetry collector
	collector, err := telemetry.NewCollector(mgr.GetClient(), &telemetryConfig, mgr.GetConfig())
	if err != nil {
		setupLog.V(1).Error(err, "unable to create telemetry collector, telemetry will be disabled")
	}

	if collector != nil {
		if err := mgr.Add(collector); err != nil {
			setupLog.Error(err, "unable to set up telemetry collector")
			setupLog.V(1).Error(err, "unable to set up telemetry collector, continuing without telemetry")
		}
	}

	setupLog.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
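The `disableHTTP2` TLS option in the file above works by shrinking the ALPN protocol list before the webhook and metrics servers start. A standalone sketch (not part of the repository) showing the effect of applying the same option:

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// Same mitigation as the manager's disableHTTP2 option: with only
	// "http/1.1" advertised via ALPN, clients can no longer negotiate h2,
	// which sidesteps the HTTP/2 Rapid Reset class of attacks.
	disableHTTP2 := func(c *tls.Config) {
		c.NextProtos = []string{"http/1.1"}
	}

	cfg := &tls.Config{NextProtos: []string{"h2", "http/1.1"}}
	disableHTTP2(cfg)
	fmt.Println(cfg.NextProtos) // prints [http/1.1]
}
```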
File diff suppressed because it is too large
1602 dashboards/control-plane/kube-etcd3.json Normal file
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -450,7 +450,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "Total",
"range": true,
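Every hunk in these dashboard diffs makes the same one-line change to the container selector. As background: cadvisor exports a pod-level aggregate series labeled `container=""`, and some container runtimes additionally report the pause (sandbox) container as `container="POD"`; excluding both keeps only real workload containers and avoids counting the sandbox, presumably the motivation here. The two selector forms side by side:

```promql
# Before: drops only the pod-level aggregate series.
container_cpu_usage_seconds_total{container!=""}

# After: also drops the pause (sandbox) container,
# which some runtimes report as container="POD".
container_cpu_usage_seconds_total{container!="POD",container!=""}
```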
@@ -520,7 +520,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -590,7 +590,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -660,7 +660,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "__auto",
"range": true,
@@ -1128,7 +1128,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
+"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -1143,7 +1143,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -1527,7 +1527,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
+"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -1542,7 +1542,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
+"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -1909,7 +1909,7 @@
},
"editorMode": "code",
"exemplar": false,
-"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
+"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -2037,7 +2037,7 @@
},
"editorMode": "code",
"exemplar": false,
-"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
+"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -2160,7 +2160,7 @@
},
"editorMode": "code",
"exemplar": false,
-"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
+"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -2288,7 +2288,7 @@
},
"editorMode": "code",
"exemplar": false,
-"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
+"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -684,7 +684,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
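The expressions in this dashboard all follow one join pattern: a per-pod membership series (`kube_controller_pod`, which appears to map each pod to its owning controller) is multiplied by a usage series via `* on (pod)`, and an `or … * 0` tail keeps a zero-valued series for pods with no matching usage samples, so the table still lists them. A minimal sketch of the pattern (`$controller` is a Grafana variable, as in the panels above):

```promql
# Restrict CPU usage to the pods of one controller, then fall back to a
# zero series when no usage samples match, so no pod row disappears.
(
  sum by (pod) (kube_controller_pod{controller="$controller"})
  * on (pod)
  sum by (pod) (rate(container_cpu_usage_seconds_total{container!="POD"}[5m]))
)
or
sum by (pod) (kube_controller_pod{controller="$controller"} * 0)
```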
@@ -710,7 +710,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -723,7 +723,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -736,7 +736,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -762,7 +762,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -786,7 +786,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -798,7 +798,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -810,7 +810,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -848,7 +848,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1315,7 +1315,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1488,7 +1488,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1502,7 +1502,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1642,7 +1642,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum by (pod)\n  (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
+"expr": "sum by (pod)\n  (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -1779,7 +1779,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": " (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
+"expr": " (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2095,7 +2095,7 @@
"repeatDirection": "h",
"targets": [
{
-"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
"refId": "D"
},
{
-"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
+"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
-"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -2306,7 +2306,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
-"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2468,7 +2468,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2837,7 +2837,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
-"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
+"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
-"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
+"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3290,56 +3290,56 @@
"repeatDirection": "h",
"targets": [
{
-"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
-"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
-"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
-"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
-"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
+"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
-"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
-"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "G"
},
{
-"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3834,7 +3834,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3972,7 +3972,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -656,7 +656,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -680,7 +680,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n  ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n  ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -692,7 +692,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -704,7 +704,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n        ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n        ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
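
The two table expressions above compute "wasted" and "overused" CPU as the positive part of requests minus usage (and usage minus requests, respectively), then append an or count(...) * 0 arm so controllers that matched no samples still produce a zero-valued row and the table keeps a stable row set. Stripped of the dashboard variables, the pattern is roughly:

# Positive part of (requests - usage): CPU reserved but never consumed.
(
  sum by (namespace, pod) (kube_pod_container_resource_requests{resource="cpu", unit="core"})
  -
  sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{container!="POD"}[5m]))
) > 0
# The real queries add an `or ... * 0` arm like this one, which backfills a
# zero row for every series with no matching data.
or
sum by (namespace, pod) (kube_pod_container_resource_requests{resource="cpu", unit="core"}) * 0
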
@@ -728,7 +728,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -740,7 +740,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (pod) group_left()\n    sum by (namespace, pod)\n    (\n      avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n    )\n  )\n  or\n  count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (pod) group_left()\n    sum by (namespace, pod)\n    (\n      avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n    )\n  )\n  or\n  count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -752,7 +752,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n  ) \n  or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n  ) \n  or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -764,7 +764,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -776,7 +776,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n        ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n  (\n    avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n        ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n      ) > 0\n    )\n  )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -814,7 +814,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -826,7 +826,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -877,7 +877,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -1475,7 +1475,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
+"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "System",
@@ -1657,7 +1657,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
+"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -1798,7 +1798,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n        -\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n      ) > 0\n    )\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n        -\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n      ) > 0\n    )\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n        ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n      ) > 0\n    )\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n        ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n      ) > 0\n    )\n  )",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -2257,28 +2257,28 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Usage",
 "refId": "D"
 },
 {
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "C"
 },
 {
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "E"
 },
 {
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
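
The "VPA Target" targets in the hunk above join vpa_target_recommendation onto kube_controller_pod through the controller identity labels, so each controller's recommendation is plotted alongside its measured usage, requests, and limits. With the dashboard variables removed, the join is roughly:

# One recommendation series per controller on the right; many pods per
# controller on the left, hence the explicit many-to-one match via group_left().
sum by (controller) (
  kube_controller_pod
  * on (controller_type, controller_name) group_left()
  sum by (controller_type, controller_name) (
    vpa_target_recommendation{container!="POD", resource="cpu"}
  )
)
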
@@ -2458,7 +2458,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2470,7 +2470,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -2622,7 +2622,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
 "pluginVersion": "8.5.13",
 "targets": [
 {
-"expr": "sum\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2814,7 +2814,7 @@
 "refId": "B"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2822,14 +2822,14 @@
 "refId": "C"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -2955,7 +2955,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n      ) > 0\n    )\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n      ) > 0\n    )\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -3091,7 +3091,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n        )\n        or\n        (\n          sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n          +\n          sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n        )\n      ) > 0\n    )\n  )",
+"expr": "sum by (controller)\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n    * on (namespace, pod) group_left()\n    sum by (namespace, pod)\n    (\n      (\n        (\n          sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n          -\n          sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n        )\n        or\n        (\n          sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n          +\n          sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n        )\n      ) > 0\n    )\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -3408,14 +3408,14 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left() \n    sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left() \n    sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n    * on (pod) group_left() \n    sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n    * on (pod) group_left() \n    sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -3423,7 +3423,7 @@
 "refId": "B"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left() \n    sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left() \n    sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -3431,35 +3431,35 @@
 "refId": "C"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n  ) ",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n  ) ",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "E"
 },
 {
-"expr": "sum\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n    * on (pod) group_left() \n    sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n  )",
+"expr": "sum\n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n    * on (pod) group_left() \n    sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "F"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (controller_type, controller_name) group_left()\n    sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
 "refId": "G"
 },
 {
-"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n  )",
+"expr": "sum \n  (\n    kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n    * on (pod) group_left()\n    sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -3910,7 +3910,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -4049,7 +4049,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",

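The hunks that follow apply the same filter change to the namespace-scoped dashboard. Two Grafana window variables appear throughout: instant table queries aggregate over the whole selected time range, while graph queries compute one point per step. Schematically:

# Instant table panel: a single value covering the dashboard's full range.
sum by (namespace) (rate(container_cpu_usage_seconds_total{container!="POD"}[$__range]))
# Time-series panel: one value per step, using Grafana's rate window.
sum by (namespace) (rate(container_cpu_usage_seconds_total{container!="POD"}[$__rate_interval]))
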
@@ -869,7 +869,7 @@
 "refId": "A"
 },
 {
-"expr": "100 * count by (namespace) (\n  sum by (namespace, verticalpodautoscaler) ( \n    count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n    / on (controller_name, namespace) group_left\n    count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n  )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "100 * count by (namespace) (\n  sum by (namespace, verticalpodautoscaler) ( \n    count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n    / on (controller_name, namespace) group_left\n    count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n  )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -878,7 +878,7 @@
 "refId": "B"
 },
 {
-"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -895,7 +895,7 @@
 "refId": "D"
 },
 {
-"expr": "sum by (namespace)\n  (\n    (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n    ) > 0\n  )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n  (\n    (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n    ) > 0\n  )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -903,7 +903,7 @@
 "refId": "E"
 },
 {
-"expr": "sum by (namespace)\n  (\n    (\n      (\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n    )\n    > 0\n  )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n  (\n    (\n      (\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n    )\n    > 0\n  )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -919,7 +919,7 @@
 "refId": "G"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -935,7 +935,7 @@
 "refId": "I"
 },
 {
-"expr": "sum by (namespace)\n  (\n    (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      -\n      sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n    ) > 0\n  )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n  (\n    (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      -\n      sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n    ) > 0\n  )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -943,7 +943,7 @@
 "refId": "J"
 },
 {
-"expr": "sum by (namespace)\n  (\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n    )\n    > 0\n  )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n  (\n    (\n      (\n        sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n      ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n    )\n    > 0\n  )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -968,7 +968,7 @@
 "refId": "M"
 },
 {
-"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -977,7 +977,7 @@
 "refId": "N"
 },
 {
-"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -1449,7 +1449,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "System",
@@ -1627,7 +1627,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -1764,7 +1764,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace)\n  (\n    (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n    ) > 0\n  )",
+"expr": "sum by (namespace)\n  (\n    (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n    ) > 0\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace)\n  (\n    (\n      (\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n      ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n    )\n    > 0\n  )",
+"expr": "sum by (namespace)\n  (\n    (\n      (\n        sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n        -\n        sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n      ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n    )\n    > 0\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2218,21 +2218,21 @@
 "refId": "A"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
+"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "B"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
+"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "C"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -2419,7 +2419,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "User",
|
||||
@@ -2572,7 +2572,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -2754,14 +2754,14 @@
|
||||
"pluginVersion": "8.5.13",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "RSS",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -2769,7 +2769,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -2777,14 +2777,14 @@
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -2910,7 +2910,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -3046,7 +3046,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
|
||||
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -3370,14 +3370,14 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "RSS",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3385,7 +3385,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3393,35 +3393,35 @@
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
|
||||
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "VPA Target",
|
||||
"refId": "E"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Requests",
|
||||
"refId": "F"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Limits",
|
||||
"refId": "G"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -3873,7 +3873,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -4008,7 +4008,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
|
||||
@@ -686,7 +686,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -759,7 +759,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -847,7 +847,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -860,7 +860,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -899,7 +899,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -1503,7 +1503,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -1669,7 +1669,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -1681,7 +1681,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "User",
|
||||
@@ -1820,7 +1820,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2269,7 +2269,7 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Usage",
|
||||
@@ -2476,7 +2476,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2488,7 +2488,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "User",
|
||||
@@ -2639,7 +2639,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2816,7 +2816,7 @@
|
||||
"pluginVersion": "8.5.13",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2824,28 +2824,28 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Cache",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Swap",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -2974,7 +2974,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3110,7 +3110,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3431,7 +3431,7 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3439,7 +3439,7 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3447,28 +3447,28 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Swap",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Limits",
|
||||
"refId": "E"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Requests",
|
||||
@@ -3482,7 +3482,7 @@
|
||||
"refId": "G"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -3930,7 +3930,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ container }}",
|
||||
@@ -4068,7 +4068,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ container }}",
|
||||
|
||||
[File diff suppressed because it is too large]

139 docs/release.md
@@ -1,139 +0,0 @@
# Release Workflow

This section explains how Cozystack builds and releases are made.

## Regular Releases

When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub (see the command sketch after this list).
2. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
    2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.
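
In Git terms, step 1 is just a tag push; a minimal sketch of the maintainer's commands (assuming `origin` points to the GitHub repository):

```bash
# Tag the chosen commit on main and push the tag.
# The tag push is what triggers the release CI workflow.
git checkout main
git pull origin main
git tag v0.42.0
git push origin v0.42.0
```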

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

When testing and editing are completed, the sequence goes on.

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on merge:
    1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing the tag to GitHub (see the sketch after this list).
    2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.
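
The tag move in step 5.1 amounts to the following Git operations — a sketch of the CI job's effect rather than its literal workflow code (`<merge-commit>` is a placeholder):

```bash
# Re-point the existing tag at the merge commit and force-push it,
# so the published release refers to the commit that actually landed in main.
git tag -f v0.42.0 <merge-commit>
git push --force origin v0.42.0
```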

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```

## Patch Releases

Making a patch release has a lot in common with a regular release, with a couple of differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that we have released in `v0.42.0`.

Once problems are found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```

1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
   These must be only patches to features that were present in version `v0.42.0`.

   Cherry-picking can be done as soon as each patch is merged into `main`,
   or directly before the release.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```

When all relevant patch commits are cherry-picked, the branch is ready for release.
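
A minimal sketch of this cherry-picking flow (the commit hashes are placeholders):

```bash
# Create the release branch from the release tag, once.
git checkout -b release-0.42 v0.42.0

# Cherry-pick each relevant patch commit from main, as it lands or in one batch,
# then push the branch to GitHub.
git cherry-pick <patch-1-sha> <patch-2-sha>
git push origin release-0.42
```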

2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and then pushes it to GitHub (see the sketch after this list).
3. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
    2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.
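
As with a regular release, step 2 is plain Git, this time run from the release branch (a minimal sketch):

```bash
# Tag the tip of the release branch and push the tag;
# the tag push triggers the same release CI workflow.
git checkout release-0.42
git pull origin release-0.42
git tag v0.42.1
git push origin v0.42.1
```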

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, when the release is confirmed, the release sequence goes on.

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on merge:
    1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing the tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.

117 go.mod
@@ -1,117 +0,0 @@
// This is a generated file. Do not edit directly.

module github.com/cozystack/cozystack

go 1.23.0

require (
    github.com/fluxcd/helm-controller/api v1.1.0
    github.com/google/gofuzz v1.2.0
    github.com/onsi/ginkgo/v2 v2.19.0
    github.com/onsi/gomega v1.33.1
    github.com/spf13/cobra v1.8.1
    github.com/stretchr/testify v1.9.0
    gopkg.in/yaml.v2 v2.4.0
    k8s.io/api v0.31.2
    k8s.io/apiextensions-apiserver v0.31.2
    k8s.io/apimachinery v0.31.2
    k8s.io/apiserver v0.31.2
    k8s.io/client-go v0.31.2
    k8s.io/component-base v0.31.2
    k8s.io/klog/v2 v2.130.1
    k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2
    k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
    sigs.k8s.io/controller-runtime v0.19.0
    sigs.k8s.io/structured-merge-diff/v4 v4.4.1
)

require (
    github.com/NYTimes/gziphandler v1.1.1 // indirect
    github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
    github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/blang/semver/v4 v4.0.0 // indirect
    github.com/cenkalti/backoff/v4 v4.3.0 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/coreos/go-semver v0.3.1 // indirect
    github.com/coreos/go-systemd/v22 v22.5.0 // indirect
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
    github.com/emicklei/go-restful/v3 v3.11.0 // indirect
    github.com/evanphx/json-patch/v5 v5.9.0 // indirect
    github.com/felixge/httpsnoop v1.0.4 // indirect
    github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
    github.com/fluxcd/pkg/apis/meta v1.6.1 // indirect
    github.com/fsnotify/fsnotify v1.7.0 // indirect
    github.com/fxamacker/cbor/v2 v2.7.0 // indirect
    github.com/go-logr/logr v1.4.2 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/go-logr/zapr v1.3.0 // indirect
    github.com/go-openapi/jsonpointer v0.21.0 // indirect
    github.com/go-openapi/jsonreference v0.20.2 // indirect
    github.com/go-openapi/swag v0.23.0 // indirect
    github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/cel-go v0.21.0 // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
    github.com/google/go-cmp v0.6.0 // indirect
    github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
    github.com/imdario/mergo v0.3.6 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/mailru/easyjson v0.7.7 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/prometheus/client_golang v1.19.1 // indirect
    github.com/prometheus/client_model v0.6.1 // indirect
    github.com/prometheus/common v0.55.0 // indirect
    github.com/prometheus/procfs v0.15.1 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/stoewer/go-strcase v1.3.0 // indirect
    github.com/x448/float16 v0.8.4 // indirect
    go.etcd.io/etcd/api/v3 v3.5.16 // indirect
    go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect
    go.etcd.io/etcd/client/v3 v3.5.16 // indirect
    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect
    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
    go.opentelemetry.io/otel v1.28.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
    go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
    go.opentelemetry.io/otel/metric v1.28.0 // indirect
    go.opentelemetry.io/otel/sdk v1.28.0 // indirect
    go.opentelemetry.io/otel/trace v1.28.0 // indirect
    go.opentelemetry.io/proto/otlp v1.3.1 // indirect
    go.uber.org/multierr v1.11.0 // indirect
    go.uber.org/zap v1.27.0 // indirect
    golang.org/x/crypto v0.28.0 // indirect
    golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
    golang.org/x/net v0.30.0 // indirect
    golang.org/x/oauth2 v0.23.0 // indirect
    golang.org/x/sync v0.8.0 // indirect
    golang.org/x/sys v0.26.0 // indirect
    golang.org/x/term v0.25.0 // indirect
    golang.org/x/text v0.19.0 // indirect
    golang.org/x/time v0.7.0 // indirect
    golang.org/x/tools v0.26.0 // indirect
    gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
    google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
    google.golang.org/grpc v1.65.0 // indirect
    google.golang.org/protobuf v1.34.2 // indirect
    gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/kms v0.31.2 // indirect
    sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
    sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
    sigs.k8s.io/yaml v1.4.0 // indirect
)

313 go.sum
@@ -1,313 +0,0 @@
[go.sum deleted along with go.mod: 313 lines of module checksums (`h1:` and `/go.mod` hashes) for the dependency set above; generated content, truncated here by the viewer.]
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
||||
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
|
||||
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
|
||||
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
|
||||
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
|
||||
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0=
|
||||
k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk=
|
||||
k8s.io/apiextensions-apiserver v0.31.2 h1:W8EwUb8+WXBLu56ser5IudT2cOho0gAKeTOnywBLxd0=
|
||||
k8s.io/apiextensions-apiserver v0.31.2/go.mod h1:i+Geh+nGCJEGiCGR3MlBDkS7koHIIKWVfWeRFiOsUcM=
|
||||
k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=
|
||||
k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4=
|
||||
k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE=
|
||||
k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc=
|
||||
k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs=
|
||||
k8s.io/component-base v0.31.2 h1:Z1J1LIaC0AV+nzcPRFqfK09af6bZ4D1nAOpWsy9owlA=
|
||||
k8s.io/component-base v0.31.2/go.mod h1:9PeyyFN/drHjtJZMCTkSpQJS3U9OXORnHQqMLDz0sUQ=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kms v0.31.2 h1:pyx7l2qVOkClzFMIWMVF/FxsSkgd+OIGH7DecpbscJI=
|
||||
k8s.io/kms v0.31.2/go.mod h1:OZKwl1fan3n3N5FFxnW5C4V3ygrah/3YXeJWS3O6+94=
|
||||
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2 h1:GKE9U8BH16uynoxQii0auTjmmmuZ3O0LFMN6S0lPPhI=
|
||||
k8s.io/kube-openapi v0.0.0-20240827152857-f7e401e7b4c2/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
|
||||
sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q=
|
||||
sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
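The go.sum block above pins each module version to content hashes (the `h1:` entry covers the whole module tree, the `/go.mod` entry just its go.mod file). A minimal way to check a checkout against these pins, using only standard Go tooling:

# Recompute hashes for the downloaded modules and compare them with go.sum
go mod verify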
@@ -1,16 +0,0 @@
/*
Copyright 2025 The Cozystack Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

@@ -21,7 +21,7 @@ fix_d8() {
}

swap_pvc_overview() {
jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
}
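The swap_pvc_overview filter above is a single dense jq program; the same program laid out with comments (a readability sketch only, behavior identical to the one-liner):

jq '
  # capture the "Overview" sub-panel of the "PVC Detailed" row, then delete it
  (.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a
  | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview"))
  # capture the remaining "PVC Detailed" panel, then delete it too
  | (.panels[] | select(.title=="PVC Detailed")) as $b
  | del(.panels[] | select(.title=="PVC Detailed"))
  # re-append both panels with their gridPos.y values swapped
  | (.panels[.panels|length] = ($a | .gridPos.y = $b.gridPos.y))
  | (.panels[.panels|length] = ($b | .gridPos.y = $a.gridPos.y))
'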

deprecated_remove_faq() {
@@ -68,7 +68,7 @@ modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/namespace/
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhost_detail.json
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhosts.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/control-plane-status.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd3.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/deprecated-resources.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/ntp.json #TODO
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/nodes.json
@@ -78,10 +78,6 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/pod.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespaces.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespace.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-planning/capacity-planning.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
EOT


@@ -113,3 +109,4 @@ done <<\EOT
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
EOT


@@ -1,165 +0,0 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'


ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease $release_name -n $namespace
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path name is required.${RESET}"
    exit 1
  fi

  if [[ -n "$values_file" && -f "$values_file" ]]; then
    local values_section
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}

function install_tenant (){
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks(){
  local checks_file="$1"
  echo "after exec make $checks_file"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"

  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300 # Timeout in seconds
  local interval=5  # Interval between checks in seconds
  local elapsed=0


  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr $release_name $namespace
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi


checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file
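The deleted driver above takes a chart name as its only positional argument: it first installs tenant-e2e into tenant-root, then installs <chart>-e2e into the test tenant and polls its HelmRelease status. A hypothetical invocation (the script's file name is assumed, since the dump does not show it):

# run the end-to-end check for a single chart, e.g. nats
./hack/e2e-apps.sh nats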
hack/e2e.sh
@@ -60,7 +60,7 @@ done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi
@@ -84,7 +84,7 @@ done

# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
@@ -113,11 +113,8 @@ machine:
        - usermode_helper=disabled
    - name: zfs
    - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  install:
    image: ghcr.io/aenix-io/cozystack/talos:v1.8.0
  files:
    - content: |
        [plugins]
@@ -127,12 +124,6 @@ machine:
        op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
@@ -145,9 +136,6 @@ EOT

cat > patch-controlplane.yaml <<\EOT
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
@@ -191,11 +179,10 @@ talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

# Bootstrap
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11

# Wait for etcd
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
timeout 180 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'

rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
@@ -203,7 +190,7 @@ export KUBECONFIG=$PWD/kubeconfig

# Wait for Kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system -o yaml | kubectl apply -f -
kubectl create ns cozy-system
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
@@ -216,8 +203,6 @@ data:
  ipv4-pod-gateway: "10.244.0.1"
  ipv4-svc-cidr: "10.96.0.0/16"
  ipv4-join-cidr: "100.64.0.0/16"
  root-host: example.org
  api-server-endpoint: https://192.168.123.10:6443
EOT

#
@@ -234,7 +219,6 @@ sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
@@ -303,31 +287,23 @@ spec:
  avoidBuggyIPs: false
EOT

# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m

kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
kubectl patch -n tenant-root hr/tenant-root --type=merge -p '{"spec":{ "values":{
"host": "example.org",
"ingress": true,
"monitoring": true,
"etcd": true,
"isolated": true
}}'
}}}'

# Wait for HelmReleases to be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for HelmReleases to be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root

if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
  flux reconcile hr monitoring -n tenant-root --force
  kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
kubectl patch -n tenant-root hr/ingress --type=merge -p '{"spec":{ "values":{
"dashboard": true
}}'
}}}'

# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
@@ -337,8 +313,8 @@ kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-i
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd

# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-longterm vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for grafana
@@ -350,12 +326,3 @@ ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.statu

# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found


# Test OIDC
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
"oidc-enabled": "true"
}}'

timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator

@@ -1,13 +1,12 @@
#!/bin/sh
set -e

file=versions_map

charts=$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')

# <chart> <version> <commit>
new_map=$(
  for chart in $charts; do
    awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' "$chart/Chart.yaml"
    awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' $chart/Chart.yaml
  done
)

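Each loop iteration above emits one `<chart> <version> HEAD` line by scraping the chart's Chart.yaml. A small demonstration of the awk extraction (the chart name and version are made up):

cat > /tmp/Chart.yaml <<\EOF
name: example-chart
version: 1.2.3
EOF
awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' /tmp/Chart.yaml
# prints: example-chart 1.2.3 HEAD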
@@ -16,46 +15,47 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
  exit 0
fi

miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")

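The `NR==FNR` construction in miss_map is the standard two-input awk idiom: while awk reads its first input (here `-`, the fresh map on stdin) the global and per-file line counters are equal, so those lines only populate the lookup array; lines of the second input (the existing versions_map) are then printed only when their `<chart> <version>` key is missing from the array. A toy demonstration with throwaway files:

printf 'a 1 HEAD\nb 2 HEAD\n' > /tmp/new
printf 'a 1 c0ffee1\nc 3 deadbee\n' > /tmp/old
awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3 }' /tmp/new /tmp/old
# prints only the entry absent from the new map: c 3 deadbee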
# search across all tags sorted by version
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if (!($1 " " $2 in new_map)) print $1, $2, $3}' - $file)

resolved_miss_map=$(
  echo "$miss_map" | while read -r chart version commit; do
    # if version is found in HEAD, it's HEAD
    if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
      echo "$chart $version HEAD"
      continue
    fi

    # if commit is not HEAD, check if it's valid
    if [ "$commit" != "HEAD" ]; then
      if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
        echo "Commit $commit for $chart $version is not valid" >&2
        exit 1
  echo "$miss_map" | while read chart version commit; do
    if [ "$commit" = HEAD ]; then
      line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
      change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')

      if [ "$change_commit" = "00000000" ]; then
        # Not committed yet, use previous commit
        line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
        commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
        if [ $(echo $commit | cut -c1) = "^" ]; then
          # Previous commit does not exist
          commit=$(echo $commit | cut -c2-)
        fi
      else
        # Committed, but version_map wasn't updated
        line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
        change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
        if [ $(echo $change_commit | cut -c1) = "^" ]; then
          # Previous commit does not exist
          commit=$(echo $change_commit | cut -c2-)
        else
          commit=$(git describe --always "$change_commit~1")
        fi
      fi

      commit=$(git rev-parse --short "$commit")
      echo "$chart $version $commit"
      continue
    fi

    # if commit is HEAD, but version is not found in HEAD, check all tags
    found_tag=""
    for tag in $search_commits; do
      if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
        found_tag=$(git rev-parse --short "${tag}")
        break
      # Check if the commit belongs to the main branch
      if ! git merge-base --is-ancestor "$commit" main; then
        # Find the closest parent commit that belongs to main
        commit_in_main=$(git log --pretty=format:"%h" main -- "$chart" | head -n 1)
        if [ -n "$commit_in_main" ]; then
          commit="$commit_in_main"
        else
          # No valid commit found in main branch for $chart, skipping..."
          continue
        fi
      fi
    done

    if [ -z "$found_tag" ]; then
      echo "Can't find $chart $version in any version tag, removing it" >&2
      continue
    fi

    echo "$chart $version $found_tag"
    echo "$chart $version $commit"
  done
)


@@ -1,23 +0,0 @@
#!/bin/bash

YQ_VERSION="v4.35.1"
RED='\033[31m'
RESET='\033[0m'

check-yq-version() {
  current_version=$(yq -V | awk '$(NF-1) == "version" {print $NF}')
  if [ -z "$current_version" ]; then
    echo "yq is not installed or version cannot be determined."
    exit 1
  fi
  echo "Current yq version: $current_version"

  if [ "$(printf '%s\n' "$YQ_VERSION" "$current_version" | sort -V | head -n1)" = "$YQ_VERSION" ]; then
    echo "Greater than or equal to $YQ_VERSION"
  else
    echo -e "${RED}ERROR: yq version less than $YQ_VERSION${RESET}"
    exit 1
  fi
}

check-yq-version
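The version gate above relies on `sort -V`: if the required minimum sorts first (or the two versions are equal), the installed yq is new enough. The comparison in isolation:

printf '%s\n' "v4.35.1" "v4.40.5" | sort -V | head -n1
# prints v4.35.1, so an installed v4.40.5 passes the check
printf '%s\n' "v4.35.1" "v4.2.0" | sort -V | head -n1
# prints v4.2.0, so an installed v4.2.0 fails the check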
hack/testdata/http-cache/check.sh
@@ -1 +0,0 @@
return 0
hack/testdata/http-cache/values.yaml
@@ -1,2 +0,0 @@
endpoints:
- 8.8.8.8:443
hack/testdata/kubernetes/check.sh
@@ -1 +0,0 @@
return 0
hack/testdata/kubernetes/values.yaml
@@ -1,62 +0,0 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
  replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    instanceType: "u1.medium"
    ephemeralStorage: 20Gi
    roles:
    - ingress-nginx

    resources:
      cpu: ""
      memory: ""

## @section Cluster Addons
##
addons:

  ## Cert-manager: automatically creates and manages SSL/TLS certificates
  ##
  certManager:
    ## @param addons.certManager.enabled Enables the cert-manager
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: true
    valuesOverride: {}

  ## Ingress-NGINX Controller
  ##
  ingressNginx:
    ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: true
    ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
    ## e.g:
    ## hosts:
    ## - example.org
    ## - foo.example.net
    ##
    hosts: []
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:
    ## @param addons.fluxcd.enabled Enables Flux CD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: true
    valuesOverride: {}
hack/testdata/nats/check.sh
@@ -1 +0,0 @@
return 0
hack/testdata/nats/values.yaml
@@ -1,10 +0,0 @@

## @section Common parameters

## @param external Enable external access from outside the cluster
## @param replicas Number of NATS replicas
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""
hack/testdata/tenant/check.sh
@@ -1 +0,0 @@
return 0
hack/testdata/tenant/values.yaml
@@ -1,6 +0,0 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true
@@ -1,52 +0,0 @@
#!/usr/bin/env bash

# Copyright 2024 The Cozystack Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${SCRIPT_ROOT}/api/api-rules"}"
UPDATE_API_KNOWN_VIOLATIONS="${UPDATE_API_KNOWN_VIOLATIONS:-true}"
CONTROLLER_GEN="go run sigs.k8s.io/controller-tools/cmd/controller-gen@v0.16.4"

source "${CODEGEN_PKG}/kube_codegen.sh"

THIS_PKG="k8s.io/sample-apiserver"

kube::codegen::gen_helpers \
  --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
  "${SCRIPT_ROOT}/pkg/apis"

if [[ -n "${API_KNOWN_VIOLATIONS_DIR:-}" ]]; then
  report_filename="${API_KNOWN_VIOLATIONS_DIR}/cozystack_api_violation_exceptions.list"
  if [[ "${UPDATE_API_KNOWN_VIOLATIONS:-}" == "true" ]]; then
    update_report="--update-report"
  fi
fi

kube::codegen::gen_openapi \
  --extra-pkgs "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" \
  --output-dir "${SCRIPT_ROOT}/pkg/generated/openapi" \
  --output-pkg "${THIS_PKG}/pkg/generated/openapi" \
  --report-filename "${report_filename:-"/dev/null"}" \
  ${update_report:+"${update_report}"} \
  --boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt" \
  "${SCRIPT_ROOT}/pkg/apis"

$CONTROLLER_GEN object:headerFile="hack/boilerplate.go.txt" paths="./api/..."
$CONTROLLER_GEN rbac:roleName=manager-role crd paths="./api/..." output:crd:artifacts:config=packages/system/cozystack-controller/templates/crds
@@ -1,11 +0,0 @@
#!/bin/bash
set -xe

version=${VERSION:-$(git describe --tags)}

gh release upload --clobber $version _out/assets/cozystack-installer.yaml
gh release upload --clobber $version _out/assets/metal-amd64.iso
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
gh release upload --clobber $version _out/assets/kernel-amd64
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
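The `${VERSION:-$(git describe --tags)}` expansion means an explicitly set, non-empty VERSION wins and the latest reachable tag is used otherwise; the fallback command only runs when needed. The behavior in isolation (the example tag value is illustrative):

VERSION=v0.99.0; version=${VERSION:-$(git describe --tags)}; echo "$version"   # prints v0.99.0; git describe never runs
unset VERSION;   version=${VERSION:-$(git describe --tags)}; echo "$version"   # prints whatever git describe reports for the checkout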
@@ -1,96 +0,0 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"
    "fmt"
    "path/filepath"
    "runtime"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var ctx context.Context
var cancel context.CancelFunc

func TestControllers(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    ctx, cancel = context.WithCancel(context.TODO())

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
        ErrorIfCRDPathMissing: true,

        // The BinaryAssetsDirectory is only required if you want to run the tests directly
        // without calling the makefile target test. If not set, it will look for the
        // default path defined in controller-runtime, which is /usr/local/kubebuilder/.
        // Note that you must have the required binaries set up under the bin directory to perform
        // the tests directly. When we run make test they will be set up and used automatically.
        BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
            fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
    }

    var err error
    // cfg is defined in this file globally.
    cfg, err = testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    err = cozystackiov1alpha1.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
    Expect(err).NotTo(HaveOccurred())
    Expect(k8sClient).NotTo(BeNil())

})

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    cancel()
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})
@@ -1,87 +0,0 @@
package controller

import (
    "context"
    "strings"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadReconciler reconciles a Workload object
type WorkloadReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)
    w := &cozyv1alpha1.Workload{}
    err := r.Get(ctx, req.NamespacedName, w)
    if err != nil {
        if apierrors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        logger.Error(err, "Unable to fetch Workload")
        return ctrl.Result{}, err
    }

    // it's being deleted, nothing to handle
    if w.DeletionTimestamp != nil {
        return ctrl.Result{}, nil
    }

    t := getMonitoredObject(w)
    err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

    // found object, nothing to do
    if err == nil {
        return ctrl.Result{}, nil
    }

    // error getting object but not 404 -- requeue
    if !apierrors.IsNotFound(err) {
        logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
        return ctrl.Result{}, err
    }

    // the monitored object is gone, so delete the orphaned Workload
    err = r.Delete(ctx, w)
    if err != nil {
        logger.Error(err, "failed to delete workload")
    }
    return ctrl.Result{}, err
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        // Watch Workload objects
        For(&cozyv1alpha1.Workload{}).
        Complete(r)
}

// getMonitoredObject maps a Workload to the object it mirrors, based on the
// name prefix convention: "pvc-*" -> PersistentVolumeClaim, "svc-*" -> Service,
// anything else -> Pod.
func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
    if strings.HasPrefix(w.Name, "pvc-") {
        obj := &corev1.PersistentVolumeClaim{}
        obj.Name = strings.TrimPrefix(w.Name, "pvc-")
        obj.Namespace = w.Namespace
        return obj
    }
    if strings.HasPrefix(w.Name, "svc-") {
        obj := &corev1.Service{}
        obj.Name = strings.TrimPrefix(w.Name, "svc-")
        obj.Namespace = w.Namespace
        return obj
    }
    obj := &corev1.Pod{}
    obj.Name = w.Name
    obj.Namespace = w.Namespace
    return obj
}
@@ -1,438 +0,0 @@
package controller

import (
    "context"
    "encoding/json"
    "fmt"
    "sort"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/utils/pointer"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadMonitorReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloadmonitors/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch

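The kubebuilder markers above are what controller-gen (invoked by the update-codegen script earlier in this diff) compiles into an RBAC Role. Once the CRDs and controller are deployed, the declared resources can be inspected directly; a usage sketch, assuming the plural resource names from the markers:

kubectl get workloadmonitors -A
kubectl get workloads -A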
// isServiceReady checks if the service has an external IP bound
|
||||
func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
|
||||
return len(svc.Status.LoadBalancer.Ingress) > 0
|
||||
}
|
||||
|
||||
// isPVCReady checks if the PVC is bound
|
||||
func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
|
||||
return pvc.Status.Phase == corev1.ClaimBound
|
||||
}
|
||||
|
||||
// isPodReady checks if the Pod is in the Ready condition.
|
||||
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
|
||||
for _, c := range pod.Status.Conditions {
|
||||
if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// updateOwnerReferences adds the given monitor as a new owner reference to the object if not already present.
|
||||
// It then sorts the owner references to enforce a consistent order.
|
||||
func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
|
||||
// Retrieve current owner references
|
||||
owners := obj.GetOwnerReferences()
|
||||
|
||||
// Check if current monitor is already in owner references
|
||||
var alreadyOwned bool
|
||||
for _, ownerRef := range owners {
|
||||
if ownerRef.UID == monitor.GetUID() {
|
||||
alreadyOwned = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
runtimeObj, ok := monitor.(runtime.Object)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
gvk := runtimeObj.GetObjectKind().GroupVersionKind()
|
||||
|
||||
// If not already present, add new owner reference without controller flag
|
||||
if !alreadyOwned {
|
||||
newOwnerRef := metav1.OwnerReference{
|
||||
APIVersion: gvk.GroupVersion().String(),
|
||||
Kind: gvk.Kind,
|
||||
Name: monitor.GetName(),
|
||||
UID: monitor.GetUID(),
|
||||
// Set Controller to false to avoid conflict as multiple controllers are not allowed
|
||||
Controller: pointer.BoolPtr(false),
|
||||
BlockOwnerDeletion: pointer.BoolPtr(true),
|
||||
}
|
||||
owners = append(owners, newOwnerRef)
|
||||
}
|
||||
|
||||
// Sort owner references to enforce a consistent order by UID
|
||||
sort.SliceStable(owners, func(i, j int) bool {
|
||||
return owners[i].UID < owners[j].UID
|
||||
})
|
||||
|
||||
// Update the owner references of the object
|
||||
obj.SetOwnerReferences(owners)
|
||||
}
|
||||
|
||||
// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
|
||||
func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
|
||||
ctx context.Context,
|
||||
monitor *cozyv1alpha1.WorkloadMonitor,
|
||||
svc corev1.Service,
|
||||
) error {
|
||||
logger := log.FromContext(ctx)
|
||||
workload := &cozyv1alpha1.Workload{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("svc-%s", svc.Name),
|
||||
Namespace: svc.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
resources := make(map[string]resource.Quantity)
|
||||
|
||||
quantity := resource.MustParse("0")
|
||||
|
||||
for _, ing := range svc.Status.LoadBalancer.Ingress {
|
||||
if ing.IP != "" {
|
||||
quantity.Add(resource.MustParse("1"))
|
||||
}
|
||||
}
|
||||
|
||||
var resourceLabel string
|
||||
if svc.Annotations != nil {
|
||||
var ok bool
|
||||
resourceLabel, ok = svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]
|
||||
if !ok {
|
||||
resourceLabel = "default"
|
||||
}
|
||||
}
|
||||
resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
|
||||
resources[resourceLabel] = quantity
|
||||
|
||||
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
|
||||
// Update owner references with the new monitor
|
||||
updateOwnerReferences(workload.GetObjectMeta(), monitor)
|
||||
|
||||
workload.Labels = svc.Labels
|
||||
|
||||
// Fill Workload status fields:
|
||||
workload.Status.Kind = monitor.Spec.Kind
|
||||
workload.Status.Type = monitor.Spec.Type
|
||||
workload.Status.Resources = resources
|
||||
workload.Status.Operational = r.isServiceReady(&svc)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}

// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
	ctx context.Context,
	monitor *cozyv1alpha1.WorkloadMonitor,
	pvc corev1.PersistentVolumeClaim,
) error {
	logger := log.FromContext(ctx)
	workload := &cozyv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("pvc-%s", pvc.Name),
			Namespace: pvc.Namespace,
		},
	}

	resources := make(map[string]resource.Quantity)

	for resourceName, resourceQuantity := range pvc.Status.Capacity {
		// Fall back to "default" unless a non-empty StorageClassName is set.
		// Checking the pointer before dereferencing avoids a nil-pointer panic.
		storageClass := "default"
		if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName != "" {
			storageClass = *pvc.Spec.StorageClassName
		}
		resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
		resources[resourceLabel] = resourceQuantity
	}

	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
		// Update owner references with the new monitor
		updateOwnerReferences(workload.GetObjectMeta(), monitor)

		workload.Labels = pvc.Labels

		// Fill Workload status fields:
		workload.Status.Kind = monitor.Spec.Kind
		workload.Status.Type = monitor.Spec.Type
		workload.Status.Resources = resources
		workload.Status.Operational = r.isPVCReady(&pvc)

		return nil
	})
	if err != nil {
		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
		return err
	}

	return nil
}
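
As a sketch of the result (object names and the serialized status field names are assumed from the Go struct usage above, not confirmed by the CRD), a 10Gi PVC bound with storage class `replicated` would yield a Workload like:

```yaml
apiVersion: cozystack.io/v1alpha1
kind: Workload
metadata:
  name: pvc-data-myapp-0   # "pvc-" prefix plus the PVC name (illustrative)
  namespace: tenant-demo   # hypothetical namespace
status:
  kind: myapp              # copied from the monitor's spec.kind (illustrative)
  type: myapp              # copied from the monitor's spec.type (illustrative)
  operational: true
  resources:
    # "<storageClass>.storageclass.storage.k8s.io/requests.<resource>" pattern
    replicated.storageclass.storage.k8s.io/requests.storage: 10Gi
```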

// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
	ctx context.Context,
	monitor *cozyv1alpha1.WorkloadMonitor,
	pod corev1.Pod,
) error {
	logger := log.FromContext(ctx)

	// Combine both init containers and normal containers to sum resources properly.
	// Build a fresh slice so append cannot overwrite the backing array of
	// pod.Spec.InitContainers and corrupt the cached Pod object.
	combinedContainers := make([]corev1.Container, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
	combinedContainers = append(combinedContainers, pod.Spec.InitContainers...)
	combinedContainers = append(combinedContainers, pod.Spec.Containers...)

	// totalResources will store the sum of all container resource limits
	totalResources := make(map[string]resource.Quantity)

	// Iterate over all containers to aggregate their Limits
	for _, container := range combinedContainers {
		for name, qty := range container.Resources.Limits {
			if existing, exists := totalResources[name.String()]; exists {
				existing.Add(qty)
				totalResources[name.String()] = existing
			} else {
				totalResources[name.String()] = qty.DeepCopy()
			}
		}
	}

	// If the annotation "workload.cozystack.io/resources" is present, parse it and
	// let its entries override the aggregated limits for the listed keys.
	if resourcesStr, ok := pod.Annotations["workload.cozystack.io/resources"]; ok {
		annRes := map[string]string{}
		if err := json.Unmarshal([]byte(resourcesStr), &annRes); err != nil {
			logger.Error(err, "Failed to parse resources annotation", "pod", pod.Name)
		} else {
			for k, v := range annRes {
				parsed, err := resource.ParseQuantity(v)
				if err != nil {
					logger.Error(err, "Failed to parse resource quantity from annotation", "key", k, "value", v)
					continue
				}
				totalResources[k] = parsed
			}
		}
	}

	workload := &cozyv1alpha1.Workload{
		ObjectMeta: metav1.ObjectMeta{
			Name:      pod.Name,
			Namespace: pod.Namespace,
		},
	}

	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
		// Update owner references with the new monitor
		updateOwnerReferences(workload.GetObjectMeta(), monitor)

		// Copy labels from the Pod if needed
		workload.Labels = pod.Labels

		// Fill Workload status fields:
		workload.Status.Kind = monitor.Spec.Kind
		workload.Status.Type = monitor.Spec.Type
		workload.Status.Resources = totalResources
		workload.Status.Operational = r.isPodReady(&pod)

		return nil
	})
	if err != nil {
		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
		return err
	}

	return nil
}
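
A sketch of the annotation override path: a Pod carrying `workload.cozystack.io/resources` (all names and values below are illustrative) replaces the summed container limits for the listed keys:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: worker-0           # hypothetical Pod name
  labels:
    app: my-app            # must match the monitor's spec.selector
  annotations:
    # JSON map of resource name to quantity; each value is parsed
    # with resource.ParseQuantity and overrides the aggregated limit
    workload.cozystack.io/resources: '{"cpu": "2", "memory": "4Gi"}'
spec:
  containers:
    - name: worker
      image: example.org/worker:latest   # hypothetical image
      resources:
        limits:
          cpu: "1"      # superseded by the annotation value above
          memory: 2Gi   # superseded by the annotation value above
```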

// Reconcile is the main reconcile loop.
// 1. It reconciles WorkloadMonitor objects themselves (create/update/delete).
// 2. It also reconciles Pod events mapped to WorkloadMonitor via label selector.
func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	// Fetch the WorkloadMonitor object if it exists
	monitor := &cozyv1alpha1.WorkloadMonitor{}
	err := r.Get(ctx, req.NamespacedName, monitor)
	if err != nil {
		// If the resource is not found, it may be a Pod event (mapFunc).
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Unable to fetch WorkloadMonitor")
		return ctrl.Result{}, err
	}

	// List Pods that match the WorkloadMonitor's selector
	podList := &corev1.PodList{}
	if err := r.List(
		ctx,
		podList,
		client.InNamespace(monitor.Namespace),
		client.MatchingLabels(monitor.Spec.Selector),
	); err != nil {
		logger.Error(err, "Unable to list Pods for WorkloadMonitor", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	var observedReplicas, availableReplicas int32

	// For each matching Pod, reconcile the corresponding Workload
	for _, pod := range podList.Items {
		observedReplicas++
		if err := r.reconcilePodForMonitor(ctx, monitor, pod); err != nil {
			logger.Error(err, "Failed to reconcile Workload for Pod", "pod", pod.Name)
			continue
		}
		if r.isPodReady(&pod) {
			availableReplicas++
		}
	}

	pvcList := &corev1.PersistentVolumeClaimList{}
	if err := r.List(
		ctx,
		pvcList,
		client.InNamespace(monitor.Namespace),
		client.MatchingLabels(monitor.Spec.Selector),
	); err != nil {
		logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	for _, pvc := range pvcList.Items {
		if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
			logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
			continue
		}
	}

	svcList := &corev1.ServiceList{}
	if err := r.List(
		ctx,
		svcList,
		client.InNamespace(monitor.Namespace),
		client.MatchingLabels(monitor.Spec.Selector),
	); err != nil {
		logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	for _, svc := range svcList.Items {
		if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
			continue
		}
		if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
			logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
			continue
		}
	}

	// Update WorkloadMonitor status based on observed pods
	monitor.Status.ObservedReplicas = observedReplicas
	monitor.Status.AvailableReplicas = availableReplicas

	// Default to operational = true, but check MinReplicas if set
	monitor.Status.Operational = pointer.Bool(true)
	if monitor.Spec.MinReplicas != nil && availableReplicas < *monitor.Spec.MinReplicas {
		monitor.Status.Operational = pointer.Bool(false)
	}

	// Update the WorkloadMonitor status in the cluster
	if err := r.Status().Update(ctx, monitor); err != nil {
		logger.Error(err, "Unable to update WorkloadMonitor status", "monitor", monitor.Name)
		return ctrl.Result{}, err
	}

	// Return without requeue if we want purely event-driven reconciliations
	return ctrl.Result{}, nil
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch WorkloadMonitor objects
		For(&cozyv1alpha1.WorkloadMonitor{}).
		// Also watch Pod objects and map them back to WorkloadMonitor if labels match
		Watches(
			&corev1.Pod{},
			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
		).
		// Watch PVCs as well
		Watches(
			&corev1.PersistentVolumeClaim{},
			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
		).
		// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
		Owns(&cozyv1alpha1.Workload{}).
		Complete(r)
}

func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
	return func(ctx context.Context, obj client.Object) []reconcile.Request {
		concrete, ok := obj.(T)
		if !ok {
			return nil
		}

		var monitorList cozyv1alpha1.WorkloadMonitorList
		// List all WorkloadMonitors in the same namespace
		if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
			return nil
		}

		labels := concrete.GetLabels()
		// Match each monitor's selector with the object's labels
		var requests []reconcile.Request
		for _, m := range monitorList.Items {
			matches := true
			for k, v := range m.Spec.Selector {
				if labelVal, exists := labels[k]; !exists || labelVal != v {
					matches = false
					break
				}
			}
			if matches {
				requests = append(requests, reconcile.Request{
					NamespacedName: types.NamespacedName{
						Namespace: m.Namespace,
						Name:      m.Name,
					},
				})
			}
		}
		return requests
	}
}
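
Tying the pieces together, a minimal WorkloadMonitor selects Pods, PVCs, and LoadBalancer Services by label and reports replica counts in its status. The sketch below uses hypothetical names; the field set mirrors the chart templates that appear later in this diff:

```yaml
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: my-app              # hypothetical release name
  namespace: tenant-demo    # hypothetical namespace
spec:
  kind: my-app
  type: my-app
  replicas: 2
  minReplicas: 1            # status.operational flips to false below this
  selector:
    app: my-app             # matched against Pod/PVC/Service labels
  version: 0.1.0            # illustrative chart version
```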

@@ -1,292 +0,0 @@
package telemetry

import (
	"bytes"
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// Collector handles telemetry data collection and sending
type Collector struct {
	client          client.Client
	discoveryClient discovery.DiscoveryInterface
	config          *Config
	ticker          *time.Ticker
	stopCh          chan struct{}
}

// NewCollector creates a new telemetry collector
func NewCollector(client client.Client, config *Config, kubeConfig *rest.Config) (*Collector, error) {
	discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create discovery client: %w", err)
	}
	return &Collector{
		client:          client,
		discoveryClient: discoveryClient,
		config:          config,
	}, nil
}

// Start implements manager.Runnable
func (c *Collector) Start(ctx context.Context) error {
	if c.config.Disabled {
		return nil
	}

	c.ticker = time.NewTicker(c.config.Interval)
	c.stopCh = make(chan struct{})

	// Initial collection
	c.collect(ctx)

	for {
		select {
		case <-ctx.Done():
			c.ticker.Stop()
			close(c.stopCh)
			return nil
		case <-c.ticker.C:
			c.collect(ctx)
		}
	}
}

// NeedLeaderElection implements manager.LeaderElectionRunnable
func (c *Collector) NeedLeaderElection() bool {
	// Only run telemetry collector on the leader
	return true
}

// Stop halts telemetry collection
func (c *Collector) Stop() {
	close(c.stopCh)
}

// getSizeGroup returns the exponential size group for PVC
func getSizeGroup(size resource.Quantity) string {
	gb := size.Value() / (1024 * 1024 * 1024)
	switch {
	case gb <= 1:
		return "1Gi"
	case gb <= 5:
		return "5Gi"
	case gb <= 10:
		return "10Gi"
	case gb <= 25:
		return "25Gi"
	case gb <= 50:
		return "50Gi"
	case gb <= 100:
		return "100Gi"
	case gb <= 250:
		return "250Gi"
	case gb <= 500:
		return "500Gi"
	case gb <= 1024:
		return "1Ti"
	case gb <= 2048:
		return "2Ti"
	case gb <= 5120:
		return "5Ti"
	default:
		return "10Ti"
	}
}

// collect gathers and sends telemetry data
func (c *Collector) collect(ctx context.Context) {
	logger := log.FromContext(ctx).V(1)

	// Get cluster ID from kube-system namespace
	var kubeSystemNS corev1.Namespace
	if err := c.client.Get(ctx, types.NamespacedName{Name: "kube-system"}, &kubeSystemNS); err != nil {
		logger.Info(fmt.Sprintf("Failed to get kube-system namespace: %v", err))
		return
	}

	clusterID := string(kubeSystemNS.UID)

	var cozystackCM corev1.ConfigMap
	if err := c.client.Get(ctx, types.NamespacedName{Namespace: "cozy-system", Name: "cozystack"}, &cozystackCM); err != nil {
		logger.Info(fmt.Sprintf("Failed to get cozystack configmap in cozy-system namespace: %v", err))
		return
	}

	oidcEnabled := cozystackCM.Data["oidc-enabled"]
	bundle := cozystackCM.Data["bundle-name"]
	bundleEnable := cozystackCM.Data["bundle-enable"]
	bundleDisable := cozystackCM.Data["bundle-disable"]

	// Get Kubernetes version from nodes
	var nodeList corev1.NodeList
	if err := c.client.List(ctx, &nodeList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list nodes: %v", err))
		return
	}

	// Create metrics buffer
	var metrics strings.Builder

	// Add Cozystack info metric
	if len(nodeList.Items) > 0 {
		k8sVersion, _ := c.discoveryClient.ServerVersion()
		metrics.WriteString(fmt.Sprintf(
			"cozy_cluster_info{cozystack_version=\"%s\",kubernetes_version=\"%s\",oidc_enabled=\"%s\",bundle_name=\"%s\",bunde_enable=\"%s\",bunde_disable=\"%s\"} 1\n",
			c.config.CozystackVersion,
			k8sVersion,
			oidcEnabled,
			bundle,
			bundleEnable,
			bundleDisable,
		))
	}

	// Collect node metrics
	nodeOSCount := make(map[string]int)
	for _, node := range nodeList.Items {
		key := fmt.Sprintf("%s (%s)", node.Status.NodeInfo.OperatingSystem, node.Status.NodeInfo.OSImage)
		nodeOSCount[key] = nodeOSCount[key] + 1
	}

	for osKey, count := range nodeOSCount {
		metrics.WriteString(fmt.Sprintf(
			"cozy_nodes_count{os=\"%s\",kernel=\"%s\"} %d\n",
			osKey,
			nodeList.Items[0].Status.NodeInfo.KernelVersion,
			count,
		))
	}

	// Collect LoadBalancer services metrics
	var serviceList corev1.ServiceList
	if err := c.client.List(ctx, &serviceList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list Services: %v", err))
	} else {
		lbCount := 0
		for _, svc := range serviceList.Items {
			if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
				lbCount++
			}
		}
		metrics.WriteString(fmt.Sprintf("cozy_loadbalancers_count %d\n", lbCount))
	}

	// Count tenant namespaces
	var nsList corev1.NamespaceList
	if err := c.client.List(ctx, &nsList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list Namespaces: %v", err))
	} else {
		tenantCount := 0
		for _, ns := range nsList.Items {
			if strings.HasPrefix(ns.Name, "tenant-") {
				tenantCount++
			}
		}
		metrics.WriteString(fmt.Sprintf("cozy_tenants_count %d\n", tenantCount))
	}

	// Collect PV metrics grouped by driver and size
	var pvList corev1.PersistentVolumeList
	if err := c.client.List(ctx, &pvList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list PVs: %v", err))
	} else {
		// Map to store counts by size and driver
		pvMetrics := make(map[string]map[string]int)

		for _, pv := range pvList.Items {
			if capacity, ok := pv.Spec.Capacity[corev1.ResourceStorage]; ok {
				sizeGroup := getSizeGroup(capacity)

				// Get the CSI driver name
				driver := "unknown"
				if pv.Spec.CSI != nil {
					driver = pv.Spec.CSI.Driver
				} else if pv.Spec.HostPath != nil {
					driver = "hostpath"
				} else if pv.Spec.NFS != nil {
					driver = "nfs"
				}

				// Initialize nested map if needed
				if _, exists := pvMetrics[sizeGroup]; !exists {
					pvMetrics[sizeGroup] = make(map[string]int)
				}

				// Increment count for this size/driver combination
				pvMetrics[sizeGroup][driver]++
			}
		}

		// Write metrics
		for size, drivers := range pvMetrics {
			for driver, count := range drivers {
				metrics.WriteString(fmt.Sprintf(
					"cozy_pvs_count{driver=\"%s\",size=\"%s\"} %d\n",
					driver,
					size,
					count,
				))
			}
		}
	}

	// Collect workload metrics
	var monitorList cozyv1alpha1.WorkloadMonitorList
	if err := c.client.List(ctx, &monitorList); err != nil {
		logger.Info(fmt.Sprintf("Failed to list WorkloadMonitors: %v", err))
		return
	}

	for _, monitor := range monitorList.Items {
		metrics.WriteString(fmt.Sprintf(
			"cozy_workloads_count{uid=\"%s\",kind=\"%s\",type=\"%s\",version=\"%s\"} %d\n",
			monitor.UID,
			monitor.Spec.Kind,
			monitor.Spec.Type,
			monitor.Spec.Version,
			monitor.Status.ObservedReplicas,
		))
	}

	// Send metrics
	if err := c.sendMetrics(clusterID, metrics.String()); err != nil {
		logger.Info(fmt.Sprintf("Failed to send metrics: %v", err))
	}
}

// sendMetrics sends collected metrics to the configured endpoint
func (c *Collector) sendMetrics(clusterID, metrics string) error {
	req, err := http.NewRequest("POST", c.config.Endpoint, bytes.NewBufferString(metrics))
	if err != nil {
		return fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Content-Type", "text/plain")
	req.Header.Set("X-Cluster-ID", clusterID)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("failed to send request: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}

	return nil
}
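
For reference, the body this (now removed) collector POSTed was plain Prometheus-style exposition text, sent with the `X-Cluster-ID` header. An illustrative payload, with every value invented, would look like the following; note that the `bunde_enable`/`bunde_disable` label names reproduce the misspelling in the format string above:

```
cozy_cluster_info{cozystack_version="v0.16.5",kubernetes_version="v1.29.0",oidc_enabled="false",bundle_name="paas-full",bunde_enable="",bunde_disable=""} 1
cozy_nodes_count{os="linux (Talos v1.7.0)",kernel="6.6.29-talos"} 3
cozy_loadbalancers_count 2
cozy_tenants_count 4
cozy_pvs_count{driver="replicated.csi.cozystack.io",size="10Gi"} 6
cozy_workloads_count{uid="8c5a0c2e-0000-0000-0000-000000000000",kind="clickhouse",type="clickhouse",version="0.2.0"} 2
```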

@@ -1,27 +0,0 @@
package telemetry

import (
	"time"
)

// Config holds telemetry configuration
type Config struct {
	// Disable telemetry collection if set to true
	Disabled bool
	// Endpoint to send telemetry data to
	Endpoint string
	// Interval between telemetry data collection
	Interval time.Duration
	// CozystackVersion represents the current version of Cozystack
	CozystackVersion string
}

// DefaultConfig returns default telemetry configuration
func DefaultConfig() *Config {
	return &Config{
		Disabled:         false,
		Endpoint:         "https://telemetry.cozystack.io",
		Interval:         15 * time.Minute,
		CozystackVersion: "unknown",
	}
}
105	manifests/cozystack-installer.yaml	Normal file
@@ -0,0 +1,105 @@
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: cozy-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cozystack
  namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cozystack
subjects:
  - kind: ServiceAccount
    name: cozystack
    namespace: cozy-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Service
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  ports:
    - name: http
      port: 80
      targetPort: 8123
  selector:
    app: cozystack
  type: ClusterIP
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cozystack
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  template:
    metadata:
      labels:
        app: cozystack
    spec:
      hostNetwork: true
      serviceAccountName: cozystack
      containers:
        - name: cozystack
          image: "ghcr.io/aenix-io/cozystack/cozystack:v0.16.5"
          env:
            - name: KUBERNETES_SERVICE_HOST
              value: localhost
            - name: KUBERNETES_SERVICE_PORT
              value: "7445"
            - name: K8S_AWAIT_ELECTION_ENABLED
              value: "1"
            - name: K8S_AWAIT_ELECTION_NAME
              value: cozystack
            - name: K8S_AWAIT_ELECTION_LOCK_NAME
              value: cozystack
            - name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
              value: cozy-system
            - name: K8S_AWAIT_ELECTION_IDENTITY
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
        - name: darkhttpd
          image: "ghcr.io/aenix-io/cozystack/cozystack:v0.16.5"
          command:
            - /usr/bin/darkhttpd
            - /cozystack/assets
            - --port
            - "8123"
          ports:
            - name: http
              containerPort: 8123
      tolerations:
        - key: "node.kubernetes.io/not-ready"
          operator: "Exists"
          effect: "NoSchedule"
        - key: "node.cilium.io/agent-not-ready"
          operator: "Exists"
          effect: "NoSchedule"

@@ -1,9 +0,0 @@
### How to test packages local

```bash
cd packages/core/installer
make image-cozystack REGISTRY=YOUR_CUSTOM_REGISTRY
make apply
kubectl delete pod dashboard-redis-master-0 -n cozy-dashboard
kubectl delete po -l app=source-controller -n cozy-fluxcd
```

@@ -9,12 +9,4 @@ rules:
      - secrets
    resourceNames:
      - {{ .Release.Name }}
      - {{ .Release.Name }}-credentials
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
    resourceNames:
      - {{ .Release.Name }}-ui
    verbs: ["get", "list", "watch"]

@@ -1,18 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: {{ .Release.Name }}-system
spec:
  chart:
    spec:
      chart: cozy-bucket
      reconcileStrategy: Revision
      sourceRef:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
      version: '*'
  interval: 1m0s
  timeout: 5m0s
  values:
    bucketName: {{ .Release.Name }}

@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.5.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "24.9.2"
appVersion: "24.3.0"

@@ -14,7 +14,6 @@ image:
		--cache-to type=inline \
		--metadata-file images/clickhouse-backup.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/clickhouse-backup.json -o json -r)" \
		> images/clickhouse-backup.tag
@@ -19,14 +19,12 @@ more details:

### Common parameters

| Name             | Description                                    | Value  |
| ---------------- | ---------------------------------------------- | ------ |
| `size`           | Persistent Volume size                         | `10Gi` |
| `logStorageSize` | Persistent Volume size for logs                | `2Gi`  |
| `shards`         | Number of Clickhouse shards                    | `1`    |
| `replicas`       | Number of Clickhouse replicas                  | `2`    |
| `storageClass`   | StorageClass used to store the data            | `""`   |
| `logTTL`         | TTL in days for query_log and query_thread_log | `15`   |
| Name           | Description                         | Value  |
| -------------- | ----------------------------------- | ------ |
| `size`         | Persistent Volume size              | `10Gi` |
| `shards`       | Number of Clickhouse shards         | `1`    |
| `replicas`     | Number of Clickhouse replicas       | `2`    |
| `storageClass` | StorageClass used to store the data | `""`   |

### Configuration parameters

@@ -36,15 +34,13 @@ more details:

### Backup parameters

| Name                     | Description                                                                                                                                                                                                        | Value                                                  |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                                                                                                                                                                                            | `false`                                                |
| `backup.s3Region`        | The AWS S3 region where backups are stored                                                                                                                                                                         | `us-east-1`                                            |
| `backup.s3Bucket`        | The S3 bucket used for storing backups                                                                                                                                                                             | `s3.example.org/clickhouse-backups`                    |
| `backup.schedule`        | Cron schedule for automated backups                                                                                                                                                                                | `0 2 * * *`                                            |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups                                                                                                                                                                           | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey`     | The access key for S3, used for authentication                                                                                                                                                                     | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
| `backup.s3SecretKey`     | The secret key for S3, used for authentication                                                                                                                                                                     | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
| `backup.resticPassword`  | The password for Restic backup encryption                                                                                                                                                                          | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
| `resources`              | Resources                                                                                                                                                                                                          | `{}`                                                   |
| `resourcesPreset`        | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`                                                 |
| Name                     | Description                                    | Value                                                  |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                        | `false`                                                |
| `backup.s3Region`        | The AWS S3 region where backups are stored     | `us-east-1`                                            |
| `backup.s3Bucket`        | The S3 bucket used for storing backups         | `s3.example.org/clickhouse-backups`                    |
| `backup.schedule`        | Cron schedule for automated backups            | `0 2 * * *`                                            |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups       | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey`     | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
| `backup.s3SecretKey`     | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
| `backup.resticPassword`  | The password for Restic backup encryption      | `ChaXoveekoh6eigh4siesheeda2quai0`                     |

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.5.0@sha256:dda84420cb8648721299221268a00d72a05c7af5b7fb452619bac727068b9e61
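
A sketch of enabling the backup block in a release's values (the bucket and credentials are placeholders; the keys mirror the table above and the chart's values.yaml later in this diff):

```yaml
backup:
  enabled: true
  s3Region: us-east-1
  s3Bucket: s3.example.org/clickhouse-backups   # placeholder bucket
  schedule: "0 2 * * *"
  cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
  s3AccessKey: <access-key>          # placeholder credential
  s3SecretKey: <secret-key>          # placeholder credential
  resticPassword: <restic-password>  # placeholder credential
```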
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -32,12 +32,11 @@ kind: "ClickHouseInstallation"
metadata:
  name: "{{ .Release.Name }}"
spec:
  namespaceDomainPattern: "%s.svc.cozy.local"
  {{- with .Values.size }}
  defaults:
    templates:
      dataVolumeClaimTemplate: data-volume-template
      podTemplate: clickhouse-per-host
      serviceTemplate: svc-template
  {{- end }}
  configuration:
    {{- with $users }}
    users:
@@ -47,41 +46,6 @@ spec:
        {{ $name }}/networks/ip: ["::/0"]
      {{- end }}
    {{- end }}
    files:
      config.d/z_log_disable.xml: |
        <clickhouse>
          <asynchronous_metric_log remove="1"/>
          <metric_log remove="1"/>
          <query_views_log remove="1" />
          <part_log remove="1"/>
          <session_log remove="1"/>
          <text_log remove="1" />
          <trace_log remove="1"/>
          <crash_log remove="1"/>
          <opentelemetry_span_log remove="1"/>
          <processors_profile_log remove="1"/>
        </clickhouse>
      config.d/query_log_ttl.xml: |
        <clickhouse>
          <query_log replace="1">
            <database>system</database>
            <table>query_log</table>
            <engine>ENGINE = MergeTree PARTITION BY (event_date)
              ORDER BY (event_time)
              TTL event_date + INTERVAL {{ .Values.logTTL }} DAY DELETE
            </engine>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
          </query_log>
          <query_thread_log replace="1">
            <database>system</database>
            <table>query_thread_log</table>
            <engine>ENGINE = MergeTree PARTITION BY (event_date)
              ORDER BY (event_time)
              TTL event_date + INTERVAL {{ .Values.logTTL }} DAY DELETE
            </engine>
            <flush_interval_milliseconds>7500</flush_interval_milliseconds>
          </query_thread_log>
        </clickhouse>
    profiles:
      readonly/readonly: "1"
    clusters:
@@ -89,54 +53,17 @@ spec:
        layout:
          shardsCount: {{ .Values.shards }}
          replicasCount: {{ .Values.replicas }}
  {{- with .Values.size }}
  templates:
    volumeClaimTemplates:
      - name: data-volume-template
        spec:
          accessModes:
            - ReadWriteOnce
          {{- with $.Values.storageClass }}
          storageClassName: {{ . }}
          {{- end }}
          resources:
            requests:
              storage: {{ .Values.size }}
      - name: log-volume-template
        spec:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: {{ .Values.logStorageSize }}
    podTemplates:
      - name: clickhouse-per-host
        spec:
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                - labelSelector:
                    matchExpressions:
                      - key: "clickhouse.altinity.com/chi"
                        operator: In
                        values:
                          - "{{ .Release.Name }}"
                  topologyKey: "kubernetes.io/hostname"
          containers:
            - name: clickhouse
              image: clickhouse/clickhouse-server:24.9.2.42
              {{- if .Values.resources }}
              resources: {{- toYaml .Values.resources | nindent 16 }}
              {{- else if ne .Values.resourcesPreset "none" }}
              resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
              {{- end }}
              volumeMounts:
                - name: data-volume-template
                  mountPath: /var/lib/clickhouse
                - name: log-volume-template
                  mountPath: /var/log/clickhouse-server
    serviceTemplates:
      - name: svc-template
        generateName: chendpoint-{chi}
        spec:
          ports:
            - name: http
              port: 8123
            - name: tcp
              port: 9000
          storage: {{ . }}
  {{- end }}
@@ -8,7 +8,7 @@ rules:
    resources:
      - services
    resourceNames:
      - chendpoint-{{ .Release.Name }}
      - chi-clickhouse-test-clickhouse-0-0
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - ""
@@ -17,10 +17,3 @@ rules:
    resourceNames:
      - {{ .Release.Name }}-credentials
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
      - workloadmonitors
    resourceNames:
      - {{ .Release.Name }}
    verbs: ["get", "list", "watch"]
@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: clickhouse
  type: clickhouse
  selector:
    clickhouse.altinity.com/chi: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
@@ -7,11 +7,6 @@
      "description": "Persistent Volume size",
      "default": "10Gi"
    },
    "logStorageSize": {
      "type": "string",
      "description": "Persistent Volume size for logs",
      "default": "2Gi"
    },
    "shards": {
      "type": "number",
      "description": "Number of Clickhouse shards",
@@ -27,11 +22,6 @@
      "description": "StorageClass used to store the data",
      "default": ""
    },
    "logTTL": {
      "type": "number",
      "description": "TTL in days for query_log and query_thread_log",
      "default": 15
    },
    "backup": {
      "type": "object",
      "properties": {
@@ -76,16 +66,6 @@
          "default": "ChaXoveekoh6eigh4siesheeda2quai0"
        }
      }
    },
    "resources": {
      "type": "object",
      "description": "Resources",
      "default": {}
    },
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "default": "nano"
    }
  }
}
@@ -1,18 +1,14 @@
## @section Common parameters

## @param size Persistent Volume size
## @param logStorageSize Persistent Volume size for logs
## @param shards Number of Clickhouse shards
## @param replicas Number of Clickhouse replicas
## @param storageClass StorageClass used to store the data
## @param logTTL TTL in days for query_log and query_thread_log
##
size: 10Gi
logStorageSize: 2Gi
shards: 1
replicas: 2
storageClass: ""
logTTL: 15

## @section Configuration parameters

@@ -46,16 +42,3 @@ backup:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Resources
resources: {}
# resources:
#   limits:
#     cpu: 4000m
#     memory: 4Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi

## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.4.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -21,17 +21,15 @@

### Backup parameters

| Name                     | Description                                                                                                                                                                                                        | Value                                                  |
| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                                                                                                                                                                                            | `false`                                                |
| `backup.s3Region`        | The AWS S3 region where backups are stored                                                                                                                                                                         | `us-east-1`                                            |
| `backup.s3Bucket`        | The S3 bucket used for storing backups                                                                                                                                                                             | `s3.example.org/postgres-backups`                      |
| `backup.schedule`        | Cron schedule for automated backups                                                                                                                                                                                | `0 2 * * *`                                            |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups                                                                                                                                                                           | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey`     | The access key for S3, used for authentication                                                                                                                                                                     | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
| `backup.s3SecretKey`     | The secret key for S3, used for authentication                                                                                                                                                                     | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
| `backup.resticPassword`  | The password for Restic backup encryption                                                                                                                                                                          | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
| `resources`              | Resources                                                                                                                                                                                                          | `{}`                                                   |
| `resourcesPreset`        | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`                                                 |
| Name                     | Description                                    | Value                                                  |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled`         | Enable periodic backups                        | `false`                                                |
| `backup.s3Region`        | The AWS S3 region where backups are stored     | `us-east-1`                                            |
| `backup.s3Bucket`        | The S3 bucket used for storing backups         | `s3.example.org/postgres-backups`                      |
| `backup.schedule`        | Cron schedule for automated backups            | `0 2 * * *`                                            |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups       | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey`     | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
| `backup.s3SecretKey`     | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
| `backup.resticPassword`  | The password for Restic backup encryption      | `ChaXoveekoh6eigh4siesheeda2quai0`                     |

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/aenix-io/cozystack/postgres-backup:0.7.0@sha256:d2015c6dba92293bda652d055e97d1be80e8414c2dc78037c12812d1a2e2cba1
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -17,10 +17,3 @@ rules:
    resourceNames:
      - {{ .Release.Name }}-credentials
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
      - workloadmonitors
    resourceNames:
      - {{ .Release.Name }}
    verbs: ["get", "list", "watch"]
@@ -34,9 +34,6 @@ stringData:
  init.sh: |
    #!/bin/bash
    set -e

    until pg_isready ; do sleep 5; done

    echo "== create users"
    {{- if .Values.users }}
    psql -v ON_ERROR_STOP=1 <<\EOT
@@ -63,7 +60,7 @@ stringData:
    DROP USER $user;
    EOT
    done

    echo "== create roles"
    psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
    SELECT 'CREATE ROLE app_admin NOINHERIT;'
@@ -83,7 +80,7 @@ stringData:
    FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
      -- Changing Schema Ownership
      EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');

      -- Add rights for the admin role
      EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
      EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
@@ -6,20 +6,10 @@ metadata:
spec:
  instances: {{ .Values.replicas }}
  enableSuperuserAccess: true
  {{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
  {{- if $configMap }}
  {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
  {{- if $rawConstraints }}
  {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
  {{- end }}
  {{- end }}

  minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
  maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
  {{- if .Values.resources }}
  resources: {{- toYaml .Values.resources | nindent 4 }}
  {{- else if ne .Values.resourcesPreset "none" }}
  resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
  {{- end }}

  monitoring:
    enablePodMonitor: true
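
The `lookup` above reads cluster-wide scheduling defaults from a ConfigMap and splices them into the cluster spec. A sketch of the expected shape, under the assumption that the stored value is a YAML map keyed `topologySpreadConstraints` (the constraint values themselves are illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack-scheduling
  namespace: cozy-system
data:
  # YAML string; parsed with fromYaml and re-emitted into the spec via nindent 2
  globalAppTopologySpreadConstraints: |
    topologySpreadConstraints:
      - maxSkew: 1                                # illustrative values
        topologyKey: topology.kubernetes.io/zone
        whenUnsatisfiable: ScheduleAnyway
```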
@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: ferretdb
  type: ferretdb
  selector:
    app: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
@@ -81,16 +81,6 @@
          "default": "ChaXoveekoh6eigh4siesheeda2quai0"
        }
      }
    },
    "resources": {
      "type": "object",
      "description": "Resources",
      "default": {}
    },
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "default": "nano"
    }
  }
}
@@ -48,16 +48,3 @@ backup:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Resources
resources: {}
# resources:
#   limits:
#     cpu: 4000m
#     memory: 4Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi

## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -13,7 +13,6 @@ image-nginx:
		--cache-to type=inline \
		--metadata-file images/nginx-cache.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))@$$(yq e '."containerimage.digest"' images/nginx-cache.json -o json -r)" \
		> images/nginx-cache.tag
@@ -60,17 +60,13 @@ VTS module shows wrong upstream response time

### Common parameters

| Name                      | Description                                                                                                                                                                                                        | Value   |
| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `external`                | Enable external access from outside the cluster                                                                                                                                                                   | `false` |
| `size`                    | Persistent Volume size                                                                                                                                                                                             | `10Gi`  |
| `storageClass`            | StorageClass used to store the data                                                                                                                                                                                | `""`    |
| `haproxy.replicas`        | Number of HAProxy replicas                                                                                                                                                                                         | `2`     |
| `nginx.replicas`          | Number of Nginx replicas                                                                                                                                                                                           | `2`     |
| `haproxy.resources`       | Resources                                                                                                                                                                                                          | `{}`    |
| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`  |
| `nginx.resources`         | Resources                                                                                                                                                                                                          | `{}`    |
| `nginx.resourcesPreset`   | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`  |
| Name               | Description                                      | Value   |
| ------------------ | ------------------------------------------------ | ------- |
| `external`         | Enable external access from outside the cluster  | `false` |
| `size`             | Persistent Volume size                           | `10Gi`  |
| `storageClass`     | StorageClass used to store the data              | `""`    |
| `haproxy.replicas` | Number of HAProxy replicas                       | `2`     |
| `nginx.replicas`   | Number of Nginx replicas                         | `2`     |

### Configuration parameters
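
A sketch of tuning these parameters in a release's values (the keys mirror the chart's values.yaml later in this diff; the chosen preset and limits are illustrative):

```yaml
haproxy:
  replicas: 2
  resourcesPreset: small      # ignored once explicit resources are set
nginx:
  replicas: 2
  resources:                  # explicit resources override the preset
    limits:
      cpu: 4000m
      memory: 4Gi
    requests:
      cpu: 100m
      memory: 512Mi
```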
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:cd744b2d1d50191f4908f2db83079b32973d1c009fe9468627be72efbfa0a107
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -33,11 +33,6 @@ spec:
      containers:
        - image: haproxy:latest
          name: haproxy
          {{- if .Values.haproxy.resources }}
          resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
          {{- else if ne .Values.haproxy.resourcesPreset "none" }}
          resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
          {{- end }}
          ports:
            - containerPort: 8080
              name: http

@@ -52,11 +52,6 @@ spec:
      shareProcessNamespace: true
      containers:
        - name: nginx
          {{- if $.Values.nginx.resources }}
          resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
          {{- else if ne $.Values.nginx.resourcesPreset "none" }}
          resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
          {{- end }}
          image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
          readinessProbe:
            httpGet:
@@ -88,13 +83,6 @@ spec:
        - name: reloader
          image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
          command: ["/usr/bin/nginx-reloader.sh"]
          resources:
            limits:
              cpu: 50m
              memory: 50Mi
            requests:
              cpu: 50m
              memory: 50Mi
          #command: ["sleep", "infinity"]
          volumeMounts:
            - mountPath: /etc/nginx/nginx.conf
@@ -24,16 +24,6 @@
        "type": "number",
        "description": "Number of HAProxy replicas",
        "default": 2
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -44,16 +34,6 @@
        "type": "number",
        "description": "Number of Nginx replicas",
        "default": 2
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -12,32 +12,8 @@ size: 10Gi
storageClass: ""
haproxy:
  replicas: 2
  ## @param haproxy.resources Resources
  resources: {}
  # resources:
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  ## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"
nginx:
  replicas: 2
  ## @param nginx.resources Resources
  resources: {}
  # resources:
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  ## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"

## @section Configuration parameters
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.3.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -4,19 +4,15 @@

### Common parameters

| Name                        | Description                                                                                                                                                                                                        | Value   |
| --------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `external`                  | Enable external access from outside the cluster                                                                                                                                                                   | `false` |
| `kafka.size`                | Persistent Volume size for Kafka                                                                                                                                                                                   | `10Gi`  |
| `kafka.replicas`            | Number of Kafka replicas                                                                                                                                                                                           | `3`     |
| `kafka.storageClass`        | StorageClass used to store the Kafka data                                                                                                                                                                          | `""`    |
| `zookeeper.size`            | Persistent Volume size for ZooKeeper                                                                                                                                                                               | `5Gi`   |
| `zookeeper.replicas`        | Number of ZooKeeper replicas                                                                                                                                                                                       | `3`     |
| `zookeeper.storageClass`    | StorageClass used to store the ZooKeeper data                                                                                                                                                                      | `""`    |
| `kafka.resources`           | Resources                                                                                                                                                                                                          | `{}`    |
| `kafka.resourcesPreset`     | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`  |
| `zookeeper.resources`       | Resources                                                                                                                                                                                                          | `{}`    |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano`  |
| Name                     | Description                                     | Value   |
| ------------------------ | ----------------------------------------------- | ------- |
| `external`               | Enable external access from outside the cluster | `false` |
| `kafka.size`             | Persistent Volume size for Kafka                | `10Gi`  |
| `kafka.replicas`         | Number of Kafka replicas                        | `3`     |
| `kafka.storageClass`     | StorageClass used to store the Kafka data       | `""`    |
| `zookeeper.size`         | Persistent Volume size for ZooKeeper            | `5Gi`   |
| `zookeeper.replicas`     | Number of ZooKeeper replicas                    | `3`     |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data   | `""`    |

### Configuration parameters

@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
   )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
   )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
   )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
   )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
   )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
   )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
   )
 }}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
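For reference, rendering this deleted helper with {{ include "resources.preset" (dict "type" "small") }} would produce the following YAML (keys sorted by toYaml, values taken from the dict above):

    limits:
      cpu: 750m
      ephemeral-storage: 2Gi
      memory: 768Mi
    requests:
      cpu: 500m
      ephemeral-storage: 50Mi
      memory: 512Mi

An unknown preset name aborts the render through the printf ... | fail branch.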
@@ -1,27 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ .Release.Name }}-dashboard-resources
rules:
- apiGroups:
  - ""
  resources:
  - services
  resourceNames:
  - {{ .Release.Name }}-kafka-bootstrap
  verbs: ["get", "list", "watch"]
- apiGroups:
  - ""
  resources:
  - secrets
  resourceNames:
  - {{ .Release.Name }}-clients-ca
  verbs: ["get", "list", "watch"]
- apiGroups:
  - cozystack.io
  resources:
  - workloadmonitors
  resourceNames:
  - {{ .Release.Name }}
  - {{ $.Release.Name }}-zookeeper
  verbs: ["get", "list", "watch"]
@@ -8,11 +8,6 @@ metadata:
spec:
  kafka:
    replicas: {{ .Values.kafka.replicas }}
    {{- if .Values.kafka.resources }}
    resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
    {{- else if ne .Values.kafka.resourcesPreset "none" }}
    resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
    {{- end }}
    listeners:
    - name: plain
      port: 9092
@@ -62,19 +57,8 @@ spec:
        class: {{ . }}
        {{- end }}
        deleteClaim: true
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: {{ .Release.Name }}-metrics
          key: kafka-metrics-config.yml
  zookeeper:
    replicas: {{ .Values.zookeeper.replicas }}
    {{- if .Values.zookeeper.resources }}
    resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
    {{- else if ne .Values.zookeeper.resourcesPreset "none" }}
    resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
    {{- end }}
    storage:
      type: persistent-claim
      {{- with .Values.zookeeper.size }}
@@ -84,12 +68,6 @@ spec:
      class: {{ . }}
      {{- end }}
      deleteClaim: false
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: {{ .Release.Name }}-metrics
          key: kafka-metrics-config.yml
  entityOperator:
    topicOperator: {}
    userOperator: {}

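The removed template logic applied the same precedence per component: an explicit resources map wins, otherwise the preset is expanded, and "none" emits nothing. An illustrative values fragment (names match the chart schema above; the numbers are examples only):

    kafka:
      resourcesPreset: "small"
    zookeeper:
      resources:
        requests:
          cpu: 250m
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 2Gi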
@@ -1,198 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ .Release.Name }}-metrics
data:
  kafka-metrics-config.yml: |
    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
    lowercaseOutputName: true
    rules:
    # Special cases and very specific rules
    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
      name: kafka_server_$1_$2
      type: GAUGE
      labels:
        clientId: "$3"
        topic: "$4"
        partition: "$5"
    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
      name: kafka_server_$1_$2
      type: GAUGE
      labels:
        clientId: "$3"
        broker: "$4:$5"
    - pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
      name: kafka_server_$1_connections_tls_info
      type: GAUGE
      labels:
        cipher: "$2"
        protocol: "$3"
        listener: "$4"
        networkProcessor: "$5"
    - pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
      name: kafka_server_$1_connections_software
      type: GAUGE
      labels:
        clientSoftwareName: "$2"
        clientSoftwareVersion: "$3"
        listener: "$4"
        networkProcessor: "$5"
    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
      name: kafka_server_$1_$4
      type: COUNTER
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
      name: kafka_server_$1_$4
      type: GAUGE
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
      name: kafka_server_$1_$4
      type: COUNTER
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
      name: kafka_server_$1_$4
      type: GAUGE
      labels:
        listener: "$2"
        networkProcessor: "$3"
    # Some percent metrics use MeanRate attribute
    # Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
      name: kafka_$1_$2_$3_percent
      type: GAUGE
    # Generic gauges for percents
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
      name: kafka_$1_$2_$3_percent
      type: GAUGE
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
      name: kafka_$1_$2_$3_percent
      type: GAUGE
      labels:
        "$4": "$5"
    # Generic per-second counters with 0-2 key/value pairs
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
    # Generic gauges with 0-2 key/value pairs
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
    # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
    # Note that these are missing the '_sum' metric!
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        "$6": "$7"
        quantile: "0.$8"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        quantile: "0.$6"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        quantile: "0.$4"
    # KRaft overall related metrics
    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
    - pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
      name: kafka_server_raftmetrics_$1
      type: COUNTER
    - pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
      name: kafka_server_raftmetrics_$1
      value: 1
      type: UNTYPED
      labels:
        $1: "$2"
    - pattern: "kafka.server<type=raft-metrics><>(.+):"
      name: kafka_server_raftmetrics_$1
      type: GAUGE
    # KRaft "low level" channels related metrics
    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
    - pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
      name: kafka_server_raftchannelmetrics_$1
      type: COUNTER
    - pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
      name: kafka_server_raftchannelmetrics_$1
      type: GAUGE
    # Broker metrics related to fetching metadata topic records in KRaft mode
    - pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
      name: kafka_server_brokermetadatametrics_$1
      type: GAUGE
  zookeeper-metrics-config.yml: |
    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
    lowercaseOutputName: true
    rules:
    # replicated Zookeeper
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
      name: "zookeeper_$2"
      type: GAUGE
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
      name: "zookeeper_$3"
      type: GAUGE
      labels:
        replicaId: "$2"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)"
      name: "zookeeper_$4"
      type: COUNTER
      labels:
        replicaId: "$2"
        memberType: "$3"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
      name: "zookeeper_$4"
      type: GAUGE
      labels:
        replicaId: "$2"
        memberType: "$3"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
      name: "zookeeper_$4_$5"
      type: GAUGE
      labels:
        replicaId: "$2"
        memberType: "$3"

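To illustrate how these deleted rules worked: with lowercaseOutputName: true, the generic per-second counter pattern kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count would map the stock broker MBean

    kafka.server<type=BrokerTopicMetrics, name=MessagesInPerSec><>Count

to the Prometheus series

    kafka_server_brokertopicmetrics_messagesin_total

(the MBean is a standard Kafka broker metric, used here only as a worked example).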
@@ -1,40 +0,0 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
  name: {{ .Release.Name }}
spec:
  podMetricsEndpoints:
  - port: tcp-prometheus
    scheme: http
    relabelConfigs:
    - separator: ;
      regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
      replacement: $1
      action: labelmap
    - sourceLabels: [__meta_kubernetes_namespace]
      separator: ;
      regex: (.*)
      targetLabel: namespace
      replacement: $1
      action: replace
    - sourceLabels: [__meta_kubernetes_pod_name]
      separator: ;
      regex: (.*)
      targetLabel: pod
      replacement: $1
      action: replace
    - sourceLabels: [__meta_kubernetes_pod_node_name]
      separator: ;
      regex: (.*)
      targetLabel: node
      replacement: $1
      action: replace
    - sourceLabels: [__meta_kubernetes_pod_host_ip]
      separator: ;
      regex: (.*)
      targetLabel: node_ip
      replacement: $1
      action: replace
  selector:
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name }}
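The labelmap rule in this deleted VMPodScrape copied every pod label matching strimzi_io_* onto the scraped series, while the replace rules pinned namespace, pod, node, and node_ip. For example, a pod carrying the label strimzi_io_cluster=my-kafka (hypothetical value) would gain the series label:

    strimzi_io_cluster="my-kafka"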
@@ -1,30 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: kafka
  type: kafka
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
    app.kubernetes.io/name: kafka
  version: {{ $.Chart.Version }}

---

apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-zookeeper
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: kafka
  type: zookeeper
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
    app.kubernetes.io/name: zookeeper
  version: {{ $.Chart.Version }}
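Both deleted monitors selected pods by the app.kubernetes.io/instance and app.kubernetes.io/name labels that the Strimzi operator is expected to set on broker and ZooKeeper pods; e.g. a broker pod in a release named my-kafka would be matched by (illustrative labels):

    app.kubernetes.io/instance: my-kafka
    app.kubernetes.io/name: kafka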
@@ -24,16 +24,6 @@
        "type": "string",
        "description": "StorageClass used to store the Kafka data",
        "default": ""
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -54,16 +44,6 @@
        "type": "string",
        "description": "StorageClass used to store the ZooKeeper data",
        "default": ""
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },

Some files were not shown because too many files have changed in this diff.