Mirror of https://github.com/outbackdingo/cozystack.git
Synced 2026-01-28 18:18:41 +00:00

Compare commits (1 commit): ingress-ap ... tinkerbell
| Author | SHA1 | Date |
|---|---|---|
|  | a91d2aefde |  |
.github/CODEOWNERS (vendored): 2 changes

@@ -1 +1 @@
-* @kvaps @lllamnyp @klinch0
+* @kvaps
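In this compare the owner set shrinks from three accounts to one. Since a `*` pattern in CODEOWNERS matches every path in the repository, the listed accounts are automatically requested as reviewers on all pull requests.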
.github/workflows/backport.yaml (vendored): 53 lines

@@ -1,53 +0,0 @@ (file removed; its full content follows)
name: Automatic Backport

on:
  pull_request_target:
    types: [closed] # fires when PR is closed (merged)

concurrency:
  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'backport')
    runs-on: [self-hosted]

    steps:
      # 1. Decide which maintenance branch should receive the back-port
      - name: Determine target maintenance branch
        id: target
        uses: actions/github-script@v7
        with:
          script: |
            let rel;
            try {
              rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
            } catch (e) {
              core.setFailed('No existing releases found; cannot determine backport target.');
              return;
            }
            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
            const branch = `release-${maj}.${min}`;
            core.setOutput('branch', branch);
            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);

      # 2. Checkout (required by backport-action)
      - name: Checkout repository
        uses: actions/checkout@v4

      # 3. Create the back-port pull request
      - name: Create back-port PR
        uses: korthout/backport-action@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          label_pattern: '' # don't read labels for targets
          target_branches: ${{ steps.target.outputs.branch }}
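The branch-derivation step is the core of this deleted workflow. As a quick illustration, here is a minimal Go sketch of the same logic; the hard-coded tag stands in for the getLatestRelease API call, so treat it as an assumption-laden restatement rather than project code:

```go
package main

import (
	"fmt"
	"strings"
)

// target maps a latest-release tag such as "v0.31.5" to the
// maintenance branch that should receive the backport
// ("release-0.31"), mirroring the github-script step above.
func target(latestTag string) (string, error) {
	parts := strings.Split(strings.TrimPrefix(latestTag, "v"), ".")
	if len(parts) < 2 {
		return "", fmt.Errorf("unexpected tag %q", latestTag)
	}
	return "release-" + parts[0] + "." + parts[1], nil
}

func main() {
	branch, err := target("v0.31.5")
	if err != nil {
		panic(err)
	}
	fmt.Println(branch) // prints: release-0.31
}
```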
.github/workflows/pre-commit.yml (vendored): 11 changes

@@ -1,12 +1,6 @@
 name: Pre-Commit Checks

-on:
-  pull_request:
-    types: [labeled, opened, synchronize, reopened]
-
-concurrency:
-  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
-  cancel-in-progress: true
+on: [push, pull_request]

 jobs:
   pre-commit:

@@ -14,9 +8,6 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
-        with:
-          fetch-depth: 0
-          fetch-tags: true

       - name: Set up Python
        uses: actions/setup-python@v4
.github/workflows/pull-requests-release.yaml (vendored): 172 lines

@@ -1,172 +0,0 @@ (file removed; its full content follows)
name: Releasing PR

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened, closed]

concurrency:
  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  verify:
    name: Test Release
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    # Run only when the PR carries the "release" label and not closed.
    if: |
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Run tests
        run: make test

  finalize:
    name: Finalize Release
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      # Extract tag from branch name (branch = release-X.Y.Z*)
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            const tag = `v${m[1]}`;
            core.setOutput('tag', tag);
            console.log(`✅ Tag to publish: ${tag}`);

      # Checkout repo & create / push annotated tag
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create tag on merge commit
        run: |
          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
          git push -f origin ${{ steps.get_tag.outputs.tag }}

      # Ensure maintenance branch release-X.Y
      - name: Ensure maintenance branch release-X.Y
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
            if (!match) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
              return;
            }
            const line = `${match[1]}.${match[2]}`;
            const branch = `release-${line}`;
            try {
              await github.rest.repos.getBranch({
                owner: context.repo.owner,
                repo: context.repo.repo,
                branch
              });
              console.log(`Branch '${branch}' already exists`);
            } catch (_) {
              await github.rest.git.createRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
                ref: `refs/heads/${branch}`,
                sha: context.sha
              });
              console.log(`✅ Branch '${branch}' created at ${context.sha}`);
            }

      # Get the latest published release
      - name: Get the latest published release
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare current tag vs latest using semver-utils
      - name: Semver compare
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.get_tag.outputs.tag }}
          compare-to: ${{ steps.latest_release.outputs.tag }}

      # Derive flags: prerelease? make_latest?
      - name: Calculate publish flags
        id: flags
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
            const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/);
            if (!m) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
            const isRc = Boolean(m[2]);
            core.setOutput('is_rc', isRc);
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');

      # Publish draft release with correct flags
      - name: Publish draft release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) throw new Error(`Draft release for ${tag} not found`);
            await github.rest.repos.updateRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: draft.id,
              draft: false,
              prerelease: ${{ steps.flags.outputs.is_rc }},
              make_latest: '${{ steps.flags.outputs.make_latest }}'
            });

            console.log(`🚀 Published release for ${tag}`);
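The "Calculate publish flags" step encodes two publishing rules: a release candidate is always a prerelease, and a tag that compares older than the latest published release must not become "latest". A minimal Go sketch of that decision, assuming the caller supplies the comparison result that the semver-utils step produces:

```go
package main

import (
	"fmt"
	"regexp"
)

var tagRe = regexp.MustCompile(`^v(\d+\.\d+\.\d+)(-rc\.\d+)?$`)

// publishFlags mirrors the workflow step: outdated stands in for the
// semver-utils comparison result being "<" (tag older than latest).
// make_latest "false" blocks promotion outright, while "legacy"
// defers to GitHub's default latest-release selection.
func publishFlags(tag string, outdated bool) (isRC bool, makeLatest string, err error) {
	m := tagRe.FindStringSubmatch(tag)
	if m == nil {
		return false, "", fmt.Errorf("tag %q must match vX.Y.Z or vX.Y.Z-rc.N", tag)
	}
	isRC = m[2] != ""
	if isRC || outdated {
		return isRC, "false", nil
	}
	return isRC, "legacy", nil
}

func main() {
	fmt.Println(publishFlags("v0.31.5-rc.1", false)) // true false <nil>
	fmt.Println(publishFlags("v0.31.5", false))      // false legacy <nil>
}
```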
.github/workflows/pull-requests.yaml (vendored): 41 lines

@@ -1,41 +0,0 @@ (file removed; its full content follows)
name: Pull Request

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

concurrency:
  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  e2e:
    name: Build and Test
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Build
        run: make build

      - name: Test
        run: make test
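Read alongside the Releasing PR workflow above, the two `if:` guards are complementary: the `release` label routes a pull request either into the release verify/finalize pipeline or into this ordinary build-and-test job, never both.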
.github/workflows/tags.yaml (vendored): 227 lines

@@ -1,227 +0,0 @@ (file removed; its full content follows)
name: Versioned Tag

on:
  push:
    tags:
      - 'v*.*.*'      # vX.Y.Z
      - 'v*.*.*-rc.*' # vX.Y.Z-rc.N


concurrency:
  group: tags-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  prepare-release:
    name: Prepare Release
    runs-on: [self-hosted]
    permissions:
      contents: write
      packages: write
      pull-requests: write
      actions: write

    steps:
      # Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
            core.setOutput('skip', exists);
            console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);

      # If a published release already exists, skip the rest of the workflow
      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      # Parse tag meta-data (rc?, maintenance line, etc.)
      - name: Parse tag
        if: steps.check_release.outputs.skip == 'false'
        id: tag
        uses: actions/github-script@v7
        with:
          script: |
            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
            const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/); // ['0.31.5', '-rc.1']
            if (!m) {
              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
            const isRc = Boolean(m[2]);
            const [maj, min] = m[1].split('.');
            core.setOutput('tag', ref);              // v0.31.5-rc.1
            core.setOutput('version', version);      // 0.31.5-rc.1
            core.setOutput('is_rc', isRc);           // true
            core.setOutput('line', `${maj}.${min}`); // 0.31

      # Detect base branch (main or release-X.Y) the tag was pushed from
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
              return;
            }
            const branch = baseRef.replace('refs/heads/', '');
            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
            if (!ok) {
              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
              return;
            }
            core.setOutput('branch', branch);

      # Checkout & login once
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GHCR
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      # Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

      # Commit built artifacts
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
          git push origin HEAD || true

      # Get `latest_version` from latest published release
      - name: Get latest published release
        if: steps.check_release.outputs.skip == 'false'
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare tag (A) with latest (B)
      - name: Semver compare
        if: steps.check_release.outputs.skip == 'false'
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.tag.outputs.tag }}               # A
          compare-to: ${{ steps.latest_release.outputs.tag }} # B

      # Create or reuse DRAFT GitHub Release
      - name: Create / reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.tag.outputs.tag }}';
            const isRc = ${{ steps.tag.outputs.is_rc }};
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            const makeLatest = outdated ? false : 'legacy';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            let rel = releases.data.find(r => r.tag_name === tag);
            if (!rel) {
              rel = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: tag,
                draft: true,
                prerelease: isRc,
                make_latest: makeLatest
              });
              console.log(`Draft release created for ${tag}`);
            } else {
              console.log(`Re-using existing release ${tag}`);
            }
            core.setOutput('upload_url', rel.upload_url);

      # Build + upload assets (optional)
      - name: Build & upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: |
          make assets
          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Create release-X.Y.Z branch and push (force-update)
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH"
          git push -f origin "$BRANCH"

      # Create pull request into original base branch (if absent)
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });
            if (prs.data.length === 0) {
              const pr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
                body: `This PR prepares the release \`v${version}\`.`,
                draft: false
              });
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: pr.data.number,
                labels: ['release']
              });
              console.log(`Created PR #${pr.data.number}`);
            } else {
              console.log(`PR already exists from ${head} to ${base}`);
            }
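Two guard steps carry most of this deleted workflow's safety logic: the tag must parse as vX.Y.Z or vX.Y.Z-rc.N, and the tagged commit must have been pushed from main or a release-X.Y branch. A minimal Go sketch of the base-branch guard, with the sample refs chosen purely for illustration:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var maintenanceRe = regexp.MustCompile(`^release-\d+\.\d+$`)

// validBase mirrors the "Get base branch" step: a versioned tag is
// accepted only when its commit was pushed from 'main' or from a
// maintenance branch 'release-X.Y'.
func validBase(baseRef string) (string, error) {
	branch := strings.TrimPrefix(baseRef, "refs/heads/")
	if branch == "main" || maintenanceRe.MatchString(branch) {
		return branch, nil
	}
	return "", fmt.Errorf("tagged commit must belong to 'main' or 'release-X.Y', got %q", branch)
}

func main() {
	fmt.Println(validBase("refs/heads/release-0.31")) // release-0.31 <nil>
	fmt.Println(validBase("refs/heads/feature-x"))    // "" plus an error
}
```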
.gitignore (vendored): 3 changes

@@ -1,7 +1,6 @@
 _out
 .git
 .idea
-.vscode

 # User-specific stuff
 .idea/**/workspace.xml

@@ -76,4 +75,4 @@ fabric.properties
 .idea/caches/build_file_checksums.ser

 .DS_Store
-**/.DS_Store
+**/.DS_Store
ADOPTERS.md

@@ -13,8 +13,8 @@ but it means a lot to us.

 To add your organization to this list, you can either:

-- [open a pull request](https://github.com/cozystack/cozystack/pulls) to directly update this file, or
-- [edit this file](https://github.com/cozystack/cozystack/blob/main/ADOPTERS.md) directly in GitHub
+- [open a pull request](https://github.com/aenix-io/cozystack/pulls) to directly update this file, or
+- [edit this file](https://github.com/aenix-io/cozystack/blob/main/ADOPTERS.md) directly in GitHub

 Feel free to ask in the Slack chat if you any questions and/or require
 assistance with updating this list.
CONTRIBUTING.md

@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas o

 * Problems found while setting up the development environment
 * Gaps in our documentation
-* Bugs in our GitHub actions
+* Bugs in our Github actions

-First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).

 The guidelines below are a starting point. We don't want to limit your
 creativity, passion, and initiative. If you think there's a better way, please
-feel free to bring it up in a GitHub discussion, or open a pull request. We're
+feel free to bring it up in a Github discussion, or open a pull request. We're
 certain there are always better ways to do things, we just need to start some
 constructive dialogue!

@@ -23,9 +23,9 @@ We welcome many types of contributions including:

 * New features
 * Builds, CI/CD
 * Bug fixes
-* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
+* [Documentation](https://github.com/aenix-io/cozystack-website/tree/main)
 * Issue Triage
-* Answering questions on Slack or GitHub Discussions
+* Answering questions on Slack or Github Discussions
 * Web design
 * Communications / Social Media / Blog Posts
 * Events participation

@@ -34,7 +34,7 @@ We welcome many types of contributions including:

 ## Ask for Help

 The best way to reach us with a question when contributing is to drop a line in
-our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.
+our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.

 ## Raising Issues
GOVERNANCE.md

@@ -1,91 +0,0 @@ (file removed; its full content follows)
# Cozystack Governance

This document defines the governance structure of the Cozystack community, outlining how members collaborate to achieve shared goals.

## Overview

**Cozystack**, a Cloud Native Computing Foundation (CNCF) project, is committed
to building an open, inclusive, productive, and self-governing open source
community focused on building a high-quality open source PaaS and framework for building clouds.

## Code Repositories

The following code repositories are governed by the Cozystack community and
maintained under the `cozystack` namespace:

* **[Cozystack](https://github.com/cozystack/cozystack):** Main Cozystack codebase
* **[website](https://github.com/cozystack/website):** Cozystack website and documentation sources
* **[Talm](https://github.com/cozystack/talm):** Tool for managing Talos Linux the GitOps way
* **[cozy-proxy](https://github.com/cozystack/cozy-proxy):** A simple kube-proxy addon for 1:1 NAT services in Kubernetes with NFT backend
* **[cozystack-telemetry-server](https://github.com/cozystack/cozystack-telemetry-server):** Cozystack telemetry
* **[talos-bootstrap](https://github.com/cozystack/talos-bootstrap):** An interactive Talos Linux installer
* **[talos-meta-tool](https://github.com/cozystack/talos-meta-tool):** Tool for writing network metadata into META partition

## Community Roles

* **Users:** Members that engage with the Cozystack community via any medium, including Slack, Telegram, GitHub, and mailing lists.
* **Contributors:** Members contributing to the projects by contributing and reviewing code, writing documentation,
  responding to issues, participating in proposal discussions, and so on.
* **Directors:** Non-technical project leaders.
* **Maintainers**: Technical project leaders.

## Contributors

Cozystack is for everyone. Anyone can become a Cozystack contributor simply by
contributing to the project, whether through code, documentation, blog posts,
community management, or other means.
As with all Cozystack community members, contributors are expected to follow the
[Cozystack Code of Conduct](https://github.com/cozystack/cozystack/blob/main/CODE_OF_CONDUCT.md).

All contributions to Cozystack code, documentation, or other components in the
Cozystack GitHub organisation must follow the
[contributing guidelines](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md).
Whether these contributions are merged into the project is the prerogative of the maintainers.

## Directors

Directors are responsible for non-technical leadership functions within the project.
This includes representing Cozystack and its maintainers to the community, to the press,
and to the outside world; interfacing with CNCF and other governance entities;
and participating in project decision-making processes when appropriate.

Directors are elected by a majority vote of the maintainers.

## Maintainers

Maintainers have the right to merge code into the project.
Anyone can become a Cozystack maintainer (see "Becoming a Maintainer" below).

### Expectations

Cozystack maintainers are expected to:

* Review pull requests, triage issues, and fix bugs in their areas of
  expertise, ensuring that all changes go through the project's code review
  and integration processes.
* Monitor cncf-cozystack-* emails, the Cozystack Slack channels in Kubernetes
  and CNCF Slack workspaces, Telegram groups, and help out when possible.
* Rapidly respond to any time-sensitive security release processes.
* Attend Cozystack community meetings.

If a maintainer is no longer interested in or cannot perform the duties
listed above, they should move themselves to emeritus status.
If necessary, this can also occur through the decision-making process outlined below.

### Becoming a Maintainer

Anyone can become a Cozystack maintainer. Maintainers should be extremely
proficient in cloud native technologies and/or Go; have relevant domain expertise;
have the time and ability to meet the maintainer's expectations above;
and demonstrate the ability to work with the existing maintainers and project processes.

To become a maintainer, start by expressing interest to existing maintainers.
Existing maintainers will then ask you to demonstrate the qualifications above
by contributing PRs, doing code reviews, and other such tasks under their guidance.
After several months of working together, maintainers will decide whether to grant maintainer status.

## Project Decision-making Process

Ideally, all project decisions are resolved by consensus of maintainers and directors.
If this is not possible, a vote will be called.
The voting process is a simple majority in which each maintainer and director receives one vote.
Makefile: 24 changes

@@ -1,24 +1,15 @@
 .PHONY: manifests repos assets

-build-deps:
-	@command -V find docker skopeo jq gh helm > /dev/null
-	@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
-	@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
-	@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
-	@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)
-
-build: build-deps
+build:
 	make -C packages/apps/http-cache image
 	make -C packages/apps/postgres image
 	make -C packages/apps/mysql image
 	make -C packages/apps/clickhouse image
 	make -C packages/apps/kubernetes image
 	make -C packages/extra/monitoring image
 	make -C packages/system/cozystack-api image
 	make -C packages/system/cozystack-controller image
 	make -C packages/system/cilium image
 	make -C packages/system/kubeovn image
 	make -C packages/system/kubeovn-webhook image
 	make -C packages/system/dashboard image
 	make -C packages/system/kamaji image
 	make -C packages/system/bucket image

@@ -26,6 +17,10 @@ build: build-deps
 	make -C packages/core/installer image
 	make manifests

+manifests:
+	(cd packages/core/installer/; helm template -n cozy-installer installer .) > manifests/cozystack-installer.yaml
+	sed -i 's|@sha256:[^"]\+||' manifests/cozystack-installer.yaml
+
 repos:
 	rm -rf _out
 	make -C packages/apps check-version-map

@@ -36,20 +31,13 @@ repos:
 	mkdir -p _out/logos
 	cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/

-manifests:
-	mkdir -p _out/assets
-	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
-
 assets:
 	make -C packages/core/installer/ assets

 test:
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
 	make -C packages/core/testing test-applications

 generate:
 	hack/update-codegen.sh

 upload_assets: manifests
 	hack/upload-assets.sh
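A note on the removed `build-deps` guard: `command -V` fails the target early if any required tool is missing, while the `grep` probes insist on mikefarah's Go implementation of yq (its `--version` output names the project) and on the GNU variants of tar, sed, and awk, whose flag behaviour differs from the BSD tools (for example the in-place `sed -i` editing used by the `manifests` target).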
README.md: 53 changes

@@ -2,31 +2,30 @@
 

-[](https://opensource.org/)
-[](https://opensource.org/licenses/)
-[](https://cozystack.io/support/)
-[](https://github.com/cozystack/cozystack)
-[](https://github.com/cozystack/cozystack/releases/latest)
-[](https://github.com/cozystack/cozystack/graphs/contributors)
+[](https://opensource.org/licenses/)
+[](https://aenix.io/contact-us/#meet)
+[](https://aenix.io/cozystack/)
+[](https://github.com/aenix-io/cozystack)
+[](https://github.com/aenix-io/cozystack)

 # Cozystack

 **Cozystack** is a free PaaS platform and framework for building clouds.

-With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
-Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.

-Use Cozystack to build your own cloud or provide a cost-effective development environment.
+You can use Cozystack to build your own cloud or to provide a cost-effective development environments.

 ## Use-Cases

-* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
-  You can use Cozystack as a backend for a public cloud
+* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
+  You can use Cozystack as backend for a public cloud

-* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
-  You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach
+* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
+  You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach

-* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
-  You can use Cozystack as a Kubernetes distribution for Bare Metal
+* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
+  You can use Cozystack as Kubernetes distribution for Bare Metal

 ## Screenshot

@@ -34,32 +33,32 @@ You can use Cozystack as a Kubernetes distribution for Bare Metal

 ## Documentation

-The documentation is located on the [cozystack.io](https://cozystack.io) website.
+The documentation is located on official [cozystack.io](https://cozystack.io) website.

-Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.
+Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.

-If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.
+If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.

 ## Versioning

 Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
-A full list of the available releases is available in the GitHub repository's [Release](https://github.com/cozystack/cozystack/releases) section.
+A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.

-- [Roadmap](https://cozystack.io/docs/roadmap/)
+- [Roadmap](https://github.com/orgs/aenix-io/projects/2)

 ## Contributions

 Contributions are highly appreciated and very welcomed!

-In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
-If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.
+In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/aenix-io/cozystack/issues) section.
+In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.

-You can express your intention to work on the fix on your own.
+You can express your intention in working on the fix on your own.
 Commits are used to generate the changelog, and their author will be referenced in it.

-If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
+In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/aenix-io/cozystack/discussions/categories/feature-requests).

-You are welcome to join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
+You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).

 ## License

@@ -68,4 +67,8 @@ The code is provided as-is with no warranties.

 ## Commercial Support

-A list of companies providing commercial support for this project can be found on [official site](https://cozystack.io/support/).
+[**Ænix**](https://aenix.io) offers enterprise-grade support, available 24/7.
+
+We provide all types of assistance, including consultations, development of missing features, design, assistance with installation, and integration.
+
+[Contact us](https://aenix.io/contact/)
@@ -1,4 +1,4 @@
-API rule violation: list_type_missing,github.com/cozystack/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
+API rule violation: list_type_missing,github.com/aenix-io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
@@ -19,7 +19,7 @@ package main

 import (
 	"os"

-	"github.com/cozystack/cozystack/pkg/cmd/server"
+	"github.com/aenix-io/cozystack/pkg/cmd/server"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/component-base/cli"
 )
@@ -1,29 +0,0 @@ (file removed; its full content follows)
package main

import (
	"flag"
	"log"
	"net/http"
	"path/filepath"
)

func main() {
	addr := flag.String("address", ":8123", "Address to listen on")
	dir := flag.String("dir", "/cozystack/assets", "Directory to serve files from")
	flag.Parse()

	absDir, err := filepath.Abs(*dir)
	if err != nil {
		log.Fatalf("Error getting absolute path for %s: %v", *dir, err)
	}

	fs := http.FileServer(http.Dir(absDir))
	http.Handle("/", fs)

	log.Printf("Server starting on %s, serving directory %s", *addr, absDir)

	err = http.ListenAndServe(*addr, nil)
	if err != nil {
		log.Fatalf("Server failed to start: %v", err)
	}
}
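The removed file above is a self-contained static file server. Under its defaults it would presumably be started as `go run . -address :8123 -dir /cozystack/assets`, after which every file under the resolved directory is served at the HTTP root.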
@@ -36,11 +36,9 @@ import (

 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"

-	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
-	"github.com/cozystack/cozystack/internal/controller"
-	"github.com/cozystack/cozystack/internal/telemetry"
-
-	helmv2 "github.com/fluxcd/helm-controller/api/v2"
+	cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
+	"github.com/aenix-io/cozystack/internal/controller"
+	"github.com/aenix-io/cozystack/internal/telemetry"
 	// +kubebuilder:scaffold:imports
 )

@@ -53,7 +51,6 @@ func init() {

 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

 	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
-	utilruntime.Must(helmv2.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }
@@ -181,23 +178,6 @@ func main() {

 		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
 		os.Exit(1)
 	}

-	if err = (&controller.WorkloadReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
-		os.Exit(1)
-	}
-
-	if err = (&controller.TenantHelmReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
-	}).SetupWithManager(mgr); err != nil {
-		setupLog.Error(err, "unable to create controller", "controller", "Workload")
-		os.Exit(1)
-	}
-
 	// +kubebuilder:scaffold:builder

 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
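These removals line up with the earlier hunks in this file: the WorkloadReconciler and TenantHelmReconciler setup is dropped in the same change as the `helmv2` (fluxcd helm-controller) import and its `AddToScheme` registration.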
Several large file diffs are suppressed in this view, including dashboards/control-plane/kube-etcd3.json (1602 lines, new file).
@@ -450,7 +450,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "Total",
 "range": true,

@@ -520,7 +520,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "Total",
 "range": true,

@@ -590,7 +590,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "Total",
 "range": true,

@@ -660,7 +660,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "__auto",
 "range": true,

@@ -1128,7 +1128,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
+"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,

@@ -1143,7 +1143,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
 "hide": false,
 "legendFormat": "Total",
 "range": true,

@@ -1527,7 +1527,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
+"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,

@@ -1542,7 +1542,7 @@
   "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
+"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
 "hide": false,
 "legendFormat": "Total",
 "range": true,

@@ -1909,7 +1909,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
+"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
 "format": "table",
 "instant": true,
 "range": false,

@@ -2037,7 +2037,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
+"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
 "format": "table",
 "instant": true,
 "range": false,

@@ -2160,7 +2160,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
+"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
 "format": "table",
 "instant": true,
 "range": false,

@@ -2288,7 +2288,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
+"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
 "format": "table",
 "instant": true,
 "range": false,
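Every expression edit in these dashboard hunks is the same one-token change: one side filters only on `container!=""`, the other additionally excludes the label value `POD`, the synthetic name under which cAdvisor reports a pod's pause (sandbox) container. The extra matcher keeps that infrastructure container from being counted in per-container CPU and memory sums.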
@@ -684,7 +684,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -710,7 +710,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -723,7 +723,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -736,7 +736,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -762,7 +762,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -786,7 +786,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -798,7 +798,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -810,7 +810,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -848,7 +848,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1315,7 +1315,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1488,7 +1488,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1502,7 +1502,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1642,7 +1642,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -1779,7 +1779,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2095,7 +2095,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -2306,7 +2306,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2468,7 +2468,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2837,7 +2837,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3290,56 +3290,56 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "G"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3834,7 +3834,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3972,7 +3972,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -656,7 +656,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -680,7 +680,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -692,7 +692,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -704,7 +704,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -728,7 +728,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -740,7 +740,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -752,7 +752,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -764,7 +764,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -776,7 +776,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -814,7 +814,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -826,7 +826,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -877,7 +877,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1475,7 +1475,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1657,7 +1657,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1798,7 +1798,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2257,28 +2257,28 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
"refId": "D"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "C"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2458,7 +2458,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2470,7 +2470,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2622,7 +2622,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2814,7 +2814,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2822,14 +2822,14 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2955,7 +2955,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3091,7 +3091,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3408,14 +3408,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3423,7 +3423,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3431,35 +3431,35 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "E"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "G"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3910,7 +3910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -4049,7 +4049,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",

@@ -869,7 +869,7 @@
"refId": "A"
},
{
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -878,7 +878,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -895,7 +895,7 @@
"refId": "D"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -903,7 +903,7 @@
"refId": "E"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -919,7 +919,7 @@
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -935,7 +935,7 @@
"refId": "I"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -943,7 +943,7 @@
"refId": "J"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -968,7 +968,7 @@
"refId": "M"
},
{
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -977,7 +977,7 @@
"refId": "N"
},
{
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -1449,7 +1449,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1627,7 +1627,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1764,7 +1764,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2218,21 +2218,21 @@
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2419,7 +2419,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2572,7 +2572,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2754,14 +2754,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2769,7 +2769,7 @@
"refId": "B"
},
{
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2777,14 +2777,14 @@
"refId": "C"
},
{
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2910,7 +2910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3046,7 +3046,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3370,14 +3370,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3385,7 +3385,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3393,35 +3393,35 @@
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "F"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3873,7 +3873,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -4008,7 +4008,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",

@@ -686,7 +686,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -759,7 +759,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -847,7 +847,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
"format": "table",
"hide": false,
"instant": true,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
"format": "table",
"hide": false,
"instant": true,
@@ -899,7 +899,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1503,7 +1503,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1669,7 +1669,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1681,7 +1681,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1820,7 +1820,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2269,7 +2269,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2476,7 +2476,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2488,7 +2488,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2639,7 +2639,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2816,7 +2816,7 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2824,28 +2824,28 @@
"refId": "A"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3110,7 +3110,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3431,7 +3431,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -3439,7 +3439,7 @@
"refId": "A"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3447,28 +3447,28 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Swap",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Limits",
|
||||
"refId": "E"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Requests",
|
||||
@@ -3482,7 +3482,7 @@
|
||||
"refId": "G"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -3930,7 +3930,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ container }}",
|
||||
@@ -4068,7 +4068,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ container }}",
|
||||
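Note on the change above: the only difference in each expression is the container label filter. `container!=""` excludes the cgroup-aggregate series that cAdvisor exports with an empty `container` label, while `container!="POD"` excludes the pause-container series that older cAdvisor versions labeled `POD`. A minimal sketch for checking which of the two series shapes a given cluster actually exposes, assuming a Prometheus reachable at localhost:9090 (a hypothetical endpoint for illustration):

```bash
#!/bin/sh
# Compare the two container-label filters against a live Prometheus.
# Assumes Prometheus is reachable at localhost:9090 (adjust as needed).
PROM=http://localhost:9090/api/v1/query

# Series with an empty container label (cgroup aggregates):
curl -sG "$PROM" --data-urlencode \
  'query=count(container_memory_working_set_bytes{container=""})'

# Series labeled "POD" (pause containers on older cAdvisor):
curl -sG "$PROM" --data-urlencode \
  'query=count(container_memory_working_set_bytes{container="POD"})'
```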
(File diff suppressed because it is too large.)

docs/release.md (166 lines, file removed)
@@ -1,166 +0,0 @@
# Release Workflow

This document describes Cozystack’s release process.

## Introduction

Cozystack uses a staged release process to ensure stability and flexibility during development.

There are three types of releases:

- **Release Candidates (RC)** – Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases** – Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases** – Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.

Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.

## Release Candidates

Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.

Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into main and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.

Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in main.
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.

## Regular Releases

When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub.
2. CI workflow triggers on tag push:
   1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
   2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
   3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
   4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

When testing and editing are completed, the sequence goes on.

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on merge:
   1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing a tag to GitHub.
   2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```
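For illustration, the tagging in step 1 and the tag move in step 5.1 correspond to commands like the following — a minimal sketch using standard git; the exact CI invocation is an assumption:

```bash
# Step 1: tag the release commit on main and push the tag (assumed invocation).
git checkout main && git pull
git tag v0.42.0
git push origin v0.42.0

# Step 5.1 equivalent: CI moves the tag to the merge commit by force-pushing it.
git tag -f v0.42.0 <merge-commit-sha>   # <merge-commit-sha> is a placeholder
git push origin v0.42.0 --force
```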
## Patch Releases

Making a patch release has a lot in common with a regular release, with a couple of differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that we have released in `v0.42.0`.

Once problems were found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```
1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
   These must be only patches to features that were present in version `v0.42.0`.

   Cherry-picking can be done as soon as each patch is merged into `main`, or directly before the release.
```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```
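A minimal sketch of step 1 under the same assumptions (standard git commands; the commit hashes are placeholders):

```bash
# Create the maintenance branch from the release tag (assumed invocation).
git checkout -b release-0.42 v0.42.0
git push origin release-0.42

# Cherry-pick only the bugfix commits; the SHAs are placeholders.
git cherry-pick -x <patch-1-sha>
git cherry-pick -x <patch-2-sha>
git push origin release-0.42
```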
When all relevant patch commits are cherry-picked, the branch is ready for release.

2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and then pushes it to GitHub.
3. CI workflow triggers on tag push:
   1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
   2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
   3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
   4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, when the release is confirmed, the release sequence goes on.

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on merge:
   1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing a tag to GitHub.
   2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.
go.mod (2 lines changed)
@@ -1,6 +1,6 @@
// This is a generated file. Do not edit directly.

- module github.com/cozystack/cozystack
+ module github.com/aenix-io/cozystack

go 1.23.0
@@ -21,7 +21,7 @@ fix_d8() {
}

swap_pvc_overview() {
- jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
+ jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
}

deprectaed_remove_faq() {
@@ -68,7 +68,7 @@ modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/namespace/
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhost_detail.json
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhosts.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/control-plane-status.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd3.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/deprecated-resources.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/ntp.json #TODO
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/nodes.json
@@ -78,10 +78,6 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/pod.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespaces.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespace.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-planning/capacity-planning.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kafka/strimzi-kafka.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//goldpinger/goldpinger.json
EOT

@@ -113,3 +109,4 @@ done <<\EOT
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
EOT
hack/e2e.application.sh (165 lines, new executable file)
@@ -0,0 +1,165 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'

ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease $release_name -n $namespace
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path name is required.${RESET}"
    exit 1
  fi

  if [[ -n "$values_file" && -f "$values_file" ]]; then
    local values_section
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}

function install_tenant (){
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks(){
  local checks_file="$1"
  echo "after exec make $checks_file"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"

  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300 # Timeout in seconds
  local interval=5  # Interval between checks in seconds
  local elapsed=0

  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr $release_name $namespace
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi

checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file
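The script's entry point takes a single chart name, installs a throwaway tenant, then installs and polls the chart's HelmRelease. A hypothetical invocation (the `nats` chart name matches the testdata added below):

```bash
# Run the e2e check for a single application chart (hypothetical invocation).
# Expects kubectl access to a Cozystack cluster and testdata under /hack/testdata/.
./hack/e2e.application.sh nats
```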
hack/e2e.sh (37 lines changed)
@@ -60,8 +60,7 @@ done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
-   wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
-     -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
+   wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi
@@ -85,9 +84,8 @@ done

# Start VMs
for i in 1 2 3; do
-   qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-     -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
-     -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
+   qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
+     -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \
@@ -115,15 +113,10 @@ machine:
        - usermode_helper=disabled
    - name: zfs
    - name: spl
- registries:
-   mirrors:
-     docker.io:
-       endpoints:
-         - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
-         [plugins."io.containerd.cri.v1.runtime"]
+         [plugins."io.containerd.grpc.v1.cri"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create
@@ -233,18 +226,11 @@ timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'

sleep 5

# Wait for all HelmReleases to be installed
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

- failed_hrs=$(kubectl get hr -A | grep -v True)
- if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
-   printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
-   exit 1
- fi

# Wait for Cluster-API providers
- timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
- kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
+ timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
+ kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
@@ -327,12 +313,7 @@ kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"s
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for HelmReleases be installed
- kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
-
- if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
-   flux reconcile hr monitoring -n tenant-root --force
-   kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
- fi
+ kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
"dashboard": true
@@ -347,7 +328,7 @@ kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root

# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
- kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
+ kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for grafana
@@ -366,5 +347,5 @@ kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
"oidc-enabled": "true"
}}'

- timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
+ timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
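Aside on the `kubectl get hr -A | awk ... | sh -x` line kept above: the awk program turns the HelmRelease listing into one background `kubectl wait` per release followed by a shell `wait`, so all releases are polled in parallel. A sketch of what it emits for two releases (names are illustrative):

```bash
# What the awk program generates for two example HelmReleases (illustrative names):
kubectl wait --timeout=15m --for=condition=ready -n cozy-system hr/cozystack &
kubectl wait --timeout=15m --for=condition=ready -n tenant-root hr/tenant-root &
wait   # block until every background kubectl wait has finished
```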
@@ -1,13 +1,12 @@
#!/bin/sh
set -e

file=versions_map

charts=$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')

# <chart> <version> <commit>
new_map=$(
  for chart in $charts; do
-     awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' "$chart/Chart.yaml"
+     awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' $chart/Chart.yaml
  done
)

@@ -16,46 +15,47 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
  exit 0
fi

- miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
-
- # search accross all tags sorted by version
- search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
+ miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if (!($1 " " $2 in new_map)) print $1, $2, $3}' - $file)

resolved_miss_map=$(
-   echo "$miss_map" | while read -r chart version commit; do
-     # if version is found in HEAD, it's HEAD
-     if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
-       echo "$chart $version HEAD"
-       continue
-     fi
-
-     # if commit is not HEAD, check if it's valid
-     if [ "$commit" != "HEAD" ]; then
-       if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
-         echo "Commit $commit for $chart $version is not valid" >&2
-         exit 1
+   echo "$miss_map" | while read chart version commit; do
+     if [ "$commit" = HEAD ]; then
+       line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
+       change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')
+
+       if [ "$change_commit" = "00000000" ]; then
+         # Not committed yet, use previous commit
+         line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
+         commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
+         if [ $(echo $commit | cut -c1) = "^" ]; then
+           # Previous commit not exists
+           commit=$(echo $commit | cut -c2-)
+         fi
+       else
+         # Committed, but version_map wasn't updated
+         line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
+         change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
+         if [ $(echo $change_commit | cut -c1) = "^" ]; then
+           # Previous commit not exists
+           commit=$(echo $change_commit | cut -c2-)
+         else
+           commit=$(git describe --always "$change_commit~1")
+         fi
+       fi
+
+       commit=$(git rev-parse --short "$commit")
+       echo "$chart $version $commit"
+       continue
      fi

-     # if commit is HEAD, but version is not found in HEAD, check all tags
-     found_tag=""
-     for tag in $search_commits; do
-       if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
-         found_tag=$(git rev-parse --short "${tag}")
-         break
+     # Check if the commit belongs to the main branch
+     if ! git merge-base --is-ancestor "$commit" main; then
+       # Find the closest parent commit that belongs to main
+       commit_in_main=$(git log --pretty=format:"%h" main -- "$chart" | head -n 1)
+       if [ -n "$commit_in_main" ]; then
+         commit="$commit_in_main"
+       else
+         # No valid commit found in main branch for $chart, skipping..."
+         continue
+       fi
      fi
    done

-     if [ -z "$found_tag" ]; then
-       echo "Can't find $chart $version in any version tag, removing it" >&2
-       continue
-     fi
-
-     echo "$chart $version $found_tag"
+     echo "$chart $version $commit"
  done
)
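For context: both variants of this script maintain a `versions_map` file mapping each chart version to the commit that introduced it, in the `<chart> <version> <commit>` format noted in the script's own comment. A hypothetical excerpt of that format (values illustrative):

```bash
# Hypothetical versions_map excerpt; the third column is HEAD for the
# version currently in the working tree, or a short commit hash otherwise.
cat <<'EOF'
tenant 1.6.2 0f3c2a1
tenant 1.7.0 HEAD
nats 0.3.1 9d84b77
EOF
```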
hack/testdata/http-cache/check.sh (1 line, new vendored file)
@@ -0,0 +1 @@
return 0

hack/testdata/http-cache/values.yaml (2 lines, new vendored file)
@@ -0,0 +1,2 @@
endpoints:
  - 8.8.8.8:443

hack/testdata/kubernetes/check.sh (1 line, new vendored file)
@@ -0,0 +1 @@
return 0

hack/testdata/kubernetes/values.yaml (62 lines, new vendored file)
@@ -0,0 +1,62 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
  replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    instanceType: "u1.medium"
    ephemeralStorage: 20Gi
    roles:
      - ingress-nginx

    resources:
      cpu: ""
      memory: ""

## @section Cluster Addons
##
addons:

  ## Cert-manager: automatically creates and manages SSL/TLS certificate
  ##
  certManager:
    ## @param addons.certManager.enabled Enables the cert-manager
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: true
    valuesOverride: {}

  ## Ingress-NGINX Controller
  ##
  ingressNginx:
    ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: true
    ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
    ## e.g:
    ## hosts:
    ## - example.org
    ## - foo.example.net
    ##
    hosts: []
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:
    ## @param addons.fluxcd.enabled Enables Flux CD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: true
    valuesOverride: {}

hack/testdata/nats/check.sh (1 line, new vendored file)
@@ -0,0 +1 @@
return 0

hack/testdata/nats/values.yaml (10 lines, new vendored file)
@@ -0,0 +1,10 @@

## @section Common parameters

## @param external Enable external access from outside the cluster
## @param replicas Persistent Volume size for NATS
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""

hack/testdata/tenant/check.sh (1 line, new vendored file)
@@ -0,0 +1 @@
return 0

hack/testdata/tenant/values.yaml (6 lines, new vendored file)
@@ -0,0 +1,6 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -xe
|
||||
|
||||
version=${VERSION:-$(git describe --tags)}
|
||||
|
||||
gh release upload --clobber $version _out/assets/cozystack-installer.yaml
|
||||
gh release upload --clobber $version _out/assets/metal-amd64.iso
|
||||
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
|
||||
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
|
||||
gh release upload --clobber $version _out/assets/kernel-amd64
|
||||
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
|
||||
@@ -33,7 +33,7 @@ import (
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

-   cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
+   cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)
@@ -1,158 +0,0 @@
package controller

import (
    "context"
    "fmt"
    "strings"
    "time"

    e "errors"

    helmv2 "github.com/fluxcd/helm-controller/api/v2"
    "gopkg.in/yaml.v2"
    corev1 "k8s.io/api/core/v1"
    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"
)

type TenantHelmReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)

    hr := &helmv2.HelmRelease{}
    if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
        if errors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        logger.Error(err, "unable to fetch HelmRelease")
        return ctrl.Result{}, err
    }

    if !strings.HasPrefix(hr.Name, "tenant-") {
        return ctrl.Result{}, nil
    }

    if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
        return ctrl.Result{}, nil
    }

    if len(hr.Status.History) == 0 {
        logger.Info("no history in HelmRelease status", "name", hr.Name)
        return ctrl.Result{}, nil
    }

    if hr.Status.History[0].Status != "deployed" {
        return ctrl.Result{}, nil
    }

    newDigest := hr.Status.History[0].Digest
    var hrList helmv2.HelmReleaseList
    childNamespace := getChildNamespace(hr.Namespace, hr.Name)
    if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
        if hr.Spec.Values == nil {
            logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
            return ctrl.Result{}, nil
        }
        err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
        if err != nil {
            logger.Error(err, "cant annotate tenant-root ns")
            return ctrl.Result{}, nil
        }
        logger.Info("namespace 'tenant-root' annotated")
    }

    if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
        logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
        return ctrl.Result{}, err
    }

    for _, item := range hrList.Items {
        if item.Name == hr.Name {
            continue
        }
        oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
        if oldDigest == newDigest {
            continue
        }
        patchTarget := item.DeepCopy()

        if patchTarget.Annotations == nil {
            patchTarget.Annotations = map[string]string{}
        }
        ts := time.Now().Format(time.RFC3339Nano)

        patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
        patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
        patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts

        patch := client.MergeFrom(item.DeepCopy())
        if err := r.Patch(ctx, patchTarget, patch); err != nil {
            logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
            continue
        }

        logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
    }

    return ctrl.Result{}, nil
}

func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&helmv2.HelmRelease{}).
        Complete(r)
}

func getChildNamespace(currentNamespace, hrName string) string {
    tenantName := strings.TrimPrefix(hrName, "tenant-")

    switch {
    case currentNamespace == "tenant-root" && hrName == "tenant-root":
        // 1) root tenant inside root namespace
        return "tenant-root"

    case currentNamespace == "tenant-root":
        // 2) any other tenant in root namespace
        return fmt.Sprintf("tenant-%s", tenantName)

    default:
        // 3) tenant in a dedicated namespace
        return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
    }
}

func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
    var data map[string]interface{}
    if err := yaml.Unmarshal(values.Raw, &data); err != nil {
        return fmt.Errorf("failed to parse HelmRelease values: %w", err)
    }

    host, ok := data["host"].(string)
    if !ok || host == "" {
        return fmt.Errorf("host field not found or not a string")
    }

    var ns corev1.Namespace
    if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
        return fmt.Errorf("failed to get namespace tenant-root: %w", err)
    }

    if ns.Annotations == nil {
        ns.Annotations = map[string]string{}
    }
    ns.Annotations["namespace.cozystack.io/host"] = host

    if err := c.Update(context.TODO(), &ns); err != nil {
        return fmt.Errorf("failed to update namespace: %w", err)
    }

    return nil
}
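The controller above force-reconciles child releases by bumping Flux's standard `reconcile.fluxcd.io/requestedAt` and `reconcile.fluxcd.io/forceAt` annotations alongside its own digest annotation. The same effect can be reproduced by hand — a minimal sketch, assuming a child release named `nats` in `tenant-root` (names are illustrative):

```bash
# Manually trigger the same force-reconcile the controller performs.
# The annotation keys are the standard Flux ones; the release name is illustrative.
# Note: %N in date is GNU-specific.
ts=$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)
kubectl annotate helmrelease nats -n tenant-root --overwrite \
  reconcile.fluxcd.io/forceAt="$ts" \
  reconcile.fluxcd.io/requestedAt="$ts"
```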
@@ -1,99 +0,0 @@
package controller

import (
    "context"
    "strings"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)
    w := &cozyv1alpha1.Workload{}
    err := r.Get(ctx, req.NamespacedName, w)
    if err != nil {
        if apierrors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        logger.Error(err, "Unable to fetch Workload")
        return ctrl.Result{}, err
    }

    // it's being deleted, nothing to handle
    if w.DeletionTimestamp != nil {
        return ctrl.Result{}, nil
    }

    t := getMonitoredObject(w)

    if t == nil {
        err = r.Delete(ctx, w)
        if err != nil {
            logger.Error(err, "failed to delete workload")
        }
        return ctrl.Result{}, err
    }

    err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

    // found object, nothing to do
    if err == nil {
        return ctrl.Result{}, nil
    }

    // error getting object but not 404 -- requeue
    if !apierrors.IsNotFound(err) {
        logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
        return ctrl.Result{}, err
    }

    err = r.Delete(ctx, w)
    if err != nil {
        logger.Error(err, "failed to delete workload")
    }
    return ctrl.Result{}, err
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        // Watch WorkloadMonitor objects
        For(&cozyv1alpha1.Workload{}).
        Complete(r)
}

func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
    switch {
    case strings.HasPrefix(w.Name, "pvc-"):
        obj := &corev1.PersistentVolumeClaim{}
        obj.Name = strings.TrimPrefix(w.Name, "pvc-")
        obj.Namespace = w.Namespace
        return obj
    case strings.HasPrefix(w.Name, "svc-"):
        obj := &corev1.Service{}
        obj.Name = strings.TrimPrefix(w.Name, "svc-")
        obj.Namespace = w.Namespace
        return obj
    case strings.HasPrefix(w.Name, "pod-"):
        obj := &corev1.Pod{}
        obj.Name = strings.TrimPrefix(w.Name, "pod-")
        obj.Namespace = w.Namespace
        return obj
    }
    var obj client.Object
    return obj
}
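In this controller, the `pvc-`/`svc-`/`pod-` name prefixes link each Workload object back to the Kubernetes object it mirrors. A sketch of inspecting that mapping from the command line, assuming the `workloads.cozystack.io` CRD from the code's API group is installed (resource and object names are illustrative):

```bash
# List Workload objects; the kind each one mirrors is implied by its
# name prefix (pvc-, svc-, pod-). Resource name assumed from the cozystack.io group.
kubectl get workloads.cozystack.io -A
kubectl get workloads.cozystack.io -n tenant-root pod-mypod -o yaml   # illustrative name
```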
@@ -1,26 +0,0 @@
package controller

import (
    "testing"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
    corev1 "k8s.io/api/core/v1"
)

func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "unprefixed-name"
    obj := getMonitoredObject(w)
    if obj != nil {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
    }
}

func TestPodMonitoredObject(t *testing.T) {
    w := &cozyv1alpha1.Workload{}
    w.Name = "pod-mypod"
    obj := getMonitoredObject(w)
    if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
        t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
    }
}
@@ -3,7 +3,6 @@ package controller
import (
    "context"
    "encoding/json"
    "fmt"
    "sort"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -20,7 +19,7 @@ import (
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-   cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
+   cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
@@ -34,17 +33,6 @@ type WorkloadMonitorReconciler struct {
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
- // +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch
-
- // isServiceReady checks if the service has an external IP bound
- func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
-     return len(svc.Status.LoadBalancer.Ingress) > 0
- }
-
- // isPVCReady checks if the PVC is bound
- func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
-     return pvc.Status.Phase == corev1.ClaimBound
- }

// isPodReady checks if the Pod is in the Ready condition.
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
@@ -100,110 +88,6 @@ func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
    obj.SetOwnerReferences(owners)
}

- // reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
- func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
-     ctx context.Context,
-     monitor *cozyv1alpha1.WorkloadMonitor,
-     svc corev1.Service,
- ) error {
-     logger := log.FromContext(ctx)
-     workload := &cozyv1alpha1.Workload{
-         ObjectMeta: metav1.ObjectMeta{
-             Name:      fmt.Sprintf("svc-%s", svc.Name),
-             Namespace: svc.Namespace,
-         },
-     }
-
-     resources := make(map[string]resource.Quantity)
-
-     quantity := resource.MustParse("0")
-
-     for _, ing := range svc.Status.LoadBalancer.Ingress {
-         if ing.IP != "" {
-             quantity.Add(resource.MustParse("1"))
-         }
-     }
-
-     var resourceLabel string
-     if svc.Annotations != nil {
-         var ok bool
-         resourceLabel, ok = svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]
-         if !ok {
-             resourceLabel = "default"
-         }
-     }
-     resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
-     resources[resourceLabel] = quantity
-
-     _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
-         // Update owner references with the new monitor
-         updateOwnerReferences(workload.GetObjectMeta(), monitor)
-
-         workload.Labels = svc.Labels
-
-         // Fill Workload status fields:
-         workload.Status.Kind = monitor.Spec.Kind
-         workload.Status.Type = monitor.Spec.Type
-         workload.Status.Resources = resources
-         workload.Status.Operational = r.isServiceReady(&svc)
-
-         return nil
-     })
-     if err != nil {
-         logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
-         return err
-     }
-
-     return nil
- }
-
- // reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
- func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
-     ctx context.Context,
-     monitor *cozyv1alpha1.WorkloadMonitor,
-     pvc corev1.PersistentVolumeClaim,
- ) error {
-     logger := log.FromContext(ctx)
-     workload := &cozyv1alpha1.Workload{
-         ObjectMeta: metav1.ObjectMeta{
-             Name:      fmt.Sprintf("pvc-%s", pvc.Name),
-             Namespace: pvc.Namespace,
-         },
-     }
-
-     resources := make(map[string]resource.Quantity)
-
-     for resourceName, resourceQuantity := range pvc.Status.Capacity {
-         storageClass := "default"
-         if pvc.Spec.StorageClassName != nil || *pvc.Spec.StorageClassName == "" {
-             storageClass = *pvc.Spec.StorageClassName
-         }
-         resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
-         resources[resourceLabel] = resourceQuantity
-     }
-
-     _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
-         // Update owner references with the new monitor
-         updateOwnerReferences(workload.GetObjectMeta(), monitor)
-
-         workload.Labels = pvc.Labels
-
-         // Fill Workload status fields:
-         workload.Status.Kind = monitor.Spec.Kind
-         workload.Status.Type = monitor.Spec.Type
-         workload.Status.Resources = resources
-         workload.Status.Operational = r.isPVCReady(&pvc)
-
-         return nil
-     })
-     if err != nil {
-         logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
-         return err
-     }
-
-     return nil
- }

// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
@@ -212,12 +96,15 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
) error {
    logger := log.FromContext(ctx)

-   // totalResources will store the sum of all container resource requests
+   // Combine both init containers and normal containers to sum resources properly
+   combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
+
+   // totalResources will store the sum of all container resource limits
    totalResources := make(map[string]resource.Quantity)

-   // Iterate over all containers to aggregate their requests
-   for _, container := range pod.Spec.Containers {
-       for name, qty := range container.Resources.Requests {
+   // Iterate over all containers to aggregate their Limits
+   for _, container := range combinedContainers {
+       for name, qty := range container.Resources.Limits {
            if existing, exists := totalResources[name.String()]; exists {
                existing.Add(qty)
                totalResources[name.String()] = existing
@@ -246,7 +133,7 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(

    workload := &cozyv1alpha1.Workload{
        ObjectMeta: metav1.ObjectMeta{
-           Name:      fmt.Sprintf("pod-%s", pod.Name),
+           Name:      pod.Name,
            Namespace: pod.Namespace,
        },
    }
@@ -318,45 +205,6 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
        }
    }

-   pvcList := &corev1.PersistentVolumeClaimList{}
-   if err := r.List(
-       ctx,
-       pvcList,
-       client.InNamespace(monitor.Namespace),
-       client.MatchingLabels(monitor.Spec.Selector),
-   ); err != nil {
-       logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
-       return ctrl.Result{}, err
-   }
-
-   for _, pvc := range pvcList.Items {
-       if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
-           logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
-           continue
-       }
-   }
-
-   svcList := &corev1.ServiceList{}
-   if err := r.List(
-       ctx,
-       svcList,
-       client.InNamespace(monitor.Namespace),
-       client.MatchingLabels(monitor.Spec.Selector),
-   ); err != nil {
-       logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
-       return ctrl.Result{}, err
-   }
-
-   for _, svc := range svcList.Items {
-       if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
-           continue
-       }
-       if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
-           logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
-           continue
-       }
-   }

    // Update WorkloadMonitor status based on observed pods
    monitor.Status.ObservedReplicas = observedReplicas
    monitor.Status.AvailableReplicas = availableReplicas
@@ -385,51 +233,41 @@ func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
        // Also watch Pod objects and map them back to WorkloadMonitor if labels match
        Watches(
            &corev1.Pod{},
-           handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
-       ).
-       // Watch PVCs as well
-       Watches(
-           &corev1.PersistentVolumeClaim{},
-           handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
+           handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
+               pod, ok := obj.(*corev1.Pod)
+               if !ok {
+                   return nil
+               }
+
+               var monitorList cozyv1alpha1.WorkloadMonitorList
+               // List all WorkloadMonitors in the same namespace
+               if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
+                   return nil
+               }
+
+               // Match each monitor's selector with the Pod's labels
+               var requests []reconcile.Request
+               for _, m := range monitorList.Items {
+                   matches := true
+                   for k, v := range m.Spec.Selector {
+                       if podVal, exists := pod.Labels[k]; !exists || podVal != v {
+                           matches = false
+                           break
+                       }
+                   }
+                   if matches {
+                       requests = append(requests, reconcile.Request{
+                           NamespacedName: types.NamespacedName{
+                               Namespace: m.Namespace,
+                               Name:      m.Name,
+                           },
+                       })
+                   }
+               }
+               return requests
+           }),
        ).
        // Watch for changes to Workload objects we create (owned by WorkloadMonitor)
        Owns(&cozyv1alpha1.Workload{}).
        Complete(r)
}

- func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
-     return func(ctx context.Context, obj client.Object) []reconcile.Request {
-         concrete, ok := obj.(T)
-         if !ok {
-             return nil
-         }
-
-         var monitorList cozyv1alpha1.WorkloadMonitorList
-         // List all WorkloadMonitors in the same namespace
-         if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
-             return nil
-         }
-
-         labels := concrete.GetLabels()
-         // Match each monitor's selector with the Pod's labels
-         var requests []reconcile.Request
-         for _, m := range monitorList.Items {
-             matches := true
-             for k, v := range m.Spec.Selector {
-                 if labelVal, exists := labels[k]; !exists || labelVal != v {
-                     matches = false
-                     break
-                 }
-             }
-             if matches {
-                 requests = append(requests, reconcile.Request{
-                     NamespacedName: types.NamespacedName{
-                         Namespace: m.Namespace,
-                         Name:      m.Name,
-                     },
-                 })
-             }
-         }
-         return requests
-     }
- }
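Both variants of the watch handler map any Pod whose labels satisfy a WorkloadMonitor's selector back to that monitor. A hypothetical WorkloadMonitor manifest showing the selector-based matching (`selector`, `kind`, and `type` follow the code above; the exact CRD schema is an assumption):

```bash
# Hypothetical WorkloadMonitor matching Pods labeled app=nats
# (apiVersion and field layout assumed from the cozystack.io/v1alpha1 types above).
kubectl apply -f - <<'EOF'
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: nats
  namespace: tenant-root
spec:
  selector:
    app: nats
  kind: nats
  type: app
EOF
```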
@@ -16,7 +16,7 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

-   cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
+   cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
)

// Collector handles telemetry data collection and sending
manifests/cozystack-installer.yaml (105 lines, new file)
@@ -0,0 +1,105 @@
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: cozy-system
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cozystack
  namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cozystack
subjects:
  - kind: ServiceAccount
    name: cozystack
    namespace: cozy-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Service
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  ports:
    - name: http
      port: 80
      targetPort: 8123
  selector:
    app: cozystack
  type: ClusterIP
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cozystack
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  template:
    metadata:
      labels:
        app: cozystack
    spec:
      hostNetwork: true
      serviceAccountName: cozystack
      containers:
        - name: cozystack
          image: "ghcr.io/aenix-io/cozystack/cozystack:v0.23.1"
          env:
            - name: KUBERNETES_SERVICE_HOST
              value: localhost
            - name: KUBERNETES_SERVICE_PORT
              value: "7445"
            - name: K8S_AWAIT_ELECTION_ENABLED
              value: "1"
            - name: K8S_AWAIT_ELECTION_NAME
              value: cozystack
            - name: K8S_AWAIT_ELECTION_LOCK_NAME
              value: cozystack
            - name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
              value: cozy-system
            - name: K8S_AWAIT_ELECTION_IDENTITY
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
        - name: darkhttpd
          image: "ghcr.io/aenix-io/cozystack/cozystack:v0.23.1"
          command:
            - /usr/bin/darkhttpd
            - /cozystack/assets
            - --port
            - "8123"
          ports:
            - name: http
              containerPort: 8123
      tolerations:
        - key: "node.kubernetes.io/not-ready"
          operator: "Exists"
          effect: "NoSchedule"
        - key: "node.cilium.io/agent-not-ready"
          operator: "Exists"
          effect: "NoSchedule"
@@ -1,3 +0,0 @@
# S3 bucket

## Parameters
@@ -11,7 +11,7 @@ spec:
      kind: HelmRepository
      name: cozystack-system
      namespace: cozy-system
    version: '>= 0.0.0-0'
    version: '*'
  interval: 1m0s
  timeout: 5m0s
  values:
@@ -1,5 +0,0 @@
{
  "title": "Chart Values",
  "type": "object",
  "properties": {}
}
@@ -1 +0,0 @@
{}
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.6.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -14,7 +14,6 @@ image:
		--cache-to type=inline \
		--metadata-file images/clickhouse-backup.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/clickhouse-backup.json -o json -r)" \
	> images/clickhouse-backup.tag
@@ -36,15 +36,13 @@ more details:

### Backup parameters

| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/clickhouse-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:7a99cabdfd541f863aa5d1b2f7b49afd39838fb94c8448986634a1dc9050751c
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -121,11 +121,6 @@ spec:
      containers:
        - name: clickhouse
          image: clickhouse/clickhouse-server:24.9.2.42
          {{- if .Values.resources }}
          resources: {{- toYaml .Values.resources | nindent 16 }}
          {{- else if ne .Values.resourcesPreset "none" }}
          resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
          {{- end }}
          volumeMounts:
            - name: data-volume-template
              mountPath: /var/lib/clickhouse
@@ -17,10 +17,3 @@ rules:
    resourceNames:
      - {{ .Release.Name }}-credentials
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
      - workloadmonitors
    resourceNames:
      - {{ .Release.Name }}
    verbs: ["get", "list", "watch"]
@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: clickhouse
  type: clickhouse
  selector:
    clickhouse.altinity.com/chi: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
@@ -76,16 +76,6 @@
          "default": "ChaXoveekoh6eigh4siesheeda2quai0"
        }
      }
    },
    "resources": {
      "type": "object",
      "description": "Resources",
      "default": {}
    },
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "default": "nano"
    }
  }
}
@@ -46,16 +46,3 @@ backup:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Resources
resources: {}
# resources:
#   limits:
#     cpu: 4000m
#     memory: 4Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi

## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.4.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -21,17 +21,15 @@

### Backup parameters

| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
| `resources` | Resources | `{}` |
| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:6a8ec7e7052f2d02ec5457d7cbac6ee52b3ed93a883988a192d1394fc7c88117
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -17,10 +17,3 @@ rules:
    resourceNames:
      - {{ .Release.Name }}-credentials
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
      - workloadmonitors
    resourceNames:
      - {{ .Release.Name }}
    verbs: ["get", "list", "watch"]
@@ -6,20 +6,10 @@ metadata:
spec:
  instances: {{ .Values.replicas }}
  enableSuperuserAccess: true
  {{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
  {{- if $configMap }}
  {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
  {{- if $rawConstraints }}
  {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
  {{- end }}
  {{- end }}

  minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
  maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
  {{- if .Values.resources }}
  resources: {{- toYaml .Values.resources | nindent 4 }}
  {{- else if ne .Values.resourcesPreset "none" }}
  resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
  {{- end }}

  monitoring:
    enablePodMonitor: true
@@ -1,13 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: ferretdb
  type: ferretdb
  selector:
    app: {{ $.Release.Name }}
  version: {{ $.Chart.Version }}
@@ -81,16 +81,6 @@
          "default": "ChaXoveekoh6eigh4siesheeda2quai0"
        }
      }
    },
    "resources": {
      "type": "object",
      "description": "Resources",
      "default": {}
    },
    "resourcesPreset": {
      "type": "string",
      "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
      "default": "nano"
    }
  }
}
@@ -48,16 +48,3 @@ backup:
  s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
  s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
  resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

## @param resources Resources
resources: {}
# resources:
#   limits:
#     cpu: 4000m
#     memory: 4Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi

## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.4.0
version: 0.3.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -13,7 +13,6 @@ image-nginx:
		--cache-to type=inline \
		--metadata-file images/nginx-cache.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))@$$(yq e '."containerimage.digest"' images/nginx-cache.json -o json -r)" \
	> images/nginx-cache.tag
@@ -60,17 +60,13 @@ VTS module shows wrong upstream response time

### Common parameters

| Name | Description | Value |
| ------------------------- | ------------------------------------------------ | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
| `haproxy.resources` | Resources | `{}` |
| `haproxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `nginx.resources` | Resources | `{}` |
| `nginx.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

| Name | Description | Value |
| ------------------ | ------------------------------------------------ | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `storageClass` | StorageClass used to store the data | `""` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |

### Configuration parameters
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:4e1f5153d2673a399b315252238f4dc3eb5d6c59295aef594691710cc5b72eb4
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:a3c25199acb8e8426e6952658ccc4acaadb50fe2cfa6359743b64e5166b3fc70
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -33,11 +33,6 @@ spec:
      containers:
        - image: haproxy:latest
          name: haproxy
          {{- if .Values.haproxy.resources }}
          resources: {{- toYaml .Values.haproxy.resources | nindent 10 }}
          {{- else if ne .Values.haproxy.resourcesPreset "none" }}
          resources: {{- include "resources.preset" (dict "type" .Values.haproxy.resourcesPreset "Release" .Release) | nindent 10 }}
          {{- end }}
          ports:
            - containerPort: 8080
              name: http
@@ -52,11 +52,6 @@ spec:
      shareProcessNamespace: true
      containers:
        - name: nginx
          {{- if $.Values.nginx.resources }}
          resources: {{- toYaml $.Values.nginx.resources | nindent 10 }}
          {{- else if ne $.Values.nginx.resourcesPreset "none" }}
          resources: {{- include "resources.preset" (dict "type" $.Values.nginx.resourcesPreset "Release" $.Release) | nindent 10 }}
          {{- end }}
          image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
          readinessProbe:
            httpGet:
@@ -88,13 +83,6 @@ spec:
        - name: reloader
          image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
          command: ["/usr/bin/nginx-reloader.sh"]
          resources:
            limits:
              cpu: 50m
              memory: 50Mi
            requests:
              cpu: 50m
              memory: 50Mi
          #command: ["sleep", "infinity"]
          volumeMounts:
            - mountPath: /etc/nginx/nginx.conf
@@ -24,16 +24,6 @@
        "type": "number",
        "description": "Number of HAProxy replicas",
        "default": 2
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -44,16 +34,6 @@
        "type": "number",
        "description": "Number of Nginx replicas",
        "default": 2
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -12,32 +12,8 @@ size: 10Gi
storageClass: ""
haproxy:
  replicas: 2
  ## @param haproxy.resources Resources
  resources: {}
  # resources:
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  ## @param haproxy.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"
nginx:
  replicas: 2
  ## @param nginx.resources Resources
  resources: {}
  # resources:
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  ## @param nginx.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"

## @section Configuration parameters
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.3.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -4,19 +4,15 @@

### Common parameters

| Name | Description | Value |
| --------------------------- | ------------------------------------------------ | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |
| `kafka.resources` | Resources | `{}` |
| `kafka.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
| `zookeeper.resources` | Resources | `{}` |
| `zookeeper.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |

| Name | Description | Value |
| ------------------------ | ------------------------------------------------ | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `kafka.storageClass` | StorageClass used to store the Kafka data | `""` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
| `zookeeper.storageClass` | StorageClass used to store the ZooKeeper data | `""` |

### Configuration parameters
@@ -1,50 +0,0 @@
{{/*
Copyright Broadcom, Inc. All Rights Reserved.
SPDX-License-Identifier: APACHE-2.0
*/}}

{{/* vim: set filetype=mustache: */}}

{{/*
Return a resource request/limit object based on a given preset.
These presets are for basic testing and not meant to be used in production
{{ include "resources.preset" (dict "type" "nano") -}}
*/}}
{{- define "resources.preset" -}}
{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
{{- $presets := dict
  "nano" (dict
      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
  )
  "micro" (dict
      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
  )
  "small" (dict
      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
  )
  "medium" (dict
      "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
  )
  "large" (dict
      "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
  )
  "xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
  )
  "2xlarge" (dict
      "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
      "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
  )
}}
{{- if hasKey $presets .type -}}
{{- index $presets .type | toYaml -}}
{{- else -}}
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
{{- end -}}
{{- end -}}
@@ -17,11 +17,3 @@ rules:
    resourceNames:
      - {{ .Release.Name }}-clients-ca
    verbs: ["get", "list", "watch"]
  - apiGroups:
      - cozystack.io
    resources:
      - workloadmonitors
    resourceNames:
      - {{ .Release.Name }}
      - {{ $.Release.Name }}-zookeeper
    verbs: ["get", "list", "watch"]
@@ -8,11 +8,6 @@ metadata:
spec:
  kafka:
    replicas: {{ .Values.kafka.replicas }}
    {{- if .Values.kafka.resources }}
    resources: {{- toYaml .Values.kafka.resources | nindent 6 }}
    {{- else if ne .Values.kafka.resourcesPreset "none" }}
    resources: {{- include "resources.preset" (dict "type" .Values.kafka.resourcesPreset "Release" .Release) | nindent 6 }}
    {{- end }}
    listeners:
      - name: plain
        port: 9092
@@ -62,19 +57,8 @@ spec:
        class: {{ . }}
      {{- end }}
      deleteClaim: true
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: {{ .Release.Name }}-metrics
          key: kafka-metrics-config.yml
  zookeeper:
    replicas: {{ .Values.zookeeper.replicas }}
    {{- if .Values.zookeeper.resources }}
    resources: {{- toYaml .Values.zookeeper.resources | nindent 6 }}
    {{- else if ne .Values.zookeeper.resourcesPreset "none" }}
    resources: {{- include "resources.preset" (dict "type" .Values.zookeeper.resourcesPreset "Release" .Release) | nindent 6 }}
    {{- end }}
    storage:
      type: persistent-claim
      {{- with .Values.zookeeper.size }}
@@ -84,12 +68,6 @@ spec:
        class: {{ . }}
      {{- end }}
      deleteClaim: false
    metricsConfig:
      type: jmxPrometheusExporter
      valueFrom:
        configMapKeyRef:
          name: {{ .Release.Name }}-metrics
          key: kafka-metrics-config.yml
  entityOperator:
    topicOperator: {}
    userOperator: {}
@@ -1,198 +0,0 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ .Release.Name }}-metrics
data:
  kafka-metrics-config.yml: |
    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
    lowercaseOutputName: true
    rules:
    # Special cases and very specific rules
    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
      name: kafka_server_$1_$2
      type: GAUGE
      labels:
        clientId: "$3"
        topic: "$4"
        partition: "$5"
    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
      name: kafka_server_$1_$2
      type: GAUGE
      labels:
        clientId: "$3"
        broker: "$4:$5"
    - pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
      name: kafka_server_$1_connections_tls_info
      type: GAUGE
      labels:
        cipher: "$2"
        protocol: "$3"
        listener: "$4"
        networkProcessor: "$5"
    - pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
      name: kafka_server_$1_connections_software
      type: GAUGE
      labels:
        clientSoftwareName: "$2"
        clientSoftwareVersion: "$3"
        listener: "$4"
        networkProcessor: "$5"
    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total):"
      name: kafka_server_$1_$4
      type: COUNTER
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
      name: kafka_server_$1_$4
      type: GAUGE
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+-total)
      name: kafka_server_$1_$4
      type: COUNTER
      labels:
        listener: "$2"
        networkProcessor: "$3"
    - pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
      name: kafka_server_$1_$4
      type: GAUGE
      labels:
        listener: "$2"
        networkProcessor: "$3"
    # Some percent metrics use MeanRate attribute
    # Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
      name: kafka_$1_$2_$3_percent
      type: GAUGE
    # Generic gauges for percents
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
      name: kafka_$1_$2_$3_percent
      type: GAUGE
    - pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
      name: kafka_$1_$2_$3_percent
      type: GAUGE
      labels:
        "$4": "$5"
    # Generic per-second counters with 0-2 key/value pairs
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
      name: kafka_$1_$2_$3_total
      type: COUNTER
    # Generic gauges with 0-2 key/value pairs
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
      name: kafka_$1_$2_$3
      type: GAUGE
    # Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
    # Note that these are missing the '_sum' metric!
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
      labels:
        "$4": "$5"
        "$6": "$7"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        "$6": "$7"
        quantile: "0.$8"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
      labels:
        "$4": "$5"
    - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        "$4": "$5"
        quantile: "0.$6"
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
      name: kafka_$1_$2_$3_count
      type: COUNTER
    - pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
      name: kafka_$1_$2_$3
      type: GAUGE
      labels:
        quantile: "0.$4"
    # KRaft overall related metrics
    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
    - pattern: "kafka.server<type=raft-metrics><>(.+-total|.+-max):"
      name: kafka_server_raftmetrics_$1
      type: COUNTER
    - pattern: "kafka.server<type=raft-metrics><>(current-state): (.+)"
      name: kafka_server_raftmetrics_$1
      value: 1
      type: UNTYPED
      labels:
        $1: "$2"
    - pattern: "kafka.server<type=raft-metrics><>(.+):"
      name: kafka_server_raftmetrics_$1
      type: GAUGE
    # KRaft "low level" channels related metrics
    # distinguish between always increasing COUNTER (total and max) and variable GAUGE (all others) metrics
    - pattern: "kafka.server<type=raft-channel-metrics><>(.+-total|.+-max):"
      name: kafka_server_raftchannelmetrics_$1
      type: COUNTER
    - pattern: "kafka.server<type=raft-channel-metrics><>(.+):"
      name: kafka_server_raftchannelmetrics_$1
      type: GAUGE
    # Broker metrics related to fetching metadata topic records in KRaft mode
    - pattern: "kafka.server<type=broker-metadata-metrics><>(.+):"
      name: kafka_server_brokermetadatametrics_$1
      type: GAUGE
  zookeeper-metrics-config.yml: |
    # See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
    lowercaseOutputName: true
    rules:
    # replicated Zookeeper
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+)><>(\\w+)"
      name: "zookeeper_$2"
      type: GAUGE
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+)><>(\\w+)"
      name: "zookeeper_$3"
      type: GAUGE
      labels:
        replicaId: "$2"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(Packets\\w+)"
      name: "zookeeper_$4"
      type: COUNTER
      labels:
        replicaId: "$2"
        memberType: "$3"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+)><>(\\w+)"
      name: "zookeeper_$4"
      type: GAUGE
      labels:
        replicaId: "$2"
        memberType: "$3"
    - pattern: "org.apache.ZooKeeperService<name0=ReplicatedServer_id(\\d+), name1=replica.(\\d+), name2=(\\w+), name3=(\\w+)><>(\\w+)"
      name: "zookeeper_$4_$5"
      type: GAUGE
      labels:
        replicaId: "$2"
        memberType: "$3"
@@ -1,40 +0,0 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
  name: {{ .Release.Name }}
spec:
  podMetricsEndpoints:
    - port: tcp-prometheus
      scheme: http
      relabelConfigs:
        - separator: ;
          regex: __meta_kubernetes_pod_label_(strimzi_io_.+)
          replacement: $1
          action: labelmap
        - sourceLabels: [__meta_kubernetes_namespace]
          separator: ;
          regex: (.*)
          targetLabel: namespace
          replacement: $1
          action: replace
        - sourceLabels: [__meta_kubernetes_pod_name]
          separator: ;
          regex: (.*)
          targetLabel: pod
          replacement: $1
          action: replace
        - sourceLabels: [__meta_kubernetes_pod_node_name]
          separator: ;
          regex: (.*)
          targetLabel: node
          replacement: $1
          action: replace
        - sourceLabels: [__meta_kubernetes_pod_host_ip]
          separator: ;
          regex: (.*)
          targetLabel: node_ip
          replacement: $1
          action: replace
  selector:
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name }}
@@ -1,30 +0,0 @@
---
apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: kafka
  type: kafka
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
    app.kubernetes.io/name: kafka
  version: {{ $.Chart.Version }}

---

apiVersion: cozystack.io/v1alpha1
kind: WorkloadMonitor
metadata:
  name: {{ $.Release.Name }}-zookeeper
spec:
  replicas: {{ .Values.replicas }}
  minReplicas: 1
  kind: kafka
  type: zookeeper
  selector:
    app.kubernetes.io/instance: {{ $.Release.Name }}
    app.kubernetes.io/name: zookeeper
  version: {{ $.Chart.Version }}
@@ -24,16 +24,6 @@
        "type": "string",
        "description": "StorageClass used to store the Kafka data",
        "default": ""
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -54,16 +44,6 @@
        "type": "string",
        "description": "StorageClass used to store the ZooKeeper data",
        "default": ""
      },
      "resources": {
        "type": "object",
        "description": "Resources",
        "default": {}
      },
      "resourcesPreset": {
        "type": "string",
        "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
        "default": "nano"
      }
    }
  },
@@ -14,35 +14,10 @@ kafka:
  size: 10Gi
  replicas: 3
  storageClass: ""
  ## @param kafka.resources Resources
  resources: {}
  # resources:
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  ## @param kafka.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"

zookeeper:
  size: 5Gi
  replicas: 3
  storageClass: ""
  ## @param zookeeper.resources Resources
  resources: {}
  # resources:
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  ## @param zookeeper.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
  resourcesPreset: "nano"

## @section Configuration parameters
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.20.0
version: 0.15.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -1,4 +1,4 @@
KUBERNETES_VERSION = v1.32
UBUNTU_CONTAINER_DISK_TAG = v1.30.1
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)

include ../../../scripts/common-envs.mk
@@ -6,26 +6,20 @@ include ../../../scripts/package.mk

generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md
	yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
	yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
	yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
	yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json

image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler

image-ubuntu-container-disk:
	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
		--provenance false \
		--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
		--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
		--cache-to type=inline \
		--metadata-file images/ubuntu-container-disk.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
	echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
	> images/ubuntu-container-disk.tag
	rm -f images/ubuntu-container-disk.json

@@ -38,7 +32,6 @@ image-kubevirt-cloud-provider:
		--cache-to type=inline \
		--metadata-file images/kubevirt-cloud-provider.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/kubevirt-cloud-provider:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-cloud-provider.json -o json -r)" \
	> images/kubevirt-cloud-provider.tag
@@ -53,7 +46,6 @@ image-kubevirt-csi-driver:
		--cache-to type=inline \
		--metadata-file images/kubevirt-csi-driver.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/kubevirt-csi-driver:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/kubevirt-csi-driver.json -o json -r)" \
	> images/kubevirt-csi-driver.tag
@@ -69,7 +61,6 @@ image-cluster-autoscaler:
		--cache-to type=inline \
		--metadata-file images/cluster-autoscaler.json \
		--push=$(PUSH) \
		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
		--load=$(LOAD)
	echo "$(REGISTRY)/cluster-autoscaler:$(call settag,$(KUBERNETES_PKG_TAG))@$$(yq e '."containerimage.digest"' images/cluster-autoscaler.json -o json -r)" \
	> images/cluster-autoscaler.tag
@@ -27,47 +27,20 @@ How to access to deployed cluster:
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```

## Parameters
# Series

### Common parameters
<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->

| Name | Description | Value |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------ |
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
| `storageClass` | StorageClass used to store user data | `replicated` |
| `nodeGroups` | nodeGroups configuration | `{}` |

### Cluster Addons

| Name | Description | Value |
| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- |
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |

### Kubernetes control plane configuration

| Name | Description | Value |
| -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `controlPlane.apiServer.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resources` | Resources | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.konnectivity.server.resources` | Resources | `{}` |

.                           | U   | O   | CX   | M   | RT
----------------------------|-----|-----|------|-----|------
*Has GPUs*                  |     |     |      |     |
*Hugepages*                 |     |     | ✓    | ✓   | ✓
*Overcommitted Memory*      |     | ✓   |      |     |
*Dedicated CPU*             |     |     | ✓    |     | ✓
*Burstable CPU performance* | ✓   | ✓   |      | ✓   |
*Isolated emulator threads* |     |     | ✓    |     | ✓
*vNUMA*                     |     |     | ✓    |     | ✓
*vCPU-To-Memory Ratio*      | 1:4 | 1:4 | 1:2  | 1:8 | 1:4


## U Series
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.19.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.0@sha256:538ee308f16c9e627ed16ee7c4aaa65919c2e6c4c2778f964a06e4797610d1cd

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.19.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.0@sha256:7716c88947d13dc90ccfcc3e60bfdd6e6fa9b201339a75e9c84bf825c76e2b1f
@@ -3,11 +3,12 @@ FROM --platform=linux/amd64 golang:1.20.6 AS builder

RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
 && cd /go/src/kubevirt.io/cloud-provider-kubevirt \
 && git checkout 443a1fe
 && git checkout da9e0cf

WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt

# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/335
# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/336
ADD patches /patches
RUN git apply /patches/*.diff
RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
@@ -0,0 +1,20 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..95c31438 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -412,11 +412,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
 	// Create the desired port configuration
 	var desiredPorts []discovery.EndpointPort
 
-	for _, port := range service.Spec.Ports {
+	for i := range service.Spec.Ports {
 		desiredPorts = append(desiredPorts, discovery.EndpointPort{
-			Port:     &port.TargetPort.IntVal,
-			Protocol: &port.Protocol,
-			Name:     &port.Name,
+			Port:     &service.Spec.Ports[i].TargetPort.IntVal,
+			Protocol: &service.Spec.Ports[i].Protocol,
+			Name:     &service.Spec.Ports[i].Name,
 		})
 	}
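The patch above works around the classic Go loop-variable capture bug: before Go 1.22 (and the builder image pins golang:1.20.6), the range variable port is a single variable reused on every iteration, so taking its address makes every EndpointPort field point at the last element. A standalone illustration of both behaviors:

```go
package main

import "fmt"

func main() {
	ports := []int32{80, 443, 8080}

	// Buggy pattern (pre-Go 1.22): every pointer aliases the same loop variable.
	var aliased []*int32
	for _, p := range ports {
		aliased = append(aliased, &p)
	}
	fmt.Println(*aliased[0], *aliased[1], *aliased[2]) // 8080 8080 8080 on Go <= 1.21

	// Fixed pattern, as in the patch: index into the slice so each pointer
	// addresses a distinct element.
	var distinct []*int32
	for i := range ports {
		distinct = append(distinct, &ports[i])
	}
	fmt.Println(*distinct[0], *distinct[1], *distinct[2]) // 80 443 8080
}
```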
Some files were not shown because too many files have changed in this diff.