Compare commits


2 Commits

Author SHA1 Message Date
Timofei Larkin
7acca3582c Debug pre-commit step of CI
Signed-off-by: Timofei Larkin <lllamnyp@gmail.com>
2025-04-22 19:09:52 +03:00
Timofei Larkin
d5b92624b3 Hash tenant config and store in configmap
Every tenant now creates a configmap in its __tenant__ namespace with a
sha256 of its values. Tenants (and eventually all other apps) watch the
configmap in their __release__ namespace by referencing it in the
valuesFrom part of the HelmRelease. `tenant-root` is an exception, since
it is the only tenant where the release namespace is the same as the
tenant namespace. It references a different configmap in its valuesFrom,
created and reconciled by the cozystack installer script. Part of #802.

Signed-off-by: Timofei Larkin <lllamnyp@gmail.com>
2025-04-22 17:44:04 +02:00
152 changed files with 1309 additions and 2131 deletions
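For context, here is a minimal sketch of the mechanism described in d5b92624b3, assuming the Flux HelmRelease valuesFrom API (helm.toolkit.fluxcd.io). The ConfigMap name, key, and target path below are hypothetical placeholders for illustration, not names taken from this diff:

apiVersion: v1
kind: ConfigMap
metadata:
  name: tenant-values-hash            # hypothetical name; created in the tenant namespace
data:
  checksum: "<sha256 of the tenant's values>"
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: example-app                   # hypothetical release deployed in the tenant namespace
spec:
  # chart, interval, etc. omitted for brevity
  valuesFrom:
    - kind: ConfigMap
      name: tenant-values-hash        # helm-controller watches referenced ConfigMaps
      valuesKey: checksum             # hypothetical key holding the sha256
      targetPath: configHash          # injects the hash as a chart value
      optional: true

The idea, as the commit message describes it, is that updating the checksum changes the rendered values, so helm-controller re-reconciles the release whenever the tenant's configuration hash changes.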

View File

@@ -1,53 +0,0 @@
name: Automatic Backport
on:
pull_request_target:
types: [closed] # fires when PR is closed (merged)
concurrency:
group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
permissions:
contents: write
pull-requests: write
jobs:
backport:
if: |
github.event.pull_request.merged == true &&
contains(github.event.pull_request.labels.*.name, 'backport')
runs-on: [self-hosted]
steps:
# 1. Decide which maintenance branch should receive the backport
- name: Determine target maintenance branch
id: target
uses: actions/github-script@v7
with:
script: |
let rel;
try {
rel = await github.rest.repos.getLatestRelease({
owner: context.repo.owner,
repo: context.repo.repo
});
} catch (e) {
core.setFailed('No existing releases found; cannot determine backport target.');
return;
}
const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
const branch = `release-${maj}.${min}`;
core.setOutput('branch', branch);
console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);
# 2. Checkout (required by backport-action)
- name: Checkout repository
uses: actions/checkout@v4
# 3. Create the backport pull request
- name: Create backport PR
uses: korthout/backport-action@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
label_pattern: '' # don't read labels for targets
target_branches: ${{ steps.target.outputs.branch }}

View File

@@ -1,22 +1,18 @@
name: Pre-Commit Checks
on:
push:
branches:
- main
pull_request:
types: [labeled, opened, synchronize, reopened]
concurrency:
group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
paths-ignore:
- '**.md'
jobs:
pre-commit:
runs-on: ubuntu-22.04
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
fetch-tags: true
- name: Set up Python
uses: actions/setup-python@v4

View File

@@ -4,10 +4,6 @@ on:
pull_request:
types: [labeled, opened, synchronize, reopened, closed]
concurrency:
group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
verify:
name: Test Release
@@ -16,8 +12,8 @@ jobs:
contents: read
packages: write
# Run only when the PR carries the "release" label and is not closed.
if: |
contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
contains(github.event.pull_request.labels.*.name, 'release') &&
github.event.action != 'closed'
@@ -43,112 +39,38 @@ jobs:
runs-on: [self-hosted]
permissions:
contents: write
if: |
github.event.pull_request.merged == true &&
contains(github.event.pull_request.labels.*.name, 'release')
steps:
# Extract tag from branch name (branch = release-X.Y.Z*)
- name: Extract tag from branch name
id: get_tag
uses: actions/github-script@v7
with:
script: |
const branch = context.payload.pull_request.head.ref;
const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!m) {
core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
return;
}
const tag = `v${m[1]}`;
core.setOutput('tag', tag);
console.log(`✅ Tag to publish: ${tag}`);
const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
# Checkout repo & create / push annotated tag
if (!match) {
core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
} else {
const tag = `v${match[1]}`;
core.setOutput('tag', tag);
console.log(`✅ Extracted tag: ${tag}`);
}
- name: Checkout repo
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Create tag on merge commit
- name: Create tag on merged commit
run: |
git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
git push -f origin ${{ steps.get_tag.outputs.tag }}
# Ensure maintenance branch release-X.Y
- name: Ensure maintenance branch release-X.Y
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
if (!match) {
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
return;
}
const line = `${match[1]}.${match[2]}`;
const branch = `release-${line}`;
try {
await github.rest.repos.getBranch({
owner: context.repo.owner,
repo: context.repo.repo,
branch
});
console.log(`Branch '${branch}' already exists`);
} catch (_) {
await github.rest.git.createRef({
owner: context.repo.owner,
repo: context.repo.repo,
ref: `refs/heads/${branch}`,
sha: context.sha
});
console.log(`✅ Branch '${branch}' created at ${context.sha}`);
}
# Get the latest published release
- name: Get the latest published release
id: latest_release
uses: actions/github-script@v7
with:
script: |
try {
const rel = await github.rest.repos.getLatestRelease({
owner: context.repo.owner,
repo: context.repo.repo
});
core.setOutput('tag', rel.data.tag_name);
} catch (_) {
core.setOutput('tag', '');
}
# Compare current tag vs latest using semver-utils
- name: Semver compare
id: semver
uses: madhead/semver-utils@v4.3.0
with:
version: ${{ steps.get_tag.outputs.tag }}
compare-to: ${{ steps.latest_release.outputs.tag }}
# Derive flags: prerelease? make_latest?
- name: Calculate publish flags
id: flags
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/);
if (!m) {
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
return;
}
const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
const isRc = Boolean(m[2]);
core.setOutput('is_rc', isRc);
const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');
# Publish draft release with correct flags
git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }} --force
git push origin ${{ steps.get_tag.outputs.tag }} --force
- name: Publish draft release
uses: actions/github-script@v7
with:
@@ -156,17 +78,19 @@ jobs:
const tag = '${{ steps.get_tag.outputs.tag }}';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
repo: context.repo.repo
});
const draft = releases.data.find(r => r.tag_name === tag && r.draft);
if (!draft) throw new Error(`Draft release for ${tag} not found`);
const release = releases.data.find(r => r.tag_name === tag && r.draft);
if (!release) {
throw new Error(`Draft release with tag ${tag} not found`);
}
await github.rest.repos.updateRelease({
owner: context.repo.owner,
repo: context.repo.repo,
release_id: draft.id,
draft: false,
prerelease: ${{ steps.flags.outputs.is_rc }},
make_latest: '${{ steps.flags.outputs.make_latest }}'
owner: context.repo.owner,
repo: context.repo.repo,
release_id: release.id,
draft: false
});
console.log(`🚀 Published release for ${tag}`);
console.log(` Published release for ${tag}`);

View File

@@ -4,10 +4,6 @@ on:
pull_request:
types: [labeled, opened, synchronize, reopened]
concurrency:
group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
e2e:
name: Build and Test
@@ -16,8 +12,8 @@ jobs:
contents: read
packages: write
# Never run when the PR carries the "release" label.
if: |
contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
!contains(github.event.pull_request.labels.*.name, 'release')
steps:
@@ -34,8 +30,10 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: Build
run: make build
- name: make build
run: |
make build
- name: Test
run: make test
- name: make test
run: |
make test

View File

@@ -1,15 +1,10 @@
name: Versioned Tag
on:
# Trigger on push if it includes a tag like vX.Y.Z
push:
tags:
- 'v*.*.*' # vX.Y.Z
- 'v*.*.*-rc.*' # vX.Y.Z-rc.N
concurrency:
group: tags-${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
- 'v*.*.*'
jobs:
prepare-release:
@@ -19,10 +14,9 @@ jobs:
contents: write
packages: write
pull-requests: write
actions: write
steps:
# Check if a non-draft release with this tag already exists
# 1) Check if a non-draft release with this tag already exists
- name: Check if release already exists
id: check_release
uses: actions/github-script@v7
@@ -31,67 +25,57 @@ jobs:
const tag = context.ref.replace('refs/tags/', '');
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
repo: context.repo.repo
});
const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
core.setOutput('skip', exists);
console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);
const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
if (existing) {
core.setOutput('skip', 'true');
} else {
core.setOutput('skip', 'false');
}
# If a published release already exists, skip the rest of the workflow
- name: Skip if release already exists
if: steps.check_release.outputs.skip == 'true'
run: echo "Release already exists, skipping workflow."
# Parse tag metadata (rc?, maintenance line, etc.)
- name: Parse tag
if: steps.check_release.outputs.skip == 'false'
id: tag
uses: actions/github-script@v7
with:
script: |
const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/); // ['0.31.5', '-rc.1']
if (!m) {
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
return;
}
const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
const isRc = Boolean(m[2]);
const [maj, min] = m[1].split('.');
core.setOutput('tag', ref); // v0.31.5-rc.1
core.setOutput('version', version); // 0.31.5-rc.1
core.setOutput('is_rc', isRc); // true
core.setOutput('line', `${maj}.${min}`); // 0.31
# Detect base branch (main or release-X.Y) the tag was pushed from
# 2) Determine the base branch from which the tag was pushed
- name: Get base branch
if: steps.check_release.outputs.skip == 'false'
id: get_base
uses: actions/github-script@v7
with:
script: |
/*
For a push event with a tag, GitHub sets context.payload.base_ref
if the tag was pushed from a branch.
If it's empty, we can't determine the correct base branch and must fail.
*/
const baseRef = context.payload.base_ref;
if (!baseRef) {
core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
core.setFailed(`❌ base_ref is empty. Make sure you push the tag from a branch (e.g. 'git push origin HEAD:refs/tags/vX.Y.Z').`);
return;
}
const branch = baseRef.replace('refs/heads/', '');
const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
if (!ok) {
core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
return;
}
core.setOutput('branch', branch);
# Checkout & login once
const shortBranch = baseRef.replace("refs/heads/", "");
const releasePattern = /^release-\d+\.\d+$/;
if (shortBranch !== "main" && !releasePattern.test(shortBranch)) {
core.setFailed(`❌ Tagged commit must belong to branch 'main' or 'release-X.Y'. Got '${shortBranch}'`);
return;
}
core.setOutput('branch', shortBranch);
# 3) Checkout full git history and tags
- name: Checkout code
if: steps.check_release.outputs.skip == 'false'
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
fetch-tags: true
- name: Login to GHCR
# 4) Login to GitHub Container Registry
- name: Login to GitHub Container Registry
if: steps.check_release.outputs.skip == 'false'
uses: docker/login-action@v3
with:
@@ -99,129 +83,113 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
# Build project artifacts
# 5) Build project artifacts
- name: Build
if: steps.check_release.outputs.skip == 'false'
run: make build
# Commit built artifacts
# 6) Optionally commit built artifacts to the repository
- name: Commit release artifacts
if: steps.check_release.outputs.skip == 'false'
env:
GIT_AUTHOR_NAME: ${{ github.actor }}
GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
run: |
git config user.name "github-actions"
git config user.name "github-actions"
git config user.email "github-actions@github.com"
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
git push origin HEAD || true
# Get `latest_version` from latest published release
- name: Get latest published release
if: steps.check_release.outputs.skip == 'false'
id: latest_release
uses: actions/github-script@v7
with:
script: |
try {
const rel = await github.rest.repos.getLatestRelease({
owner: context.repo.owner,
repo: context.repo.repo
});
core.setOutput('tag', rel.data.tag_name);
} catch (_) {
core.setOutput('tag', '');
}
# Compare tag (A) with latest (B)
- name: Semver compare
if: steps.check_release.outputs.skip == 'false'
id: semver
uses: madhead/semver-utils@v4.3.0
with:
version: ${{ steps.tag.outputs.tag }} # A
compare-to: ${{ steps.latest_release.outputs.tag }} # B
# Create or reuse DRAFT GitHub Release
- name: Create / reuse draft release
if: steps.check_release.outputs.skip == 'false'
id: release
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.tag.outputs.tag }}';
const isRc = ${{ steps.tag.outputs.is_rc }};
const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
const makeLatest = outdated ? false : 'legacy';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
let rel = releases.data.find(r => r.tag_name === tag);
if (!rel) {
rel = await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tag,
name: tag,
draft: true,
prerelease: isRc,
make_latest: makeLatest
});
console.log(`Draft release created for ${tag}`);
} else {
console.log(`Reusing existing release ${tag}`);
}
core.setOutput('upload_url', rel.upload_url);
# Build + upload assets (optional)
- name: Build & upload assets
if: steps.check_release.outputs.skip == 'false'
run: |
make assets
make upload_assets VERSION=${{ steps.tag.outputs.tag }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Create release-X.Y.Z branch and push (force-update)
# 7) Create a release branch like release-X.Y.Z
- name: Create release branch
if: steps.check_release.outputs.skip == 'false'
run: |
BRANCH="release-${GITHUB_REF#refs/tags/v}"
git branch -f "$BRANCH"
git push -f origin "$BRANCH"
BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
git branch -f "$BRANCH_NAME"
git push origin "$BRANCH_NAME" --force
# Create pull request into original base branch (if absent)
# 8) Create a pull request from release-X.Y.Z to the original base branch
- name: Create pull request if not exists
if: steps.check_release.outputs.skip == 'false'
uses: actions/github-script@v7
with:
script: |
const version = context.ref.replace('refs/tags/v', '');
const base = '${{ steps.get_base.outputs.branch }}';
const head = `release-${version}`;
const base = '${{ steps.get_base.outputs.branch }}';
const head = `release-${version}`;
const prs = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
head: `${context.repo.owner}:${head}`,
repo: context.repo.repo,
head: `${context.repo.owner}:${head}`,
base
});
if (prs.data.length === 0) {
const pr = await github.rest.pulls.create({
const newPr = await github.rest.pulls.create({
owner: context.repo.owner,
repo: context.repo.repo,
repo: context.repo.repo,
head,
base,
title: `Release v${version}`,
body: `This PR prepares the release \`v${version}\`.`,
body:
`This PR prepares the release \`v${version}\`.\n` +
`(Please merge it before releasing draft)`,
draft: false
});
console.log(`Created pull request #${newPr.data.number} from ${head} to ${base}`);
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr.data.number,
repo: context.repo.repo,
issue_number: newPr.data.number,
labels: ['release']
});
console.log(`Created PR #${pr.data.number}`);
} else {
console.log(`PR already exists from ${head} to ${base}`);
console.log(`Pull request already exists from ${head} to ${base}`);
}
# 9) Create or reuse an existing draft GitHub release for this tag
- name: Create or reuse draft release
if: steps.check_release.outputs.skip == 'false'
id: create_release
uses: actions/github-script@v7
with:
script: |
const tag = context.ref.replace('refs/tags/', '');
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
let release = releases.data.find(r => r.tag_name === tag);
if (!release) {
release = await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tag,
name: `${tag}`,
draft: true,
prerelease: false
});
}
core.setOutput('upload_url', release.upload_url);
# 10) Build additional assets for the release (if needed)
- name: Build assets
if: steps.check_release.outputs.skip == 'false'
run: make assets
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# 11) Upload assets to the draft release
- name: Upload assets
if: steps.check_release.outputs.skip == 'false'
run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# 12) Run tests
- name: Run tests
if: steps.check_release.outputs.skip == 'false'
run: make test

.gitignore (vendored), 3 lines changed
View File

@@ -1,7 +1,6 @@
_out
.git
.idea
.vscode
# User-specific stuff
.idea/**/workspace.xml
@@ -76,4 +75,4 @@ fabric.properties
.idea/caches/build_file_checksums.ser
.DS_Store
**/.DS_Store
**/.DS_Store

View File

@@ -3,7 +3,7 @@ repos:
hooks:
- id: gen-versions-map
name: Generate versions map and check for changes
entry: sh -c 'make -C packages/apps check-version-map && make -C packages/extra check-version-map'
entry: sh -c 'set -x && make -C packages/apps check-version-map && make -C packages/extra check-version-map'
language: system
types: [file]
pass_filenames: false

View File

@@ -47,6 +47,7 @@ assets:
test:
make -C packages/core/testing apply
make -C packages/core/testing test
#make -C packages/core/testing test-applications
generate:
hack/update-codegen.sh

View File

@@ -39,8 +39,6 @@ import (
cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
"github.com/cozystack/cozystack/internal/controller"
"github.com/cozystack/cozystack/internal/telemetry"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
// +kubebuilder:scaffold:imports
)
@@ -53,7 +51,6 @@ func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
utilruntime.Must(helmv2.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
@@ -185,14 +182,6 @@ func main() {
if err = (&controller.WorkloadReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
os.Exit(1)
}
if err = (&controller.TenantHelmReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Workload")
os.Exit(1)

View File

@@ -626,7 +626,7 @@
"datasource": {
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"POD\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
"hide": false,
"interval": "",
"legendFormat": "{{pod}}",

View File

@@ -450,7 +450,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -520,7 +520,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -590,7 +590,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -660,7 +660,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
"hide": false,
"legendFormat": "__auto",
"range": true,
@@ -1128,7 +1128,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -1143,7 +1143,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -1527,7 +1527,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -1542,7 +1542,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
"hide": false,
"legendFormat": "Total",
"range": true,
@@ -1909,7 +1909,7 @@
},
"editorMode": "code",
"exemplar": false,
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -2037,7 +2037,7 @@
},
"editorMode": "code",
"exemplar": false,
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -2160,7 +2160,7 @@
},
"editorMode": "code",
"exemplar": false,
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
"format": "table",
"instant": true,
"range": false,
@@ -2288,7 +2288,7 @@
},
"editorMode": "code",
"exemplar": false,
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
"format": "table",
"instant": true,
"range": false,

View File

@@ -684,7 +684,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -710,7 +710,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -723,7 +723,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -736,7 +736,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -762,7 +762,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -786,7 +786,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -798,7 +798,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -810,7 +810,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -848,7 +848,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1315,7 +1315,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1488,7 +1488,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1502,7 +1502,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1642,7 +1642,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -1779,7 +1779,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2095,7 +2095,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -2306,7 +2306,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2468,7 +2468,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2837,7 +2837,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3290,56 +3290,56 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "G"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3834,7 +3834,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3972,7 +3972,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
View File
@@ -656,7 +656,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -680,7 +680,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -692,7 +692,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -704,7 +704,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -728,7 +728,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -740,7 +740,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -752,7 +752,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -764,7 +764,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -776,7 +776,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -814,7 +814,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -826,7 +826,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -877,7 +877,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1475,7 +1475,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1657,7 +1657,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1798,7 +1798,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2257,28 +2257,28 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
"refId": "D"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "C"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2458,7 +2458,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2470,7 +2470,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2622,7 +2622,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2814,7 +2814,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2822,14 +2822,14 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2955,7 +2955,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3091,7 +3091,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3408,14 +3408,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3423,7 +3423,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3431,35 +3431,35 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "E"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "G"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3910,7 +3910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -4049,7 +4049,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
View File
@@ -869,7 +869,7 @@
"refId": "A"
},
{
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -878,7 +878,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -895,7 +895,7 @@
"refId": "D"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -903,7 +903,7 @@
"refId": "E"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -919,7 +919,7 @@
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -935,7 +935,7 @@
"refId": "I"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -943,7 +943,7 @@
"refId": "J"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -968,7 +968,7 @@
"refId": "M"
},
{
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -977,7 +977,7 @@
"refId": "N"
},
{
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -1449,7 +1449,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1627,7 +1627,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1764,7 +1764,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2218,21 +2218,21 @@
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2419,7 +2419,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2572,7 +2572,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2754,14 +2754,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2769,7 +2769,7 @@
"refId": "B"
},
{
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2777,14 +2777,14 @@
"refId": "C"
},
{
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2910,7 +2910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3046,7 +3046,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3370,14 +3370,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3385,7 +3385,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3393,35 +3393,35 @@
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "F"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3873,7 +3873,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -4008,7 +4008,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",

View File

@@ -686,7 +686,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -759,7 +759,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -847,7 +847,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
"format": "table",
"hide": false,
"instant": true,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
"format": "table",
"hide": false,
"instant": true,
@@ -899,7 +899,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1503,7 +1503,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1669,7 +1669,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1681,7 +1681,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1820,7 +1820,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2269,7 +2269,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2476,7 +2476,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2488,7 +2488,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2639,7 +2639,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2816,7 +2816,7 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2824,28 +2824,28 @@
"refId": "A"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3110,7 +3110,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3431,7 +3431,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -3439,7 +3439,7 @@
"refId": "A"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3447,28 +3447,28 @@
"refId": "B"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
@@ -3482,7 +3482,7 @@
"refId": "G"
},
{
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3930,7 +3930,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ container }}",
@@ -4068,7 +4068,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ container }}",

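A note on the filter change that repeats throughout the two dashboard diffs above: in cAdvisor metrics scraped from the kubelet, series with an empty `container` label are typically pod-level cgroup aggregates, while `container="POD"` marks the pause (sandbox) container. The hedged sketch below, which assumes a reachable Prometheus endpoint (the URL is illustrative), contrasts the two exclusions; it is a reading aid, not part of the changeset.

```bash
# Illustrative only: compare the two exclusion filters used in the dashboards.
# PROM_URL is an assumption; point it at any Prometheus that scrapes cAdvisor.
PROM_URL="http://prometheus.example:9090"

# container!="POD" drops only the pause/sandbox container, but still includes
# the pod-level aggregate series whose container label is empty:
curl -sG "$PROM_URL/api/v1/query" \
  --data-urlencode 'query=sum by (namespace) (rate(container_cpu_usage_seconds_total{container!="POD"}[5m]))'

# container!="" drops the pod-level aggregate series, but still includes
# the pause container:
curl -sG "$PROM_URL/api/v1/query" \
  --data-urlencode 'query=sum by (namespace) (rate(container_cpu_usage_seconds_total{container!=""}[5m]))'
```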
View File

@@ -1,37 +1,10 @@
# Release Workflow
This document describes Cozystack's release process.
## Introduction
Cozystack uses a staged release process to ensure stability and flexibility during development.
There are three types of releases:
- **Release Candidates (RC):** Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases:** Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases:** Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.
Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.
## Release Candidates
Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.
Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into main and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.
Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in main.
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.
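For illustration only, the flow just described might be driven by commands along these lines; the specific git invocations and the `v0.42.0` example version are assumptions, not steps mandated by this document.

```bash
# Hypothetical sketch of the tagging flow described above (versions are examples).
git checkout main

# Tag and publish a release candidate once the feature set is merged:
git tag v0.42.0-rc.1
git push origin v0.42.0-rc.1

# After testing, tag the regular release from the last RC or a later commit:
git tag v0.42.0
git push origin v0.42.0

# Cut the maintenance branch that future patch releases build on:
git branch release-0.42 v0.42.0
git push origin release-0.42
```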
This section explains how Cozystack builds and releases are made.
## Regular Releases
When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
In this explanation, we'll use version `v0.42.0` as an example:
```mermaid

165
hack/e2e.application.sh Executable file
View File

@@ -0,0 +1,165 @@
#!/bin/bash
RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'
ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"
values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"
function delete_hr() {
local release_name="$1"
local namespace="$2"
if [[ -z "$release_name" ]]; then
echo -e "${RED}Error: Release name is required.${RESET}"
exit 1
fi
if [[ -z "$namespace" ]]; then
echo -e "${RED}Error: Namespace name is required.${RESET}"
exit 1
fi
if [[ "$release_name" == "tenant-e2e" ]]; then
echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
return 0
fi
kubectl delete helmrelease $release_name -n $namespace
}
function install_helmrelease() {
local release_name="$1"
local namespace="$2"
local chart_path="$3"
local repo_name="$4"
local repo_ns="$5"
local values_file="$6"
if [[ -z "$release_name" ]]; then
echo -e "${RED}Error: Release name is required.${RESET}"
exit 1
fi
if [[ -z "$namespace" ]]; then
echo -e "${RED}Error: Namespace name is required.${RESET}"
exit 1
fi
if [[ -z "$chart_path" ]]; then
echo -e "${RED}Error: Chart path name is required.${RESET}"
exit 1
fi
if [[ -n "$values_file" && -f "$values_file" ]]; then
local values_section
values_section=$(echo " values:" && sed 's/^/ /' "$values_file")
fi
local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
{
echo "apiVersion: helm.toolkit.fluxcd.io/v2"
echo "kind: HelmRelease"
echo "metadata:"
echo " labels:"
echo " cozystack.io/ui: \"true\""
echo " name: \"$release_name\""
echo " namespace: \"$namespace\""
echo "spec:"
echo " chart:"
echo " spec:"
echo " chart: \"$chart_path\""
echo " reconcileStrategy: Revision"
echo " sourceRef:"
echo " kind: HelmRepository"
echo " name: \"$repo_name\""
echo " namespace: \"$repo_ns\""
echo " version: '*'"
echo " interval: 1m0s"
echo " timeout: 5m0s"
[[ -n "$values_section" ]] && echo "$values_section"
} > "$helmrelease_file"
kubectl apply -f "$helmrelease_file"
rm -f "$helmrelease_file"
}
function install_tenant (){
local release_name="$1"
local namespace="$2"
local values_file="${values_base_path}tenant/values.yaml"
local repo_name="cozystack-apps"
local repo_ns="cozy-public"
install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}
function make_extra_checks(){
local checks_file="$1"
echo "after exec make $checks_file"
if [[ -n "$checks_file" && -f "$checks_file" ]]; then
echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"
fi
}
function check_helmrelease_status() {
local release_name="$1"
local namespace="$2"
local checks_file="$3"
local timeout=300 # Timeout in seconds
local interval=5 # Interval between checks in seconds
local elapsed=0
while [[ $elapsed -lt $timeout ]]; do
local status_output
status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')
if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
make_extra_checks "$checks_file"
delete_hr $release_name $namespace
return 0
elif [[ "$status_output" == "InstallFailed" ]]; then
echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
exit 1
else
echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
fi
sleep "$interval"
elapsed=$((elapsed + interval))
done
echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
exit 1
}
chart_name="$1"
if [ -z "$chart_name" ]; then
echo -e "${RED}No chart name provided. Exiting...${RESET}"
exit 1
fi
checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"
install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"
echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"
install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file
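A possible invocation of this script is sketched below. Note that it reads values and checks from the absolute path `/hack/testdata/`, so it is presumably meant to run in an environment where the repository contents are mounted at the filesystem root; the chart name, kubeconfig, and working environment in this example are assumptions.

```bash
# Hedged example: run the e2e flow for the "nats" chart.
# Assumes kubectl access to a Cozystack cluster and that the repository
# contents are available at / (the script reads /hack/testdata/...).
./hack/e2e.application.sh nats
```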

View File

@@ -60,8 +60,7 @@ done
# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
@@ -86,8 +85,7 @@ done
# Start VMs
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
-netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-drive file=srv$i/system.img,if=virtio,format=raw \
-drive file=srv$i/seed.img,if=virtio,format=raw \
-drive file=srv$i/data.img,if=virtio,format=raw \
@@ -123,7 +121,7 @@ machine:
files:
- content: |
[plugins]
[plugins."io.containerd.cri.v1.runtime"]
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
@@ -233,18 +231,11 @@ timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
sleep 5
# Wait for all HelmReleases to be installed
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
failed_hrs=$(kubectl get hr -A | grep -v True)
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
exit 1
fi
# Wait for Cluster-API providers
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
@@ -366,5 +357,5 @@ kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
"oidc-enabled": "true"
}}'
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator

View File

@@ -24,14 +24,14 @@ search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-
resolved_miss_map=$(
echo "$miss_map" | while read -r chart version commit; do
# if version is found in HEAD, it's HEAD
if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
if [ $(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml) = "${version}" ]; then
echo "$chart $version HEAD"
continue
fi
# if commit is not HEAD, check if it's valid
if [ "$commit" != "HEAD" ]; then
if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
if [ "x$commit" != "xHEAD" ]; then
if [ $(git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') != "${version}" ]; then
echo "Commit $commit for $chart $version is not valid" >&2
exit 1
fi
@@ -44,7 +44,7 @@ resolved_miss_map=$(
# if commit is HEAD, but version is not found in HEAD, check all tags
found_tag=""
for tag in $search_commits; do
if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
if [ $(git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') = "${version}" ]; then
found_tag=$(git rev-parse --short "${tag}")
break
fi

1
hack/testdata/http-cache/check.sh vendored Normal file
View File

@@ -0,0 +1 @@
return 0

2
hack/testdata/http-cache/values.yaml vendored Normal file
View File

@@ -0,0 +1,2 @@
endpoints:
- 8.8.8.8:443

1
hack/testdata/kubernetes/check.sh vendored Normal file
View File

@@ -0,0 +1 @@
return 0

62
hack/testdata/kubernetes/values.yaml vendored Normal file
View File

@@ -0,0 +1,62 @@
## @section Common parameters
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
replicas: 2
storageClass: replicated
## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
md0:
minReplicas: 0
maxReplicas: 10
instanceType: "u1.medium"
ephemeralStorage: 20Gi
roles:
- ingress-nginx
resources:
cpu: ""
memory: ""
## @section Cluster Addons
##
addons:
## Cert-manager: automatically creates and manages SSL/TLS certificate
##
certManager:
## @param addons.certManager.enabled Enables the cert-manager
## @param addons.certManager.valuesOverride Custom values to override
enabled: true
valuesOverride: {}
## Ingress-NGINX Controller
##
ingressNginx:
## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
## @param addons.ingressNginx.valuesOverride Custom values to override
##
enabled: true
## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by upper cluster
## e.g:
## hosts:
## - example.org
## - foo.example.net
##
hosts: []
valuesOverride: {}
## Flux CD
##
fluxcd:
## @param addons.fluxcd.enabled Enables Flux CD
## @param addons.fluxcd.valuesOverride Custom values to override
##
enabled: true
valuesOverride: {}

1
hack/testdata/nats/check.sh vendored Normal file
View File

@@ -0,0 +1 @@
return 0

10
hack/testdata/nats/values.yaml vendored Normal file
View File

@@ -0,0 +1,10 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param replicas Number of NATS replicas
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""

1
hack/testdata/tenant/check.sh vendored Normal file
View File

@@ -0,0 +1 @@
return 0

6
hack/testdata/tenant/values.yaml vendored Normal file
View File

@@ -0,0 +1,6 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true

View File

@@ -1,158 +0,0 @@
package controller
import (
"context"
"fmt"
"strings"
"time"
e "errors"
helmv2 "github.com/fluxcd/helm-controller/api/v2"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
)
type TenantHelmReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
hr := &helmv2.HelmRelease{}
if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
if errors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "unable to fetch HelmRelease")
return ctrl.Result{}, err
}
if !strings.HasPrefix(hr.Name, "tenant-") {
return ctrl.Result{}, nil
}
if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
return ctrl.Result{}, nil
}
if len(hr.Status.History) == 0 {
logger.Info("no history in HelmRelease status", "name", hr.Name)
return ctrl.Result{}, nil
}
if hr.Status.History[0].Status != "deployed" {
return ctrl.Result{}, nil
}
newDigest := hr.Status.History[0].Digest
var hrList helmv2.HelmReleaseList
childNamespace := getChildNamespace(hr.Namespace, hr.Name)
if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
if hr.Spec.Values == nil {
logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
return ctrl.Result{}, nil
}
err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
if err != nil {
logger.Error(err, "cant annotate tenant-root ns")
return ctrl.Result{}, nil
}
logger.Info("namespace 'tenant-root' annotated")
}
if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
return ctrl.Result{}, err
}
for _, item := range hrList.Items {
if item.Name == hr.Name {
continue
}
oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
if oldDigest == newDigest {
continue
}
patchTarget := item.DeepCopy()
if patchTarget.Annotations == nil {
patchTarget.Annotations = map[string]string{}
}
ts := time.Now().Format(time.RFC3339Nano)
patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts
patch := client.MergeFrom(item.DeepCopy())
if err := r.Patch(ctx, patchTarget, patch); err != nil {
logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
continue
}
logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
}
return ctrl.Result{}, nil
}
func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&helmv2.HelmRelease{}).
Complete(r)
}
func getChildNamespace(currentNamespace, hrName string) string {
tenantName := strings.TrimPrefix(hrName, "tenant-")
switch {
case currentNamespace == "tenant-root" && hrName == "tenant-root":
// 1) root tenant inside root namespace
return "tenant-root"
case currentNamespace == "tenant-root":
// 2) any other tenant in root namespace
return fmt.Sprintf("tenant-%s", tenantName)
default:
// 3) tenant in a dedicated namespace
return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
}
}
func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
var data map[string]interface{}
if err := yaml.Unmarshal(values.Raw, &data); err != nil {
return fmt.Errorf("failed to parse HelmRelease values: %w", err)
}
host, ok := data["host"].(string)
if !ok || host == "" {
return fmt.Errorf("host field not found or not a string")
}
var ns corev1.Namespace
if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
return fmt.Errorf("failed to get namespace tenant-root: %w", err)
}
if ns.Annotations == nil {
ns.Annotations = map[string]string{}
}
ns.Annotations["namespace.cozystack.io/host"] = host
if err := c.Update(context.TODO(), &ns); err != nil {
return fmt.Errorf("failed to update namespace: %w", err)
}
return nil
}

View File

@@ -39,15 +39,6 @@ func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (c
}
t := getMonitoredObject(w)
if t == nil {
err = r.Delete(ctx, w)
if err != nil {
logger.Error(err, "failed to delete workload")
}
return ctrl.Result{}, err
}
err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
// found object, nothing to do
@@ -77,23 +68,20 @@ func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
}
func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
switch {
case strings.HasPrefix(w.Name, "pvc-"):
if strings.HasPrefix(w.Name, "pvc-") {
obj := &corev1.PersistentVolumeClaim{}
obj.Name = strings.TrimPrefix(w.Name, "pvc-")
obj.Namespace = w.Namespace
return obj
case strings.HasPrefix(w.Name, "svc-"):
}
if strings.HasPrefix(w.Name, "svc-") {
obj := &corev1.Service{}
obj.Name = strings.TrimPrefix(w.Name, "svc-")
obj.Namespace = w.Namespace
return obj
case strings.HasPrefix(w.Name, "pod-"):
obj := &corev1.Pod{}
obj.Name = strings.TrimPrefix(w.Name, "pod-")
obj.Namespace = w.Namespace
return obj
}
var obj client.Object
obj := &corev1.Pod{}
obj.Name = w.Name
obj.Namespace = w.Namespace
return obj
}

View File

@@ -1,26 +0,0 @@
package controller
import (
"testing"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
)
func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
w := &cozyv1alpha1.Workload{}
w.Name = "unprefixed-name"
obj := getMonitoredObject(w)
if obj != nil {
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
}
}
func TestPodMonitoredObject(t *testing.T) {
w := &cozyv1alpha1.Workload{}
w.Name = "pod-mypod"
obj := getMonitoredObject(w)
if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
}
}

View File

@@ -212,12 +212,15 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
) error {
logger := log.FromContext(ctx)
// totalResources will store the sum of all container resource requests
// Combine both init containers and normal containers to sum resources properly
combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
// totalResources will store the sum of all container resource limits
totalResources := make(map[string]resource.Quantity)
// Iterate over all containers to aggregate their requests
for _, container := range pod.Spec.Containers {
for name, qty := range container.Resources.Requests {
// Iterate over all containers to aggregate their Limits
for _, container := range combinedContainers {
for name, qty := range container.Resources.Limits {
if existing, exists := totalResources[name.String()]; exists {
existing.Add(qty)
totalResources[name.String()] = existing
@@ -246,7 +249,7 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
workload := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%s", pod.Name),
Name: pod.Name,
Namespace: pod.Namespace,
},
}

View File

@@ -1,3 +0,0 @@
# S3 bucket
## Parameters

View File

@@ -11,7 +11,7 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
version: '*'
interval: 1m0s
timeout: 5m0s
values:

View File

@@ -1,5 +0,0 @@
{
"title": "Chart Values",
"type": "object",
"properties": {}
}

View File

@@ -1 +0,0 @@
{}

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:4e1f5153d2673a399b315252238f4dc3eb5d6c59295aef594691710cc5b72eb4
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.20.0
version: 0.18.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -1,4 +1,4 @@
KUBERNETES_VERSION = v1.32
UBUNTU_CONTAINER_DISK_TAG = v1.30.1
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
include ../../../scripts/common-envs.mk
@@ -6,26 +6,21 @@ include ../../../scripts/package.mk
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler
image-ubuntu-container-disk:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \
--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
> images/ubuntu-container-disk.tag
rm -f images/ubuntu-container-disk.json

View File

@@ -27,47 +27,20 @@ How to access to deployed cluster:
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```
## Parameters
# Series
### Common parameters
<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->
| Name | Description | Value |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
| `storageClass` | StorageClass used to store user data | `replicated` |
| `nodeGroups` | nodeGroups configuration | `{}` |
### Cluster Addons
| Name | Description | Value |
| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
| `addons.cilium.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
### Kubernetes control plane configuration
| Name | Description | Value |
| -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
| `controlPlane.apiServer.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resources` | Resources | `{}` |
| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.scheduler.resources` | Resources | `{}` |
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
| `controlPlane.konnectivity.server.resources` | Resources | `{}` |
. | U | O | CX | M | RT
----------------------------|-----|-----|------|-----|------
*Has GPUs* | | | | |
*Hugepages* | | | | ✓ | ✓
*Overcommitted Memory* | | | | |
*Dedicated CPU* | | | | | ✓
*Burstable CPU performance* | ✓ | ✓ | | ✓ |
*Isolated emulator threads* | | | ✓ | | ✓
*vNUMA* | | | ✓ | | ✓
*vCPU-To-Memory Ratio* | 1:4 | 1:4 | 1:2 | 1:8 | 1:4
## U Series
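The table above compares the instance-type series (GPU support, hugepages, CPU guarantees, vCPU-to-memory ratio). Below is a hypothetical `nodeGroups` fragment that picks the U series for a worker group; only `instanceType`, `resources` and `gpus` are taken from the values.yaml shown in this diff, while the group name and replica fields are assumptions for illustration:

```yaml
nodeGroups:
  md0:                            # group name is a placeholder
    minReplicas: 0                # assumed field, shown for context
    maxReplicas: 10               # assumed field, shown for context
    instanceType: "u1.xlarge"     # U series: burstable CPU, 1:4 vCPU-to-memory
    resources:
      cpu: ""                     # empty values fall back to the instance type
      memory: ""
    gpus: []                      # e.g. [{name: nvidia.com/AD102GL_L40S}]
```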

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.19.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.19.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.19.0@sha256:5717919c75e609902c6d67138311a2a8fd07be822e2173f3802b67cf5f3486e9
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:4a4f8bee150e04d1efcd5ff1ea83e12f495a98851cc5fd47ef41ac7aebce9b74
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a

View File

@@ -1,4 +1,3 @@
# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
FROM ubuntu:22.04 as guestfish
ARG DEBIAN_FRONTEND=noninteractive
@@ -6,7 +5,6 @@ RUN apt-get update \
&& apt-get -y install \
libguestfs-tools \
linux-image-generic \
wget \
make \
bash-completion \
&& apt-get clean
@@ -15,10 +13,7 @@ WORKDIR /build
FROM guestfish as builder
# noble is a code name for the Ubuntu 24.04 LTS release
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
ARG KUBERNETES_VERSION
RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
RUN qemu-img resize image.img 5G \
&& eval "$(guestfish --listen --network)" \
@@ -31,8 +26,8 @@ RUN qemu-img resize image.img 5G \
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
# install containerd
&& guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -y containerd.io" \

View File

@@ -39,13 +39,6 @@ spec:
sockets: 1
{{- end }}
devices:
{{- if .group.gpus }}
gpus:
{{- range $i, $gpu := .group.gpus }}
- name: gpu{{ add $i 1 }}
deviceName: {{ $gpu.name }}
{{- end }}
{{- end }}
disks:
- name: system
disk:
@@ -110,22 +103,22 @@ metadata:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
apiServer:
{{- if .Values.controlPlane.apiServer.resources }}
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
{{- if .Values.kamajiControlPlane.apiServer.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
controllerManager:
{{- if .Values.controlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
{{- if .Values.kamajiControlPlane.controllerManager.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
scheduler:
{{- if .Values.controlPlane.scheduler.resources }}
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
{{- if .Values.kamajiControlPlane.scheduler.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
{{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
{{- end }}
dataStoreName: "{{ $etcd }}"
addons:
@@ -135,10 +128,10 @@ spec:
konnectivity:
server:
port: 8132
{{- if .Values.controlPlane.konnectivity.server.resources }}
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
{{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
{{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
{{- end }}
kubelet:
cgroupfs: systemd
@@ -283,7 +276,7 @@ spec:
kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }}
version: v1.32.3
version: v1.30.1
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
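Across the control-plane hunks above, explicit `resources` always win: a `resourcesPreset` is only rendered when `resources` is empty and the preset is not `none`. This holds for the apiServer, controllerManager, scheduler and konnectivity server blocks on both sides of the diff (one side names the value path `controlPlane.*`, the other `kamajiControlPlane.*`). A minimal sketch of the two ways to size the API server, reusing the limits/requests figures that appear as comments in this chart's values.yaml:

```yaml
# Preset only: resources is empty, so the "resources.preset" helper applies.
controlPlane:
  apiServer:
    resourcesPreset: "small"
    resources: {}
---
# Explicit resources: the preset is ignored once resources is non-empty.
controlPlane:
  apiServer:
    resourcesPreset: "small"     # has no effect here
    resources:
      limits:
        cpu: 4000m
        memory: 4Gi
      requests:
        cpu: 100m
        memory: 512Mi
```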

View File

@@ -4,7 +4,7 @@ metadata:
name: {{ .Release.Name }}-cert-manager-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cert-manager-crds
@@ -16,7 +16,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-cert-manager
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cert-manager
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -31,9 +30,11 @@ spec:
upgrade:
remediation:
retries: -1
{{- with .Values.addons.certManager.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- if .Values.addons.certManager.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-cert-manager-values-override
valuesKey: values
{{- end }}
dependsOn:
@@ -46,3 +47,13 @@ spec:
- name: {{ .Release.Name }}-cert-manager-crds
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if .Values.addons.certManager.valuesOverride }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-cert-manager-values-override
stringData:
values: |
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
{{- end }}

View File

@@ -1,19 +1,10 @@
{{- define "cozystack.defaultCiliumValues" -}}
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
{{- end }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-cilium
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cilium
@@ -25,7 +16,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -40,7 +30,12 @@ spec:
remediation:
retries: -1
values:
{{- toYaml (deepCopy .Values.addons.cilium.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultCiliumValues" .))) | nindent 4 }}
cilium:
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
routingMode: tunnel
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
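One side of this file keeps the Cilium settings inline under `values:`, the other moves them into a `cozystack.defaultCiliumValues` helper and merges user overrides on top with `deepCopy ... | mergeOverwrite ...`, so keys from `addons.cilium.valuesOverride` replace matching defaults while untouched defaults survive. A sketch of the effective merge, with an illustrative override (the override keys are assumptions, not taken from this diff):

```yaml
# Defaults from the "cozystack.defaultCiliumValues" helper shown above:
#   cilium:
#     k8sServiceHost: <release>.<namespace>.svc
#     k8sServicePort: 6443
#     routingMode: tunnel
#     enableIPv4Masquerade: true
#     ipv4NativeRoutingCIDR: ""

# Hypothetical user input:
addons:
  cilium:
    valuesOverride:
      cilium:
        routingMode: native
        ipv4NativeRoutingCIDR: "10.244.0.0/16"

# Effective HelmRelease .spec.values after mergeOverwrite (override wins per key):
#   cilium:
#     k8sServiceHost: <release>.<namespace>.svc
#     k8sServicePort: 6443
#     routingMode: native
#     enableIPv4Masquerade: true
#     ipv4NativeRoutingCIDR: "10.244.0.0/16"
```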

View File

@@ -4,7 +4,7 @@ metadata:
name: {{ .Release.Name }}-csi
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: csi
@@ -16,7 +16,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -20,7 +20,7 @@ spec:
effect: "NoSchedule"
containers:
- name: kubectl
image: docker.io/clastix/kubectl:v1.32
image: docker.io/clastix/kubectl:v1.30.1
command:
- /bin/sh
- -c
@@ -38,7 +38,6 @@ spec:
{{ .Release.Name }}-ingress-nginx
{{ .Release.Name }}-fluxcd-operator
{{ .Release.Name }}-fluxcd
{{ .Release.Name }}-gpu-operator
-p '{"spec": {"suspend": true}}'
--type=merge --field-manager=flux-client-side-apply || true
---
@@ -77,7 +76,6 @@ rules:
- {{ .Release.Name }}-ingress-nginx
- {{ .Release.Name }}-fluxcd-operator
- {{ .Release.Name }}-fluxcd
- {{ .Release.Name }}-gpu-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding

View File

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-fluxcd-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: fluxcd-operator
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -50,7 +49,7 @@ metadata:
name: {{ .Release.Name }}-fluxcd
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: fluxcd
@@ -62,7 +61,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
@@ -75,9 +73,11 @@ spec:
upgrade:
remediation:
retries: -1
{{- with .Values.addons.fluxcd.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- if .Values.addons.fluxcd.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-fluxcd-values-override
valuesKey: values
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
@@ -89,3 +89,14 @@ spec:
- name: {{ .Release.Name }}-fluxcd-operator
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if .Values.addons.fluxcd.valuesOverride }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-fluxcd-values-override
stringData:
values: |
{{- toYaml .Values.addons.fluxcd.valuesOverride | nindent 4 }}
{{- end }}

View File

@@ -1,46 +0,0 @@
{{- if .Values.addons.gpuOperator.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-gpu-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: gpu-operator
chart:
spec:
chart: cozy-gpu-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
key: super-admin.svc
targetNamespace: cozy-gpu-operator
storageNamespace: cozy-gpu-operator
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
{{- with .Values.addons.gpuOperator.valuesOverride }}
values:
{{- toYaml . | nindent 4 }}
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}
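This file adds (or removes, depending on which side you read) a standalone HelmRelease for the NVIDIA GPU operator, gated on `addons.gpuOperator.enabled`, with `valuesOverride` passed straight into `.spec.values` and a dependency on the Cilium release. A hypothetical tenant-cluster values fragment that enables it and attaches a GPU to a worker group (device name from the example in this chart's values.yaml; the group name and instance type are made up):

```yaml
addons:
  gpuOperator:
    enabled: true
    valuesOverride: {}            # forwarded verbatim to the cozy-gpu-operator chart
nodeGroups:
  gpu-workers:                    # placeholder group name
    instanceType: "u1.xlarge"     # illustrative
    gpus:
    - name: nvidia.com/AD102GL_L40S
```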

View File

@@ -1,15 +1,3 @@
{{- define "cozystack.defaultIngressValues" -}}
ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
hostNetwork: true
service:
enabled: false
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
{{- end }}
{{- if .Values.addons.ingressNginx.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
@@ -17,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-ingress-nginx
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: ingress-nginx
@@ -29,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -44,7 +31,21 @@ spec:
remediation:
retries: -1
values:
{{- toYaml (deepCopy .Values.addons.ingressNginx.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultIngressValues" .))) | nindent 4 }}
ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
hostNetwork: true
service:
enabled: false
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
{{- if .Values.addons.ingressNginx.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-ingress-nginx-values-override
valuesKey: values
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
@@ -53,3 +54,14 @@ spec:
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if .Values.addons.ingressNginx.valuesOverride }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-ingress-nginx-values-override
stringData:
values: |
{{- toYaml .Values.addons.ingressNginx.valuesOverride | nindent 4 }}
{{- end }}

View File

@@ -7,7 +7,7 @@ metadata:
name: {{ .Release.Name }}-monitoring-agents
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cozy-monitoring-agents
@@ -19,7 +19,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: vertical-pod-autoscaler-crds
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -1,28 +1,5 @@
{{- define "cozystack.defaultVPAValues" -}}
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
vertical-pod-autoscaler:
recommender:
extraArgs:
container-name-label: container
container-namespace-label: namespace
container-pod-name-label: pod
storage: prometheus
memory-saver: true
pod-label-prefix: label_
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
pod-name-label: pod
pod-namespace-label: namespace
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
prometheus-cadvisor-job-name: cadvisor
resources:
limits:
memory: 1600Mi
requests:
cpu: 100m
memory: 1600Mi
{{- end }}
{{- if .Values.addons.monitoringAgents.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
@@ -30,7 +7,7 @@ metadata:
name: {{ .Release.Name }}-vertical-pod-autoscaler
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: vertical-pod-autoscaler
@@ -42,7 +19,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig
@@ -57,7 +33,32 @@ spec:
remediation:
retries: -1
values:
{{- toYaml (deepCopy .Values.addons.verticalPodAutoscaler.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultVPAValues" .))) | nindent 4 }}
vertical-pod-autoscaler:
recommender:
extraArgs:
container-name-label: container
container-namespace-label: namespace
container-pod-name-label: pod
storage: prometheus
memory-saver: true
pod-label-prefix: label_
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
pod-name-label: pod
pod-namespace-label: namespace
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
prometheus-cadvisor-job-name: cadvisor
resources:
limits:
memory: 1600Mi
requests:
cpu: 100m
memory: 1600Mi
{{- if .Values.addons.verticalPodAutoscaler.valuesOverride }}
valuesFrom:
- kind: Secret
name: {{ .Release.Name }}-vertical-pod-autoscaler-values-override
valuesKey: values
{{- end }}
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}

View File

@@ -5,7 +5,7 @@ metadata:
name: {{ .Release.Name }}-cozy-victoria-metrics-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cozy-victoria-metrics-operator
@@ -17,7 +17,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
kubeConfig:
secretRef:
name: {{ .Release.Name }}-admin-kubeconfig

View File

@@ -1,237 +1,97 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"host": {
"type": "string",
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
"default": ""
},
"controlPlane": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of replicas for Kubernetes control-plane components",
"default": 2
"title": "Chart Values",
"type": "object",
"properties": {
"host": {
"type": "string",
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
"default": ""
},
"apiServer": {
"type": "object",
"properties": {
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "small",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
}
}
},
"controllerManager": {
"type": "object",
"properties": {
"resources": {
"type": "object",
"description": "Resources",
"default": {}
},
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
}
}
},
"scheduler": {
"type": "object",
"properties": {
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
}
}
},
"konnectivity": {
"type": "object",
"properties": {
"server": {
"type": "object",
"properties": {
"resourcesPreset": {
"type": "string",
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
"default": "micro",
"enum": [
"none",
"nano",
"micro",
"small",
"medium",
"large",
"xlarge",
"2xlarge"
]
},
"resources": {
"type": "object",
"description": "Resources",
"default": {}
"controlPlane": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of replicas for Kubernetes contorl-plane components",
"default": 2
}
}
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store user data",
"default": "replicated"
},
"addons": {
"type": "object",
"properties": {
"certManager": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the cert-manager",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"ingressNginx": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
},
"hosts": {
"type": "array",
"description": "List of domain names that should be passed through to the cluster by upper cluster",
"default": [],
"items": {}
}
}
},
"fluxcd": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables Flux CD",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"monitoringAgents": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
}
}
}
}
}
}
},
"storageClass": {
"type": "string",
"description": "StorageClass used to store user data",
"default": "replicated"
},
"addons": {
"type": "object",
"properties": {
"certManager": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the cert-manager",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"cilium": {
"type": "object",
"properties": {
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"ingressNginx": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
},
"hosts": {
"type": "array",
"description": "List of domain names that should be passed through to the cluster by upper cluster",
"default": [],
"items": {}
}
}
},
"gpuOperator": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables the gpu-operator",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"fluxcd": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables Flux CD",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"monitoringAgents": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
"default": false
},
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
},
"verticalPodAutoscaler": {
"type": "object",
"properties": {
"valuesOverride": {
"type": "object",
"description": "Custom values to override",
"default": {}
}
}
}
}
}
}
}

View File

@@ -1,10 +1,12 @@
## @section Common parameters
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
replicas: 2
storageClass: replicated
## @param nodeGroups [object] nodeGroups configuration
@@ -22,14 +24,6 @@ nodeGroups:
cpu: ""
memory: ""
## List of GPUs to attach (WARN: NVIDIA driver requires at least 4 GiB of RAM)
## e.g:
## instanceType: "u1.xlarge"
## gpus:
## - name: nvidia.com/AD102GL_L40S
gpus: []
## @section Cluster Addons
##
addons:
@@ -42,12 +36,6 @@ addons:
enabled: false
valuesOverride: {}
## Cilium CNI plugin
##
cilium:
## @param addons.cilium.valuesOverride Custom values to override
valuesOverride: {}
## Ingress-NGINX Controller
##
ingressNginx:
@@ -64,14 +52,6 @@ addons:
hosts: []
valuesOverride: {}
## GPU-operator: NVIDIA GPU Operator
##
gpuOperator:
## @param addons.gpuOperator.enabled Enables the gpu-operator
## @param addons.gpuOperator.valuesOverride Custom values to override
enabled: false
valuesOverride: {}
## Flux CD
##
fluxcd:
@@ -97,42 +77,62 @@ addons:
##
valuesOverride: {}
## @section Kubernetes control plane configuration
## @section Kamaji control plane
##
controlPlane:
replicas: 2
kamajiControlPlane:
apiServer:
## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## @param controlPlane.apiServer.resources Resources
## e.g:
## resources:
## limits:
## cpu: 4000m
## memory: 4Gi
## requests:
## cpu: 100m
## memory: 512Mi
##
resourcesPreset: "small"
## @param kamajiControlPlane.apiServer.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "small"
controllerManager:
## @param controlPlane.controllerManager.resources Resources
## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "micro"
## @param kamajiControlPlane.controllerManager.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param kamajiControlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "micro"
scheduler:
## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## @param controlPlane.scheduler.resources Resources
resourcesPreset: "micro"
## @param kamajiControlPlane.scheduler.resources Resources
resources: {}
konnectivity:
server:
## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
## @param controlPlane.konnectivity.server.resources Resources
resourcesPreset: "micro"
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param kamajiControlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "micro"
addons:
konnectivity:
server:
## @param kamajiControlPlane.addons.konnectivity.server.resources Resources
resources: {}
# resources:
# limits:
# cpu: 4000m
# memory: 4Gi
# requests:
# cpu: 100m
# memory: 512Mi
## @param kamajiControlPlane.addons.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "micro"

View File

@@ -33,7 +33,7 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
version: '*'
interval: 1m0s
timeout: 5m0s
values:

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.10.1
version: 0.10.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.1@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

View File

@@ -13,6 +13,9 @@ spec:
jobTemplate:
spec:
backoffLimit: 2
template:
spec:
restartPolicy: OnFailure
template:
metadata:
annotations:
@@ -21,7 +24,7 @@ spec:
spec:
imagePullSecrets:
- name: {{ .Release.Name }}-regsecret
restartPolicy: OnFailure
restartPolicy: Never
containers:
- name: pgdump
image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"

View File

@@ -4,4 +4,4 @@ description: Separated tenant namespace
icon: /logos/tenant.svg
type: application
version: 1.9.2
version: 1.9.1

View File

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: cozy-tenant-configuration-hash
namespace: {{ include "tenant.name" . }}
data:
cozyTenantConfigurationHash: {{ sha256sum (toJson .Values) | quote }}
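The template above hashes the tenant's rendered values (`sha256sum (toJson .Values)`) into a ConfigMap in the tenant namespace, so the ConfigMap content changes whenever the tenant's values change. Elsewhere in this diff the same idea is wired into a HelmRelease through `valuesFrom`; a hypothetical consumer in a tenant's release namespace would look roughly like this (release, chart and repository names are placeholders):

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: example-app                      # placeholder
  namespace: tenant-example              # placeholder release namespace
spec:
  interval: 5m
  chart:
    spec:
      chart: example-app                 # placeholder
      sourceRef:
        kind: HelmRepository
        name: example-repo               # placeholder
        namespace: cozy-public
  valuesFrom:
  - kind: ConfigMap
    name: cozy-tenant-configuration-hash
    valuesKey: cozyTenantConfigurationHash
    targetPath: cozyTenantConfigurationHash
    optional: true
```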

View File

@@ -24,7 +24,6 @@ spec:
ingress:
- fromEntities:
- world
- cluster
egress:
- toEntities:
- world

View File

@@ -59,8 +59,7 @@ kubernetes 0.16.0 077045b0
kubernetes 0.17.0 1fbbfcd0
kubernetes 0.17.1 fd240701
kubernetes 0.18.0 721c12a7
kubernetes 0.19.0 93bdf411
kubernetes 0.20.0 HEAD
kubernetes 0.18.1 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e
@@ -90,8 +89,7 @@ postgres 0.7.0 4b90bf5a
postgres 0.7.1 1ec10165
postgres 0.8.0 4e68e65c
postgres 0.9.0 8267072d
postgres 0.10.0 721c12a7
postgres 0.10.1 HEAD
postgres 0.10.0 HEAD
rabbitmq 0.1.0 263e47be
rabbitmq 0.2.0 53f2365e
rabbitmq 0.3.0 6c5cf5bf
@@ -132,8 +130,7 @@ tenant 1.6.8 bc95159a
tenant 1.7.0 24fa7222
tenant 1.8.0 160e4e2a
tenant 1.9.0 728743db
tenant 1.9.1 721c12a7
tenant 1.9.2 HEAD
tenant 1.9.1 HEAD
virtual-machine 0.1.4 f2015d65
virtual-machine 0.1.5 263e47be
virtual-machine 0.2.0 c0685f43
@@ -146,8 +143,7 @@ virtual-machine 0.7.1 0ab39f20
virtual-machine 0.8.0 3fa4dd3a
virtual-machine 0.8.1 93c46161
virtual-machine 0.8.2 de19450f
virtual-machine 0.9.0 721c12a7
virtual-machine 0.9.1 HEAD
virtual-machine 0.9.0 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 HEAD
vm-instance 0.1.0 1ec10165
@@ -157,8 +153,7 @@ vm-instance 0.4.0 e23286a3
vm-instance 0.4.1 0ab39f20
vm-instance 0.5.0 3fa4dd3a
vm-instance 0.5.1 de19450f
vm-instance 0.6.0 721c12a7
vm-instance 0.6.1 HEAD
vm-instance 0.6.0 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf

View File

@@ -17,7 +17,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.1
version: 0.9.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -74,8 +74,7 @@ spec:
{{- if .Values.gpus }}
gpus:
{{- range $i, $gpu := .Values.gpus }}
- name: gpu{{ add $i 1 }}
deviceName: {{ $gpu.name }}
- deviceName: {{ $gpu.name }}
{{- end }}
{{- end }}
disks:
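The hunk above changes how entries from `.Values.gpus` are rendered into the KubeVirt `devices.gpus` list: one side emits a generated `name: gpu<index+1>` next to `deviceName`, the other emits `deviceName` alone. A sketch of the input and the two rendered forms (device name taken from the GPU example elsewhere in this diff):

```yaml
# Input (.Values.gpus):
gpus:
- name: nvidia.com/AD102GL_L40S
---
# Rendered with generated names:
devices:
  gpus:
  - name: gpu1
    deviceName: nvidia.com/AD102GL_L40S
---
# Rendered with deviceName only:
devices:
  gpus:
  - deviceName: nvidia.com/AD102GL_L40S
```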

View File

@@ -17,7 +17,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.6.1
version: 0.6.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -46,8 +46,7 @@ spec:
{{- if .Values.gpus }}
gpus:
{{- range $i, $gpu := .Values.gpus }}
- name: gpu{{ add $i 1 }}
deviceName: {{ $gpu.name }}
- deviceName: {{ $gpu.name }}
{{- end }}
{{- end }}
disks:

View File

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.31.0-rc.1@sha256:ab0e8fd97632ba784a42a3d0714806ea327440f82ffa5c4896a87c5fb7c1ec6e
image: ghcr.io/cozystack/cozystack/installer:v0.30.2@sha256:59996588b5d59b5593fb34442b2f2ed8ef466d138b229a8d37beb6f70141a690

View File

@@ -161,7 +161,7 @@ releases:
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cilium,cert-manager]
dependsOn: [cilium,cert-manager,victoria-metrics-operator]
- name: snapshot-controller
releaseName: snapshot-controller

View File

@@ -134,11 +134,6 @@ releases:
namespace: cozy-kubevirt
privileged: true
dependsOn: [cilium,kubeovn,kubevirt-operator]
{{- $cpuAllocationRatio := index $cozyConfig.data "cpu-allocation-ratio" }}
{{- if $cpuAllocationRatio }}
values:
cpuAllocationRatio: {{ $cpuAllocationRatio }}
{{- end }}
- name: kubevirt-instancetypes
releaseName: kubevirt-instancetypes
@@ -321,7 +316,12 @@ releases:
name: kubeapps-auth-config
valuesKey: values.yaml
{{- end }}
{{- if eq $oidcEnabled "true" }}
dependsOn: [keycloak-configure]
{{- else }}
dependsOn: []
{{- end }}
- name: kamaji
releaseName: kamaji

View File

@@ -8,7 +8,7 @@
{{- $host = index $cozyConfig.data "root-host" }}
{{- end }}
{{- end }}
{{- $tenantRoot := dict }}
{{- $tenantRoot := list }}
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" "tenant-root" "tenant-root" }}
{{- end }}
@@ -37,7 +37,7 @@ metadata:
labels:
cozystack.io/ui: "true"
spec:
interval: 0s
interval: 1m
releaseName: tenant-root
install:
remediation:
@@ -54,6 +54,12 @@ spec:
namespace: cozy-public
values:
host: "{{ $host }}"
valuesFrom:
- kind: ConfigMap
name: "cozy-system-configuration-hash"
valuesKey: "cozyTenantConfigurationHash"
targetPath: "cozyTenantConfigurationHash"
optional: true
dependsOn:
{{- range $x := $bundle.releases }}
{{- if has $x.name (list "cilium" "kubeovn") }}

View File

@@ -0,0 +1,14 @@
{{- $rootTenantConfiguration := dict "values" .Values }}
{{- $cozyConfig := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack" ) "data" }}
{{- $cozyScheduling := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling") "data" }}
{{- $cozyBranding := index (lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" ) "data" }}
{{- $_ := set $rootTenantConfiguration "config" $cozyConfig }}
{{- $_ := set $rootTenantConfiguration "scheduling" $cozyScheduling }}
{{- $_ := set $rootTenantConfiguration "branding" $cozyBranding }}
apiVersion: v1
kind: ConfigMap
metadata:
name: cozy-system-configuration-hash
namespace: tenant-root
data:
cozyTenantConfigurationHash: {{ sha256sum (toJson $rootTenantConfiguration) | quote }}

View File

@@ -7,9 +7,6 @@
{{/* collect dependency namespaces from releases */}}
{{- range $x := $bundle.releases }}
{{- if or (has $x.name $disabledComponents) (and ($x.optional) (not (has $x.name $enabledComponents))) }}
{{- continue }}
{{- end }}
{{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
{{- end }}
@@ -58,7 +55,6 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
{{- with $x.valuesFiles }}
valuesFiles:
{{- toYaml $x.valuesFiles | nindent 6 }}
@@ -89,7 +85,7 @@ spec:
{{- with $x.dependsOn }}
dependsOn:
{{- range $dep := . }}
{{- if hasKey $dependencyNamespaces $dep }}
{{- if not (has $dep $disabledComponents) }}
- name: {{ $dep }}
namespace: {{ index $dependencyNamespaces $dep }}
{{- end }}

View File

@@ -11,6 +11,14 @@ include ../../../scripts/common-envs.mk
help: ## Show this help.
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
show:
helm template -n $(NAMESPACE) $(NAME) .
apply: ## Create sandbox in existing Kubernetes cluster.
helm template -n $(NAMESPACE) $(NAME) . | kubectl apply -f -
diff:
helm template -n $(NAMESPACE) $(NAME) . | kubectl diff -f -
image: image-e2e-sandbox
@@ -31,11 +39,26 @@ image-e2e-sandbox:
test: ## Run the end-to-end tests in existing sandbox.
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'
test-applications: ## Run the end-to-end tests in existing sandbox for applications.
for app in $(TESTING_APPS); do \
docker exec ${SANDBOX_NAME} bash -c "/hack/e2e.application.sh $${app}"; \
done
docker exec ${SANDBOX_NAME} bash -c "kubectl get hr -A | grep -v 'True'"
delete: ## Remove sandbox from existing Kubernetes cluster.
docker rm -f "${SANDBOX_NAME}" || true
exec: ## Opens an interactive shell in the sandbox container.
docker exec -ti "${SANDBOX_NAME}" bash
docker exec -ti "${SANDBOX_NAME}" -- bash
proxy: sync-hosts ## Enable a SOCKS5 proxy server; mirrord and gost must be installed.
mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- gost -L=127.0.0.1:10080
login: ## Downloads the kubeconfig into a temporary directory and runs a shell with the sandbox environment; mirrord must be installed.
mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- "$$SHELL"
sync-hosts:
kubectl exec -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'kubectl get ing -A -o go-template='\''{{ "127.0.0.1 localhost\n"}}{{ range .items }}{{ range .status.loadBalancer.ingress }}{{ .ip }}{{ end }} {{ range .spec.rules }}{{ .host }}{{ end }}{{ "\n" }}{{ end }}'\'' > /etc/hosts'
apply: delete
docker run -d --rm --name "${SANDBOX_NAME}" --privileged "$$(yq .e2e.image values.yaml)" sleep infinity

View File

@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.31.0-rc.1@sha256:a20a6834527ccfc8daf7413a15234f3f7dbbd7774810c8e1966736d487ef7d0c
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.2@sha256:31273d6b42dc88c2be2ff9ba64564d1b12e70ae8a5480953341b0d113ac7d4bd

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.31.0-rc.1@sha256:de69166fd6efec988cad7ad5be41bbb57c8134508c531d7496fc7f15772e4993
ghcr.io/cozystack/cozystack/matchbox:v0.30.2@sha256:307d382f75f1dcb39820c73b93b2ce576cdb6d58032679bda7d926999c677900

View File

@@ -3,4 +3,4 @@ name: info
description: Info
icon: /logos/info.svg
type: application
version: 1.0.1
version: 1.0.0

View File

@@ -11,13 +11,6 @@
{{- $k8sClient := index $k8sClientSecret.data "client-secret-key" | b64dec }}
{{- $rootSaConfigMap := lookup "v1" "ConfigMap" "kube-system" "kube-root-ca.crt" }}
{{- $k8sCa := index $rootSaConfigMap.data "ca.crt" | b64enc }}
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
{{- $tenantRoot := lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" "tenant-root" "tenant-root" }}
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
{{- $host = $tenantRoot.spec.values.host }}
{{- end }}
{{- end }}
---
apiVersion: v1
kind: Secret

View File

@@ -3,4 +3,4 @@ name: ingress
description: NGINX Ingress Controller
icon: /logos/ingress-nginx.svg
type: application
version: 1.5.1
version: 1.4.0

View File

@@ -4,13 +4,12 @@
### Common parameters
| Name | Description | Value |
| ----------------- | ----------------------------------------------------------------- | ------- |
| `replicas` | Number of ingress-nginx replicas | `2` |
| `externalIPs` | List of externalIPs for service. | `[]` |
| `whitelist` | List of client networks | `[]` |
| `clouflareProxy` | Restoring original visitor IPs when Cloudflare proxied is enabled | `false` |
| `dashboard` | Should ingress serve Cozystack service dashboard | `false` |
| `cdiUploadProxy` | Should ingress serve CDI upload proxy | `false` |
| `virtExportProxy` | Should ingress serve KubeVirt export proxy | `false` |
| Name | Description | Value |
| ---------------- | ----------------------------------------------------------------- | ------- |
| `replicas` | Number of ingress-nginx replicas | `2` |
| `externalIPs` | List of externalIPs for service. | `[]` |
| `whitelist` | List of client networks | `[]` |
| `clouflareProxy` | Restoring original visitor IPs when Cloudflare proxied is enabled | `false` |
| `dashboard` | Should ingress serve Cozystack service dashboard | `false` |
| `cdiUploadProxy` | Should ingress serve CDI upload proxy | `false` |

View File

@@ -4,15 +4,6 @@
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}
{{- $tenantRoot := dict }}
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" "tenant-root" "tenant-root" }}
{{- end }}
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
{{- $host = $tenantRoot.spec.values.host }}
{{- else }}
{{- end }}
{{- if .Values.dashboard }}
apiVersion: networking.k8s.io/v1
kind: Ingress

View File

@@ -11,7 +11,7 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
version: '*'
interval: 1m0s
timeout: 5m0s
values:

View File

@@ -35,11 +35,6 @@
"type": "boolean",
"description": "Should ingress serve CDI upload proxy",
"default": false
},
"virtExportProxy": {
"type": "boolean",
"description": "Should ingress serve KubeVirt export proxy",
"default": false
}
}
}

View File

@@ -30,6 +30,3 @@ dashboard: false
## @param cdiUploadProxy Should ingress serve CDI upload proxy
cdiUploadProxy: false
## @param virtExportProxy Should ingress serve KubeVirt export proxy
virtExportProxy: false

View File

@@ -1,37 +0,0 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $issuerType := (index $cozyConfig.data "clusterissuer") | default "http01" }}
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}
{{- if .Values.virtExportProxy }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
cert-manager.io/cluster-issuer: letsencrypt-prod
{{- if eq $issuerType "cloudflare" }}
{{- else }}
acme.cert-manager.io/http01-ingress-class: {{ .Release.Namespace }}
{{- end }}
name: virt-exportproxy-{{ .Release.Namespace }}
namespace: cozy-kubevirt
spec:
ingressClassName: {{ .Release.Namespace }}
rules:
- host: virt-exportproxy.{{ $host }}
http:
paths:
- backend:
service:
name: virt-exportproxy
port:
number: 443
path: /
pathType: ImplementationSpecific
tls:
- hosts:
virt-exportproxy.{{ $host }}
secretName: virt-exportproxy-{{ .Release.Namespace }}-tls
{{- end }}

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:66c4547efd18b4d7475ff73b2c4e2f39e9b4471d55e85237e2fe3e87af05c302
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399

View File

@@ -14,7 +14,7 @@ spec:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
version: '>= 0.0.0-0'
version: '*'
interval: 1m0s
timeout: 5m0s
values:

View File

@@ -11,15 +11,12 @@ etcd 2.5.0 24fa7222
etcd 2.6.0 8c460528
etcd 2.6.1 45a7416c
etcd 2.7.0 HEAD
info 1.0.0 93bdf411
info 1.0.1 HEAD
info 1.0.0 HEAD
ingress 1.0.0 d7cfa53c
ingress 1.1.0 5bbc488e
ingress 1.2.0 28fca4ef
ingress 1.3.0 fde4bcfa
ingress 1.4.0 fd240701
ingress 1.5.0 93bdf411
ingress 1.5.1 HEAD
ingress 1.4.0 HEAD
monitoring 1.0.0 d7cfa53c
monitoring 1.1.0 25221fdc
monitoring 1.2.0 f81be075

View File

@@ -5,7 +5,7 @@ include ../../scripts/common-envs.mk
repo:
rm -rf "$(OUT)"
mkdir -p "$(OUT)"
helm package -d "$(OUT)" $$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")') --version $(COZYSTACK_VERSION)
helm package -d "$(OUT)" $$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")') --version $(VERSION)
cd "$(OUT)" && helm repo index .
fix-chartnames:

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:67e4a5da0ab43d93e8b75094d5a2db8159cb927a47b94f945f80d0ffb93d3301
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:a47d2743d01bff0ce60aa745fdff54f9b7184dff8679b11ab4ecd08ac663012b

View File

@@ -1,6 +1,6 @@
apiVersion: v2
appVersion: 0.19.0
appVersion: 0.18.1
description: Cluster API Operator
name: cluster-api-operator
type: application
version: 0.19.0
version: 0.18.1

View File

@@ -1,8 +1,26 @@
# Addon provider
{{- range $name, $addon := $.Values.addon }}
{{- $addonNamespace := default ( printf "%s-%s" $name "addon-system" ) (get $addon "namespace") }}
{{- $addonName := $name }}
{{- $addonVersion := get $addon "version" }}
{{- if .Values.addon }}
{{- $addons := split ";" .Values.addon }}
{{- $addonNamespace := "" }}
{{- $addonName := "" }}
{{- $addonVersion := "" }}
{{- range $addon := $addons }}
{{- $addonArgs := split ":" $addon }}
{{- $addonArgsLen := len $addonArgs }}
{{- if eq $addonArgsLen 3 }}
{{- $addonNamespace = $addonArgs._0 }}
{{- $addonName = $addonArgs._1 }}
{{- $addonVersion = $addonArgs._2 }}
{{- else if eq $addonArgsLen 2 }}
{{- $addonNamespace = print $addonArgs._0 "-addon-system" }}
{{- $addonName = $addonArgs._0 }}
{{- $addonVersion = $addonArgs._1 }}
{{- else if eq $addonArgsLen 1 }}
{{- $addonNamespace = print $addonArgs._0 "-addon-system" }}
{{- $addonName = $addonArgs._0 }}
{{- else }}
{{- fail "addon provider argument should have the following format helm:v1.0.0 or mynamespace:helm:v1.0.0" }}
{{- end }}
---
apiVersion: v1
kind: Namespace
@@ -38,24 +56,5 @@ spec:
{{- if $.Values.secretNamespace }}
secretNamespace: {{ $.Values.secretNamespace }}
{{- end }}
{{- if $addon.manifestPatches }}
manifestPatches: {{ toYaml $addon.manifestPatches | nindent 4 }}
{{- end }}
{{- if $addon.additionalManifests }}
additionalManifests:
name: {{ $addon.additionalManifests.name }}
{{- if $addon.additionalManifests.namespace }}
namespace: {{ $addon.additionalManifests.namespace }}
{{- end }} {{/* if $addon.additionalManifests.namespace */}}
{{- end }}
{{- if $addon.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $addon.additionalManifests.name }}
namespace: {{ default $addonNamespace $addon.additionalManifests.namespace }}
data:
manifests: {{- toYaml $addon.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $addon := .Values.addon */}}
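The two sides of this template expect `.Values.addon` in different shapes: one iterates a map keyed by provider name, taking `version`, an optional `namespace` (defaulting to `<name>-addon-system`), plus optional `manifestPatches` and `additionalManifests`; the other parses a string of colon-separated `[namespace:]name[:version]` entries (several providers separated by `;`) and fails on anything else. The bootstrap, control-plane and core templates below follow the same pattern. Hypothetical values for each shape (provider name and version are illustrative):

```yaml
# Map form: one entry per provider.
addon:
  helm:
    version: v0.2.5                      # illustrative
    namespace: helm-addon-system         # optional; this is also the default
    # manifestPatches: []                # optional pass-through
    # additionalManifests:               # optional; rendered into a ConfigMap
    #   name: helm-addon-extra-manifests
---
# String form: split on ":" per entry, ";" between entries.
addon: "helm:v0.2.5"                     # or "mynamespace:helm:v0.2.5"
```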

View File

@@ -1,8 +1,26 @@
# Bootstrap provider
{{- range $name, $bootstrap := $.Values.bootstrap }}
{{- $bootstrapNamespace := default ( printf "%s-%s" $name "bootstrap-system" ) (get $bootstrap "namespace") }}
{{- $bootstrapName := $name }}
{{- $bootstrapVersion := get $bootstrap "version" }}
{{- if .Values.bootstrap }}
{{- $bootstraps := split ";" .Values.bootstrap }}
{{- $bootstrapNamespace := "" }}
{{- $bootstrapName := "" }}
{{- $bootstrapVersion := "" }}
{{- range $bootstrap := $bootstraps }}
{{- $bootstrapArgs := split ":" $bootstrap }}
{{- $bootstrapArgsLen := len $bootstrapArgs }}
{{- if eq $bootstrapArgsLen 3 }}
{{- $bootstrapNamespace = $bootstrapArgs._0 }}
{{- $bootstrapName = $bootstrapArgs._1 }}
{{- $bootstrapVersion = $bootstrapArgs._2 }}
{{- else if eq $bootstrapArgsLen 2 }}
{{- $bootstrapNamespace = print $bootstrapArgs._0 "-bootstrap-system" }}
{{- $bootstrapName = $bootstrapArgs._0 }}
{{- $bootstrapVersion = $bootstrapArgs._1 }}
{{- else if eq $bootstrapArgsLen 1 }}
{{- $bootstrapNamespace = print $bootstrapArgs._0 "-bootstrap-system" }}
{{- $bootstrapName = $bootstrapArgs._0 }}
{{- else }}
{{- fail "bootstrap provider argument should have the following format kubeadm:v1.0.0 or mynamespace:kubeadm:v1.0.0" }}
{{- end }}
---
apiVersion: v1
kind: Namespace
@@ -39,24 +57,5 @@ spec:
namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $bootstrap.manifestPatches }}
manifestPatches: {{ toYaml $bootstrap.manifestPatches | nindent 4 }}
{{- end }}
{{- if $bootstrap.additionalManifests }}
additionalManifests:
name: {{ $bootstrap.additionalManifests.name }}
{{- if $bootstrap.additionalManifests.namespace }}
namespace: {{ $bootstrap.additionalManifests.namespace }}
{{- end }} {{/* if $bootstrap.additionalManifests.namespace */}}
{{- end }}
{{- if $bootstrap.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $bootstrap.additionalManifests.name }}
namespace: {{ default $bootstrapNamespace $bootstrap.additionalManifests.namespace }}
data:
manifests: {{- toYaml $bootstrap.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $bootstrap := .Values.bootstrap */}}

View File

@@ -1,8 +1,26 @@
# Control plane provider
{{- range $name, $controlPlane := $.Values.controlPlane }}
{{- $controlPlaneNamespace := default ( printf "%s-%s" $name "control-plane-system" ) (get $controlPlane "namespace") }}
{{- $controlPlaneName := $name }}
{{- $controlPlaneVersion := get $controlPlane "version" }}
{{- if .Values.controlPlane }}
{{- $controlPlanes := split ";" .Values.controlPlane }}
{{- $controlPlaneNamespace := "" }}
{{- $controlPlaneName := "" }}
{{- $controlPlaneVersion := "" }}
{{- range $controlPlane := $controlPlanes }}
{{- $controlPlaneArgs := split ":" $controlPlane }}
{{- $controlPlaneArgsLen := len $controlPlaneArgs }}
{{- if eq $controlPlaneArgsLen 3 }}
{{- $controlPlaneNamespace = $controlPlaneArgs._0 }}
{{- $controlPlaneName = $controlPlaneArgs._1 }}
{{- $controlPlaneVersion = $controlPlaneArgs._2 }}
{{- else if eq $controlPlaneArgsLen 2 }}
{{- $controlPlaneNamespace = print $controlPlaneArgs._0 "-control-plane-system" }}
{{- $controlPlaneName = $controlPlaneArgs._0 }}
{{- $controlPlaneVersion = $controlPlaneArgs._1 }}
{{- else if eq $controlPlaneArgsLen 1 }}
{{- $controlPlaneNamespace = print $controlPlaneArgs._0 "-control-plane-system" }}
{{- $controlPlaneName = $controlPlaneArgs._0 }}
{{- else }}
{{- fail "controlplane provider argument should have the following format kubeadm:v1.0.0 or mynamespace:kubeadm:v1.0.0" }}
{{- end }}
---
apiVersion: v1
kind: Namespace
@@ -52,24 +70,5 @@ spec:
namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $controlPlane.manifestPatches }}
manifestPatches: {{ toYaml $controlPlane.manifestPatches | nindent 4 }}
{{- end }}
{{- if $controlPlane.additionalManifests }}
additionalManifests:
name: {{ $controlPlane.additionalManifests.name }}
{{- if $controlPlane.additionalManifests.namespace }}
namespace: {{ $controlPlane.additionalManifests.namespace }}
{{- end }} {{/* if $controlPlane.additionalManifests.namespace */}}
{{- end }}
{{- if $controlPlane.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $controlPlane.additionalManifests.name }}
namespace: {{ default $controlPlaneNamespace $controlPlane.additionalManifests.namespace }}
data:
manifests: {{- toYaml $controlPlane.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $controlPlane := .Values.controlPlane */}}
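The control-plane provider follows the same convention; a sketch with illustrative values:

# namespace defaults to "<name>-control-plane-system"
controlPlane: "kubeadm:v1.0.0"

# or with an explicit namespace
# controlPlane: "mynamespace:kubeadm:v1.0.0"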

View File

@@ -1,8 +1,25 @@
# Core provider
{{- range $name, $core := $.Values.core }}
{{- $coreNamespace := default "capi-system" (get $core "namespace") }}
{{- $coreName := $name }}
{{- $coreVersion := get $core "version" }}
{{- if .Values.core }}
{{- $coreArgs := split ":" .Values.core }}
{{- $coreArgsLen := len $coreArgs }}
{{- $coreVersion := "" }}
{{- $coreNamespace := "" }}
{{- $coreName := "" }}
{{- $coreVersion := "" }}
{{- if eq $coreArgsLen 3 }}
{{- $coreNamespace = $coreArgs._0 }}
{{- $coreName = $coreArgs._1 }}
{{- $coreVersion = $coreArgs._2 }}
{{- else if eq $coreArgsLen 2 }}
{{- $coreNamespace = "capi-system" }}
{{- $coreName = $coreArgs._0 }}
{{- $coreVersion = $coreArgs._1 }}
{{- else if eq $coreArgsLen 1 }}
{{- $coreNamespace = "capi-system" }}
{{- $coreName = $coreArgs._0 }}
{{- else }}
{{- fail "core provider argument should have the following format cluster-api:v1.0.0 or mynamespace:cluster-api:v1.0.0" }}
{{- end }}
---
apiVersion: v1
kind: Namespace
@@ -48,24 +65,4 @@ spec:
namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $core.manifestPatches }}
manifestPatches: {{ toYaml $core.manifestPatches | nindent 4 }}
{{- end }}
{{- if $core.additionalManifests }}
additionalManifests:
name: {{ $core.additionalManifests.name }}
{{- if $core.additionalManifests.namespace }}
namespace: {{ $core.additionalManifests.namespace }}
{{- end }}
{{- end }}
{{- if $core.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $core.additionalManifests.name }}
namespace: {{ default $coreNamespace $core.additionalManifests.namespace }}
data:
manifests: {{- toYaml $core.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $core := .Values.core */}}
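Unlike the other providers, the core provider takes a single value rather than a ";"-separated list; a sketch matching the format named in the fail message (version illustrative):

# namespace defaults to "capi-system"
core: "cluster-api:v1.0.0"

# or with an explicit namespace
# core: "mynamespace:cluster-api:v1.0.0"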

Some files were not shown because too many files have changed in this diff.