Mirror of https://github.com/outbackdingo/cozystack.git, synced 2026-01-30 02:18:49 +00:00.

Compare commits: fix-tests...ci-pull-re — 134 commits
Commit list (only the abbreviated SHA1s were captured by this mirror; the Author and Date columns are empty in the source):

f3b3266a30, 894cb14d49, a0935e9ae4, f2c248acbd, 590f14a614, 4c8dba880a, de0c7b94f4, 2682a6e674, e3e0b21612, 455d66fbe4,
7db7277636, 7be5db8cff, 249950d94b, 44565dca88, cefcd24ebb, 13d7df47d7, 1ccd3074dc, 70d3591ed2, 700991f4fa, d89acbf44d,
59ef3296f0, 3ed0cdee1c, 9f5230a342, b895ccfdeb, d54a407d68, f9ec630509, 3f47181c10, 19409d801d, 8a4793d571, 0fc3fdcb3d,
04e2b3952b, b56624a781, 07d7fadb1a, 8db92d53d1, 7537235f43, 4bb524e53d, e7ded52f93, 8547dc3b21, c22603bf7e, 89525dedb5,
1c53a6f9f6, 16ee0f2c3a, 72d0394475, 0a998c8b49, 7bfad655c2, e81cbf780c, e8cc44450a, d3a8a4a7de, fc2c5a0f6b, 0f8b8e1744,
197434ff94, 703073a164, 6a0fc64475, f1624353ef, 277b438f68, 405863cb11, 63ebab5c2a, 0ddaff9380, a6b02bf381, 39ede77fec,
e505857832, d8f3547db7, 6d8a99269b, b9112a398e, 719fdd29cc, 9e1376f709, 7a9a1fcba4, 2def9f4e83, c1046aae6a, 53cf1c537c,
ccedcb7419, f94a01febd, 495e584313, 172e660cd1, 14262cdd2a, 80576cb757, fde6e9cc73, 57ca60c5a5, 1d0ee15948, eeaa1b4517,
a14bcf98dd, be84fc6e4e, 73a3f481bc, 5903bbc64a, f204809e43, fe4806ce49, 8f535acc3f, 53cbb4ae12, 4e9446d934, acbfb6ad64,
8570449080, ffe6109dfb, 7dbb8a1d75, 86210c1fc1, e96f15773d, bc5635dd8e, 5d71c90f0a, 05d6ab9516, ccb001ee97, 5a5cf91742,
6a0d4913f2, 685e50bf6c, f90fc6f681, d8f3f2dee1, da8100965f, 6d2ea1295e, d7914ff9aa, 7f4af5ebbc, 8f575c455c, 819166eb35,
f507802ec9, 62267811cb, 12bedef2d3, fd240701f8, 60b96e0a62, 1d377bab9d, 799690dc07, aa02d0c5e6, 6b8ecf3953, e5b81f367e,
ba4798464d, 655e8be382, fd9a5b0d7b, d09314bbb5, 371791215a, 4c220bb443, e27611d45a, a3e647c547, be52fe5461, 1d639fda0d,
bbdde79428, 1966f86120, fa7c98bc38, 6671869acc
.github/CODEOWNERS (vendored) — 2 lines changed

@@ -1 +1 @@
-* @kvaps @lllamnyp
+* @kvaps @lllamnyp @klinch0
.github/workflows/backport.yaml (vendored) — new file, 53 lines added

@@ -0,0 +1,53 @@
+name: Automatic Backport
+
+on:
+  pull_request_target:
+    types: [closed] # fires when PR is closed (merged)
+
+concurrency:
+  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
+permissions:
+  contents: write
+  pull-requests: write
+
+jobs:
+  backport:
+    if: |
+      github.event.pull_request.merged == true &&
+      contains(github.event.pull_request.labels.*.name, 'backport')
+    runs-on: [self-hosted]
+    steps:
+      # 1. Decide which maintenance branch should receive the back‑port
+      - name: Determine target maintenance branch
+        id: target
+        uses: actions/github-script@v7
+        with:
+          script: |
+            let rel;
+            try {
+              rel = await github.rest.repos.getLatestRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo
+              });
+            } catch (e) {
+              core.setFailed('No existing releases found; cannot determine backport target.');
+              return;
+            }
+            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
+            const branch = `release-${maj}.${min}`;
+            core.setOutput('branch', branch);
+            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);
+
+      # 2. Checkout (required by backport‑action)
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      # 3. Create the back‑port pull request
+      - name: Create back‑port PR
+        uses: korthout/backport-action@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          label_pattern: '' # don't read labels for targets
+          target_branches: ${{ steps.target.outputs.branch }}
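The target branch above is derived purely from the major/minor pair of the latest release tag. A minimal Node sketch of the same derivation outside the github-script context (the sample tags are illustrative):

```js
// Derive the maintenance branch ("release-MAJ.MIN") from a release tag,
// mirroring the "Determine target maintenance branch" step above.
function backportBranch(tagName) {
  const [maj, min] = tagName.replace(/^v/, '').split('.');
  return `release-${maj}.${min}`;
}

console.log(backportBranch('v0.31.5'));     // "release-0.31"
console.log(backportBranch('v0.31.5-rc1')); // "release-0.31"
```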
.github/workflows/pre-commit.yml (vendored) — 18 lines changed

@@ -1,18 +1,26 @@
 name: Pre-Commit Checks

 on:
   push:
     branches:
       - main
-  pull_request:
+  pull_request_target:
+    types: [labeled, opened, synchronize, reopened]
   paths-ignore:
     - '**.md'

+concurrency:
+  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
 jobs:
   pre-commit:
     runs-on: ubuntu-22.04
     steps:
-      - name: Checkout code
+      - name: Checkout code (PR branch)
         uses: actions/checkout@v3
+        with:
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.sha }}
+          fetch-depth: 0
+          fetch-tags: true

       - name: Set up Python
         uses: actions/setup-python@v4
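One behavioral note that applies here and to the workflows below: switching from `pull_request` to `pull_request_target` while explicitly checking out `github.event.pull_request.head.repo.full_name` at `github.event.pull_request.head.sha` runs code from the PR branch in a context carrying the base repository's token permissions; the `ok-to-test` label gate used in the other workflows is the customary guard for that pattern.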
.github/workflows/pull-requests-release.yaml (vendored) — 144 lines changed

@@ -1,9 +1,13 @@
 name: Releasing PR

 on:
-  pull_request:
+  pull_request_target:
+    types: [labeled, opened, synchronize, reopened, closed]

+concurrency:
+  group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
 jobs:
   verify:
     name: Test Release
@@ -12,17 +16,19 @@ jobs:
       contents: read
       packages: write

     # Run only when the PR carries the "release" label and not closed.
     if: |
       contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
       contains(github.event.pull_request.labels.*.name, 'release') &&
       github.event.action != 'closed'

     steps:
-      - name: Checkout code
+      - name: Checkout code (PR branch)
         uses: actions/checkout@v4
         with:
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
           fetch-tags: true

       - name: Login to GitHub Container Registry
         uses: docker/login-action@v3
@@ -39,38 +45,112 @@ jobs:
     runs-on: [self-hosted]
     permissions:
       contents: write

     if: |
       github.event.pull_request.merged == true &&
       contains(github.event.pull_request.labels.*.name, 'release')

     steps:
       # Extract tag from branch name (branch = release-X.Y.Z*)
       - name: Extract tag from branch name
         id: get_tag
         uses: actions/github-script@v7
         with:
           script: |
             const branch = context.payload.pull_request.head.ref;
-            const match = branch.match(/^release-(v\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
-
-            if (!match) {
-              core.setFailed(`Branch '${branch}' does not match expected format 'release-vX.Y.Z[-suffix]'`);
-            } else {
-              const tag = match[1];
-              core.setOutput('tag', tag);
-              console.log(`✅ Extracted tag: ${tag}`);
-            }
+            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
+            if (!m) {
+              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
+              return;
+            }
+
+            const tag = `v${m[1]}`;
+            core.setOutput('tag', tag);
+            console.log(`✅ Tag to publish: ${tag}`);

       # Checkout merged commit (default ref -> merge SHA)
       - name: Checkout repo
         uses: actions/checkout@v4
         with:
           fetch-depth: 0

-      - name: Create tag on merged commit
+      - name: Create tag on merge commit
         run: |
-          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
-          git push origin ${{ steps.get_tag.outputs.tag }}
+          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
+          git push -f origin ${{ steps.get_tag.outputs.tag }}

+      # Ensure maintenance branch release-X.Y
+      - name: Ensure maintenance branch release-X.Y
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
+            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
+            if (!match) {
+              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
+              return;
+            }
+            const line = `${match[1]}.${match[2]}`;
+            const branch = `release-${line}`;
+            try {
+              await github.rest.repos.getBranch({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                branch
+              });
+              console.log(`Branch '${branch}' already exists`);
+            } catch (_) {
+              await github.rest.git.createRef({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                ref: `refs/heads/${branch}`,
+                sha: context.sha
+              });
+              console.log(`✅ Branch '${branch}' created at ${context.sha}`);
+            }
+
+      # Get the latest published release
+      - name: Get the latest published release
+        id: latest_release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            try {
+              const rel = await github.rest.repos.getLatestRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo
+              });
+              core.setOutput('tag', rel.data.tag_name);
+            } catch (_) {
+              core.setOutput('tag', '');
+            }
+
+      # Compare current tag vs latest using semver-utils
+      - name: Semver compare
+        id: semver
+        uses: madhead/semver-utils@v4.3.0
+        with:
+          version: ${{ steps.get_tag.outputs.tag }}
+          compare-to: ${{ steps.latest_release.outputs.tag }}
+
+      # Derive flags: prerelease? make_latest?
+      - name: Calculate publish flags
+        id: flags
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc1
+            const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
+            if (!m) {
+              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
+              return;
+            }
+            const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc1
+            const isRc = Boolean(m[2]);
+            core.setOutput('is_rc', isRc);
+            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
+            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');

       # Publish draft release with correct flags
       - name: Publish draft release
         uses: actions/github-script@v7
         with:
@@ -78,19 +158,17 @@ jobs:
           const tag = '${{ steps.get_tag.outputs.tag }}';
           const releases = await github.rest.repos.listReleases({
             owner: context.repo.owner,
             repo: context.repo.repo
           });

-          const release = releases.data.find(r => r.tag_name === tag && r.draft);
-          if (!release) {
-            throw new Error(`Draft release with tag ${tag} not found`);
-          }
-
+          const draft = releases.data.find(r => r.tag_name === tag && r.draft);
+          if (!draft) throw new Error(`Draft release for ${tag} not found`);
           await github.rest.repos.updateRelease({
             owner: context.repo.owner,
             repo: context.repo.repo,
-            release_id: release.id,
-            draft: false
+            release_id: draft.id,
+            draft: false,
+            prerelease: ${{ steps.flags.outputs.is_rc }},
+            make_latest: '${{ steps.flags.outputs.make_latest }}'
           });

-          console.log(`✅ Published release for ${tag}`);
+          console.log(`🚀 Published release for ${tag}`);
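The revised `get_tag` step changes the branch/tag contract: release branch names no longer carry the `v` prefix, and the step adds it back when forming the tag. A quick sketch of the new extraction rule against a few hypothetical branch names:

```js
// New rule from the workflow above: branch "release-X.Y.Z[-suffix]"
// yields tag "vX.Y.Z[-suffix]"; anything else is rejected.
function tagFromBranch(branch) {
  const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
  return m ? `v${m[1]}` : null;
}

console.log(tagFromBranch('release-0.31.5'));     // "v0.31.5"
console.log(tagFromBranch('release-0.31.5-rc1')); // "v0.31.5-rc1"
console.log(tagFromBranch('release-v0.31.5'));    // null — old-style branch now rejected
```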
.github/workflows/pull-requests.yaml (vendored) — 24 lines changed

@@ -1,9 +1,13 @@
 name: Pull Request

 on:
-  pull_request:
+  pull_request_target:
+    types: [labeled, opened, synchronize, reopened]

+concurrency:
+  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
 jobs:
   e2e:
     name: Build and Test
@@ -12,16 +16,18 @@ jobs:
       contents: read
       packages: write

     # Never run when the PR carries the "release" label.
     if: |
       contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
       !contains(github.event.pull_request.labels.*.name, 'release')

     steps:
-      - name: Checkout code
+      - name: Checkout code (PR branch)
         uses: actions/checkout@v4
         with:
+          repository: ${{ github.event.pull_request.head.repo.full_name }}
+          ref: ${{ github.event.pull_request.head.sha }}
           fetch-depth: 0
           fetch-tags: true

       - name: Login to GitHub Container Registry
         uses: docker/login-action@v3
@@ -30,10 +36,8 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io

-      - name: make build
-        run: |
-          make build
+      - name: Build
+        run: make build

-      - name: make test
-        run: |
-          make test
+      - name: Test
+        run: make test
.github/workflows/tags.yaml (vendored) — 229 lines changed

@@ -3,7 +3,11 @@ name: Versioned Tag
 on:
   push:
     tags:
-      - 'v*.*.*'
+      - 'v*.*.*' # vX.Y.Z or vX.Y.Z-rcN
+
+concurrency:
+  group: tags-${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true

 jobs:
   prepare-release:
@@ -15,6 +19,7 @@ jobs:
     pull-requests: write

     steps:
+      # Check if a non-draft release with this tag already exists
       - name: Check if release already exists
         id: check_release
         uses: actions/github-script@v7
@@ -23,28 +28,67 @@ jobs:
           const tag = context.ref.replace('refs/tags/', '');
           const releases = await github.rest.repos.listReleases({
             owner: context.repo.owner,
             repo: context.repo.repo
           });
-          const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
-          core.setOutput('skip', exists);
-          console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);
+          const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
+          if (existing) {
+            core.setOutput('skip', 'true');
+          } else {
+            core.setOutput('skip', 'false');
+          }

+      # If a published release already exists, skip the rest of the workflow
+      - name: Skip if release already exists
+        if: steps.check_release.outputs.skip == 'true'
+        run: echo "Release already exists, skipping workflow."
+
+      # Parse tag meta‑data (rc?, maintenance line, etc.)
+      - name: Parse tag
+        if: steps.check_release.outputs.skip == 'false'
+        id: tag
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc1
+            const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
+            if (!m) {
+              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
+              return;
+            }
+            const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc1
+            const isRc = Boolean(m[2]);
+            const [maj, min] = m[1].split('.');
+            core.setOutput('tag', ref);
+            core.setOutput('version', version);
+            core.setOutput('is_rc', isRc);
+            core.setOutput('line', `${maj}.${min}`); // 0.31
+
+      # Detect base branch (main or release‑X.Y) the tag was pushed from
+      - name: Get base branch
+        if: steps.check_release.outputs.skip == 'false'
+        id: get_base
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const baseRef = context.payload.base_ref;
+            if (!baseRef) {
+              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
+              return;
+            }
+            const branch = baseRef.replace('refs/heads/', '');
+            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
+            if (!ok) {
+              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
+              return;
+            }
+            core.setOutput('branch', branch);

+      # Checkout & login once
       - name: Checkout code
+        if: steps.check_release.outputs.skip == 'false'
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
           fetch-tags: true

-      - name: Login to GitHub Container Registry
+      - name: Login to GHCR
+        if: steps.check_release.outputs.skip == 'false'
         uses: docker/login-action@v3
         with:
@@ -52,108 +96,129 @@ jobs:
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io

       # Build project artifacts
       - name: Build
         if: steps.check_release.outputs.skip == 'false'
         run: make build

       # Commit built artifacts
       - name: Commit release artifacts
         if: steps.check_release.outputs.skip == 'false'
-        env:
-          GIT_AUTHOR_NAME: ${{ github.actor }}
-          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
         run: |
-          git config user.name "$GIT_AUTHOR_NAME"
-          git config user.email "$GIT_AUTHOR_EMAIL"
+          git config user.name "github-actions"
+          git config user.email "github-actions@github.com"
           git add .
           git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
           git push origin HEAD || true

+      # Get `latest_version` from latest published release
+      - name: Get latest published release
+        if: steps.check_release.outputs.skip == 'false'
+        id: latest_release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            try {
+              const rel = await github.rest.repos.getLatestRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo
+              });
+              core.setOutput('tag', rel.data.tag_name);
+            } catch (_) {
+              core.setOutput('tag', '');
+            }
+
+      # Compare tag (A) with latest (B)
+      - name: Semver compare
+        if: steps.check_release.outputs.skip == 'false'
+        id: semver
+        uses: madhead/semver-utils@v4.3.0
+        with:
+          version: ${{ steps.tag.outputs.tag }}               # A
+          compare-to: ${{ steps.latest_release.outputs.tag }} # B
+
+      # Create or reuse DRAFT GitHub Release
+      - name: Create / reuse draft release
+        if: steps.check_release.outputs.skip == 'false'
+        id: release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const tag = '${{ steps.tag.outputs.tag }}';
+            const isRc = ${{ steps.tag.outputs.is_rc }};
+            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
+            const makeLatest = outdated ? false : 'legacy';
+            const releases = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo
+            });
+            let rel = releases.data.find(r => r.tag_name === tag);
+            if (!rel) {
+              rel = await github.rest.repos.createRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                tag_name: tag,
+                name: tag,
+                draft: true,
+                prerelease: isRc,
+                make_latest: makeLatest
+              });
+              console.log(`Draft release created for ${tag}`);
+            } else {
+              console.log(`Re‑using existing release ${tag}`);
+            }
+            core.setOutput('upload_url', rel.upload_url);
+
+      # Build + upload assets (optional)
+      - name: Build & upload assets
+        if: steps.check_release.outputs.skip == 'false'
+        run: |
+          make assets
+          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

       # Create release‑X.Y.Z branch and push (force‑update)
       - name: Create release branch
         if: steps.check_release.outputs.skip == 'false'
         run: |
-          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
-          git branch -f "$BRANCH_NAME"
-          git push origin "$BRANCH_NAME" --force
+          BRANCH="release-${GITHUB_REF#refs/tags/v}"
+          git branch -f "$BRANCH"
+          git push -f origin "$BRANCH"

       # Create pull request into original base branch (if absent)
       - name: Create pull request if not exists
         if: steps.check_release.outputs.skip == 'false'
         uses: actions/github-script@v7
         with:
           script: |
             const version = context.ref.replace('refs/tags/v', '');
-            const branch = `release-${version}`;
-            const base = 'main';
-
+            const base = '${{ steps.get_base.outputs.branch }}';
+            const head = `release-${version}`;

             const prs = await github.rest.pulls.list({
               owner: context.repo.owner,
               repo: context.repo.repo,
-              head: `${context.repo.owner}:${branch}`,
+              head: `${context.repo.owner}:${head}`,
               base
             });

             if (prs.data.length === 0) {
-              const newPr = await github.rest.pulls.create({
+              const pr = await github.rest.pulls.create({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
-                head: branch,
-                base: base,
+                head,
+                base,
                 title: `Release v${version}`,
-                body:
-                  `This PR prepares the release \`v${version}\`.\n` +
-                  `(Please merge it before releasing draft)`,
+                body: `This PR prepares the release \`v${version}\`.`,
                 draft: false
               });

-              console.log(`Created pull request #${newPr.data.number} from ${branch} to ${base}`);
-
               await github.rest.issues.addLabels({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
-                issue_number: newPr.data.number,
-                labels: ['release', 'ok-to-test']
+                issue_number: pr.data.number,
+                labels: ['release']
               });
+
+              console.log(`Created PR #${pr.data.number}`);
             } else {
-              console.log(`Pull request already exists from ${branch} to ${base}`);
+              console.log(`PR already exists from ${head} to ${base}`);
             }

-      - name: Create or reuse draft release
-        if: steps.check_release.outputs.skip == 'false'
-        id: create_release
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const tag = context.ref.replace('refs/tags/', '');
-            const releases = await github.rest.repos.listReleases({
-              owner: context.repo.owner,
-              repo: context.repo.repo
-            });
-
-            let release = releases.data.find(r => r.tag_name === tag);
-            if (!release) {
-              release = await github.rest.repos.createRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag_name: tag,
-                name: `${tag}`,
-                draft: true,
-                prerelease: false
-              });
-            }
-            core.setOutput('upload_url', release.upload_url);
-
-      - name: Build assets
-        if: steps.check_release.outputs.skip == 'false'
-        run: make assets
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        if: steps.check_release.outputs.skip == 'false'
-        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Delete pushed tag
-        if: steps.check_release.outputs.skip == 'false'
-        run: |
-          git push --delete origin ${GITHUB_REF#refs/tags/}
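The tag-parsing and publish-flag logic in `tags.yaml` mirrors the release workflow's `Calculate publish flags` step: release candidates are marked prerelease and never become "latest", and a tag older than the latest published release also skips "latest". A condensed Node sketch of that shared convention (the sample tags and comparison values are illustrative):

```js
// Parse "vX.Y.Z" / "vX.Y.Z-rcN" and derive the publish flags used above.
// latestComparison is the semver-utils result of tag vs. latest release:
// '<' means the tag is older than the latest published release.
function publishFlags(tag, latestComparison) {
  const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
  if (!m) throw new Error(`tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
  const isRc = Boolean(m[2]);
  const outdated = latestComparison === '<';
  return { isRc, makeLatest: isRc || outdated ? 'false' : 'legacy' };
}

console.log(publishFlags('v0.31.5', '>'));     // { isRc: false, makeLatest: 'legacy' }
console.log(publishFlags('v0.31.5-rc1', '>')); // { isRc: true,  makeLatest: 'false' }
console.log(publishFlags('v0.30.9', '<'));     // { isRc: false, makeLatest: 'false' }
```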
(Grafana dashboard JSON; source file name not captured by this mirror)

@@ -626,7 +626,7 @@
       "datasource": {
         "uid": "${DS_PROMETHEUS}"
       },
-      "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"POD\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
+      "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
       "hide": false,
       "interval": "",
       "legendFormat": "{{pod}}",
(Grafana dashboard JSON; source file name not captured by this mirror)

@@ -450,7 +450,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -520,7 +520,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -590,7 +590,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -660,7 +660,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "__auto",
       "range": true,
@@ -1128,7 +1128,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
+      "expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
       "format": "time_series",
       "hide": false,
       "intervalFactor": 1,
@@ -1143,7 +1143,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
+      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -1527,7 +1527,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
+      "expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
       "format": "time_series",
       "hide": false,
       "intervalFactor": 1,
@@ -1542,7 +1542,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
+      "expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -1909,7 +1909,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
+      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
       "format": "table",
       "instant": true,
       "range": false,
@@ -2037,7 +2037,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
+      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
       "format": "table",
       "instant": true,
       "range": false,
@@ -2160,7 +2160,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
+      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
       "format": "table",
       "instant": true,
       "range": false,
@@ -2288,7 +2288,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
+      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
       "format": "table",
       "instant": true,
       "range": false,
(Grafana dashboard JSON; source file name not captured by this mirror; the final hunk is truncated in the source)

@@ -684,7 +684,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "hide": false,
       "instant": true,
@@ -710,7 +710,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "hide": false,
       "instant": true,
@@ -723,7 +723,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n    -\n    sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n  ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n    -\n    sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n  ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "hide": false,
       "instant": true,
@@ -736,7 +736,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) \n  (\n    (\n      (\n        sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n      ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) \n  (\n    (\n      (\n        sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n      ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "hide": false,
       "instant": true,
@@ -762,7 +762,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "instant": true,
       "intervalFactor": 1,
@@ -786,7 +786,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "instant": true,
       "intervalFactor": 1,
@@ -798,7 +798,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n      ) > 0\n    )\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n      ) > 0\n    )\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "instant": true,
       "intervalFactor": 1,
@@ -810,7 +810,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n      ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n      ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "instant": true,
       "intervalFactor": 1,
@@ -848,7 +848,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "instant": true,
       "intervalFactor": 1,
@@ -860,7 +860,7 @@
       "type": "prometheus",
       "uid": "${ds_prometheus}"
       },
-      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+      "expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
       "format": "table",
       "instant": true,
       "intervalFactor": 1,
@@ -1315,7 +1315,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+      "expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "instant": false,
       "intervalFactor": 1,
@@ -1488,7 +1488,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 1,
@@ -1502,7 +1502,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "interval": "",
       "intervalFactor": 1,
@@ -1642,7 +1642,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum by (pod)\n  (\n    max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n    * on (pod)\n    sum by (pod) (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n    ) > 0\n  )",
+      "expr": "sum by (pod)\n  (\n    max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n    * on (pod)\n    sum by (pod) (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n    ) > 0\n  )",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "{{ pod }}",
@@ -1779,7 +1779,7 @@
         "uid": "$ds_prometheus"
      },
       "editorMode": "code",
-      "expr": " (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (\n    (\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n    )\n    or\n    sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n  )\n) > 0",
+      "expr": " (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (\n    (\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n    )\n    or\n    sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n  )\n) > 0",
       "format": "time_series",
       "hide": false,
       "intervalFactor": 1,
@@ -2095,7 +2095,7 @@
       "repeatDirection": "h",
       "targets": [
         {
-          "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
           "refId": "D"
         },
         {
-          "expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
+          "expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
       "type": "prometheus",
       "uid": "$ds_prometheus"
       },
-      "expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+      "expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "System",
@@ -2306,7 +2306,7 @@
       "type": "prometheus",
       "uid": "$ds_prometheus"
       },
-      "expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+      "expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "User",
@@ -2468,7 +2468,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+      "expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
         "uid": "${ds_prometheus}"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
         "uid": "${ds_prometheus}"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
         "uid": "${ds_prometheus}"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
         "uid": "${ds_prometheus}"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
       "format": "time_series",
       "intervalFactor": 1,
       "legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
         "uid": "${ds_prometheus}"
       },
       "editorMode": "code",
-      "expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)
|
||||
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -2837,7 +2837,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
|
||||
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ pod }}",
|
||||
@@ -2974,7 +2974,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
|
||||
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ pod }}",
|
||||
@@ -3290,56 +3290,56 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "RSS",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Cache",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Swap",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
|
||||
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "VPA Target",
|
||||
"refId": "E"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Limits",
|
||||
"refId": "F"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Requests",
|
||||
"refId": "G"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -3834,7 +3834,7 @@
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ pod }}",
|
||||
@@ -3972,7 +3972,7 @@
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ pod }}",
|
||||
|
||||
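Note: every hunk in these dashboard diffs applies the same substitution, replacing the PromQL filter container!="POD" with container!="". The intent is unchanged, excluding the pod-level aggregate series from per-container sums, but on containerd-based nodes cAdvisor exports that aggregate with an empty container label rather than the Docker-era container="POD", so only the new filter actually drops it. A minimal sketch of the two filters; the tenant-a namespace and the 5m window are placeholders, not values taken from these dashboards:

    # Old filter: only excludes series labelled container="POD" (Docker pause container).
    sum by (pod) (rate(container_cpu_usage_seconds_total{namespace="tenant-a", container!="POD"}[5m]))

    # New filter: also excludes the pod-level aggregate series, which newer
    # cAdvisor exports with an empty container label.
    sum by (pod) (rate(container_cpu_usage_seconds_total{namespace="tenant-a", container!=""}[5m]))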
@@ -656,7 +656,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -680,7 +680,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -692,7 +692,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -704,7 +704,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -728,7 +728,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -740,7 +740,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -752,7 +752,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -764,7 +764,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -776,7 +776,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -814,7 +814,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -826,7 +826,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -877,7 +877,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
+"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -1475,7 +1475,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
+"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "System",
@@ -1657,7 +1657,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
+"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -1798,7 +1798,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -2257,28 +2257,28 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Usage",
 "refId": "D"
 },
 {
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "C"
 },
 {
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "E"
 },
 {
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
@@ -2458,7 +2458,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2470,7 +2470,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -2622,7 +2622,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
 "pluginVersion": "8.5.13",
 "targets": [
 {
-"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2814,7 +2814,7 @@
 "refId": "B"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2822,14 +2822,14 @@
 "refId": "C"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -2955,7 +2955,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -3091,7 +3091,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
+"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -3408,14 +3408,14 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -3423,7 +3423,7 @@
 "refId": "B"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -3431,35 +3431,35 @@
 "refId": "C"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "E"
 },
 {
-"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
+"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "F"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
 "refId": "G"
 },
 {
-"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
+"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -3910,7 +3910,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -4049,7 +4049,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
+"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ controller }}",
@@ -869,7 +869,7 @@
"refId": "A"
},
{
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -878,7 +878,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -895,7 +895,7 @@
"refId": "D"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -903,7 +903,7 @@
"refId": "E"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -919,7 +919,7 @@
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -935,7 +935,7 @@
"refId": "I"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -943,7 +943,7 @@
"refId": "J"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -968,7 +968,7 @@
"refId": "M"
},
{
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -977,7 +977,7 @@
"refId": "N"
},
{
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -1449,7 +1449,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1627,7 +1627,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1764,7 +1764,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2218,21 +2218,21 @@
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2419,7 +2419,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2572,7 +2572,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2754,14 +2754,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2769,7 +2769,7 @@
"refId": "B"
},
{
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2777,14 +2777,14 @@
"refId": "C"
},
{
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2910,7 +2910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3046,7 +3046,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3370,14 +3370,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3385,7 +3385,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3393,35 +3393,35 @@
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "F"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3873,7 +3873,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -4008,7 +4008,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",

@@ -686,7 +686,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"hide": false,
"instant": true,
@@ -759,7 +759,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -847,7 +847,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
"format": "table",
"hide": false,
"instant": true,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
"format": "table",
"hide": false,
"instant": true,
@@ -899,7 +899,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1503,7 +1503,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1669,7 +1669,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1681,7 +1681,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1820,7 +1820,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2269,7 +2269,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2476,7 +2476,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2488,7 +2488,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2639,7 +2639,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2816,7 +2816,7 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2824,28 +2824,28 @@
"refId": "A"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3110,7 +3110,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -3431,7 +3431,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -3439,7 +3439,7 @@
"refId": "A"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3447,28 +3447,28 @@
"refId": "B"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
@@ -3482,7 +3482,7 @@
"refId": "G"
},
{
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3930,7 +3930,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ container }}",
@@ -4068,7 +4068,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ container }}",

139
docs/release.md
Normal file
@@ -0,0 +1,139 @@
# Release Workflow

This section explains how Cozystack builds and releases are made.

## Regular Releases

When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub (see the tagging sketch after this list).
2. CI workflow triggers on tag push:
   1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
   2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
   3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
   4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.

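The tagging in step 1 is plain git; a minimal sketch, assuming the maintainer has push access and the commit to release is the tip of `main`:

```bash
# Tag the chosen commit on main and push the tag to GitHub;
# pushing the tag is what triggers the release CI workflow.
git checkout main
git pull origin main
git tag v0.42.0
git push origin v0.42.0
```
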
```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

Once testing and editing are complete, the sequence continues:

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on merge:
   1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing the tag to GitHub (see the sketch after the next diagram).
   2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```

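The tag move in step 5.1 amounts to re-pointing the tag and force-pushing it; a hedged sketch, where `<merge-commit>` is a placeholder for the hash of the merge commit:

```bash
# Re-point the existing tag at the merge commit and force-push it,
# so the published tag matches the commit that was actually merged.
git tag -f v0.42.0 <merge-commit>
git push --force origin v0.42.0
```
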
## Patch Releases

Making a patch release has a lot in common with a regular release, with a couple of differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that were released
in `v0.42.0`.

Once problems are found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```

1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
   These must be only patches to features that were present in version `v0.42.0`.

   Cherry-picking can be done as soon as each patch is merged into `main`,
   or directly before the release; a command sketch follows the diagram below.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```

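A minimal sketch of the cherry-picking above, where `<patch-1-sha>` and `<patch-2-sha>` are placeholders for the patch commit hashes in `main`:

```bash
# Create the release branch from the release tag (first time only),
# then replay the patch commits onto it and publish the branch.
git checkout -b release-0.42 v0.42.0
git cherry-pick <patch-1-sha>
git cherry-pick <patch-2-sha>
git push origin release-0.42
```
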
When all relevant patch commits are cherry-picked, the branch is ready for release.

2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and then pushes it to GitHub (see the sketch after this list).
3. CI workflow triggers on tag push:
   1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
   2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
   3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
   4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.

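Step 2 mirrors the regular-release tagging, only on the release branch; a minimal sketch:

```bash
# Tag the tip of the release branch and push the tag to trigger CI.
git checkout release-0.42
git pull origin release-0.42
git tag v0.42.1
git push origin v0.42.1
```
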
```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, once the release is confirmed, the sequence concludes:

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on merge:
   1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing the tag to GitHub.
   2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.
17
hack/e2e.sh
@@ -60,7 +60,7 @@ done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
@@ -234,8 +234,8 @@ sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
@@ -318,7 +318,7 @@ kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"s
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for HelmReleases be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root

if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
"dashboard": true
@@ -333,7 +338,7 @@ kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root

# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for grafana
@@ -352,5 +357,5 @@ kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
"oidc-enabled": "true"
}}'

timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator

@@ -24,14 +24,14 @@ search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-
 resolved_miss_map=$(
   echo "$miss_map" | while read -r chart version commit; do
     # if version is found in HEAD, it's HEAD
-    if [ $(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml) = "${version}" ]; then
+    if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
       echo "$chart $version HEAD"
       continue
     fi

     # if commit is not HEAD, check if it's valid
-    if [ $commit != "HEAD" ]; then
-      if [ $(git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') != "${version}" ]; then
+    if [ "$commit" != "HEAD" ]; then
+      if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
         echo "Commit $commit for $chart $version is not valid" >&2
         exit 1
       fi
@@ -44,7 +44,7 @@ resolved_miss_map=$(
     # if commit is HEAD, but version is not found in HEAD, check all tags
     found_tag=""
     for tag in $search_commits; do
-      if [ $(git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') = "${version}" ]; then
+      if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
         found_tag=$(git rev-parse --short "${tag}")
         break
       fi

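The quoting added above matters: an unquoted command substitution that expands to nothing leaves the `[` test with a malformed expression. A minimal sketch of the failure mode, with hypothetical values:

    v=$(awk '$1 == "version:" {print $2}' Chart.yaml)   # may print nothing
    [ $v = "1.0.0" ]     # empty $v collapses to `[ = "1.0.0" ]` and errors out
    [ "$v" = "1.0.0" ]   # safe: the empty string is still a single argument
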
@@ -116,15 +116,24 @@ func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(

	resources := make(map[string]resource.Quantity)

-	q := resource.MustParse("0")
+	quantity := resource.MustParse("0")

	for _, ing := range svc.Status.LoadBalancer.Ingress {
		if ing.IP != "" {
-			q.Add(resource.MustParse("1"))
+			quantity.Add(resource.MustParse("1"))
		}
	}

-	resources["public-ips"] = q
+	var resourceLabel string
+	if svc.Annotations != nil {
+		var ok bool
+		resourceLabel, ok = svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]
+		if !ok {
+			resourceLabel = "default"
+		}
+	}
+	resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
+	resources[resourceLabel] = quantity

	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
		// Update owner references with the new monitor
@@ -165,7 +174,12 @@ func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
	resources := make(map[string]resource.Quantity)

	for resourceName, resourceQuantity := range pvc.Status.Capacity {
-		resources[resourceName.String()] = resourceQuantity
+		storageClass := "default"
+		if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName != "" {
+			storageClass = *pvc.Spec.StorageClassName
+		}
+		resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
+		resources[resourceLabel] = resourceQuantity
	}

	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {

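For context, the pool name read above is a MetalLB annotation; a hedged one-liner (assuming MetalLB has annotated the Services) to see which pool each LoadBalancer drew from:

    kubectl get svc -A -o jsonpath='{range .items[*]}{.metadata.namespace}/{.metadata.name}{"\t"}{.metadata.annotations.metallb\.universe\.tf/ip-allocated-from-pool}{"\n"}{end}'
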
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:0f4d8e6863ed074e90f8a7a8390ccd98dae0220119346aba19e85054bb902e2f
+ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b

@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.17.1
+version: 0.19.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -1,4 +1,4 @@
-UBUNTU_CONTAINER_DISK_TAG = v1.30.1
+KUBERNETES_VERSION = v1.32
 KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)

 include ../../../scripts/common-envs.mk
@@ -6,21 +6,26 @@ include ../../../scripts/package.mk

 generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md
+	yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
+	yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
+	yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
+	yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json

 image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler

 image-ubuntu-container-disk:
 	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
 		--provenance false \
-		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
-		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
+		--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
+		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
+		--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
 		--cache-to type=inline \
 		--metadata-file images/ubuntu-container-disk.json \
 		--push=$(PUSH) \
 		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
-	echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
+	echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
 		> images/ubuntu-container-disk.tag
 	rm -f images/ubuntu-container-disk.json

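The tag file written above pins the pushed image to its content digest; a standalone sketch of the same step (file names assumed from the Makefile):

    # read the digest that buildx recorded via --metadata-file and emit a
    # digest-pinned reference for manifests to consume
    digest=$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)
    echo "ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@${digest}" > images/ubuntu-container-disk.tag
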
@@ -27,20 +27,46 @@ How to access to deployed cluster:
 kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
 ```

-# Series
+## Parameters

-<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->
+### Common parameters

-. | U | O | CX | M | RT
-----------------------------|-----|-----|------|-----|------
-*Has GPUs*                  |     |     |      |     |
-*Hugepages*                 |     |     | ✓    | ✓   | ✓
-*Overcommitted Memory*      |     | ✓   |      |     |
-*Dedicated CPU*             |     |     | ✓    |     | ✓
-*Burstable CPU performance* | ✓   | ✓   |      | ✓   |
-*Isolated emulator threads* |     |     | ✓    |     | ✓
-*vNUMA*                     |     |     | ✓    |     | ✓
-*vCPU-To-Memory Ratio*      | 1:4 | 1:4 | 1:2  | 1:8 | 1:4
+| Name | Description | Value |
+| --- | --- | --- |
+| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
+| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
+| `storageClass` | StorageClass used to store user data | `replicated` |
+| `nodeGroups` | nodeGroups configuration | `{}` |
+
+### Cluster Addons
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `addons.certManager.enabled` | Enables the cert-manager | `false` |
+| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
+| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
+| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
+| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
+| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
+| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
+| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
+| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
+| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
+| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
+| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
+
+### Kubernetes control plane configuration
+
+| Name | Description | Value |
+| --- | --- | --- |
+| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
+| `controlPlane.apiServer.resources` | Resources | `{}` |
+| `controlPlane.controllerManager.resources` | Resources | `{}` |
+| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
+| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
+| `controlPlane.scheduler.resources` | Resources | `{}` |
+| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
+| `controlPlane.konnectivity.server.resources` | Resources | `{}` |

 ## U Series

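A hedged example of setting these parameters from the CLI (release, namespace, and chart path are hypothetical):

    helm upgrade kubernetes-demo ./packages/apps/kubernetes -n tenant-demo \
      --reuse-values \
      --set controlPlane.apiServer.resourcesPreset=medium \
      --set controlPlane.scheduler.resources.requests.cpu=100m
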
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/cluster-autoscaler:0.17.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
+ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.17.0@sha256:53f4734109799da8b27f35a3b1afdb4746b5992f1d7b9d1c132ea6242cdd8cf0
+ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.17.0@sha256:1a6605d3bff6342e12bcc257e852a4f89e97e8af6d3d259930ec07c7ad5f001d
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:d842de4637ea6188999464f133c89f63a3bd13f1cb202c10f1f8c0c1c3c3dbd4
+ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a

@@ -1,3 +1,4 @@
+# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
 FROM ubuntu:22.04 as guestfish

 ARG DEBIAN_FRONTEND=noninteractive
@@ -5,6 +6,7 @@ RUN apt-get update \
  && apt-get -y install \
     libguestfs-tools \
     linux-image-generic \
     wget \
+    make \
     bash-completion \
  && apt-get clean
@@ -13,7 +15,10 @@ WORKDIR /build

 FROM guestfish as builder

-RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
+# noble is a code name for the Ubuntu 24.04 LTS release
+RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
+
+ARG KUBERNETES_VERSION

 RUN qemu-img resize image.img 5G \
  && eval "$(guestfish --listen --network)" \
@@ -26,8 +31,8 @@ RUN qemu-img resize image.img 5G \
 && guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
 && guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
 # kubernetes repo
-&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
-&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
+&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
+&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
 # install containerd
 && guestfish --remote command "apt-get update -y" \
 && guestfish --remote command "apt-get install -y containerd.io" \

|
||||
sockets: 1
|
||||
{{- end }}
|
||||
devices:
|
||||
{{- if .group.gpus }}
|
||||
gpus:
|
||||
{{- range $i, $gpu := .group.gpus }}
|
||||
- name: gpu{{ add $i 1 }}
|
||||
deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
disks:
|
||||
- name: system
|
||||
disk:
|
||||
@@ -103,22 +110,22 @@ metadata:
|
||||
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
|
||||
spec:
|
||||
apiServer:
|
||||
{{- if .Values.kamajiControlPlane.apiServer.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- if .Values.controlPlane.apiServer.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
|
||||
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- end }}
|
||||
controllerManager:
|
||||
{{- if .Values.kamajiControlPlane.controllerManager.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- if .Values.controlPlane.controllerManager.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
|
||||
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- end }}
|
||||
scheduler:
|
||||
{{- if .Values.kamajiControlPlane.scheduler.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- if .Values.controlPlane.scheduler.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
|
||||
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||
{{- end }}
|
||||
dataStoreName: "{{ $etcd }}"
|
||||
addons:
|
||||
@@ -128,10 +135,10 @@ spec:
|
||||
konnectivity:
|
||||
server:
|
||||
port: 8132
|
||||
{{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
|
||||
resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
|
||||
{{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
{{- if .Values.controlPlane.konnectivity.server.resources }}
|
||||
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
|
||||
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
|
||||
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||
{{- end }}
|
||||
kubelet:
|
||||
cgroupfs: systemd
|
||||
@@ -276,7 +283,7 @@ spec:
|
||||
kind: KubevirtMachineTemplate
|
||||
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
|
||||
namespace: {{ $.Release.Namespace }}
|
||||
version: v1.30.1
|
||||
version: v1.32.3
|
||||
---
|
||||
apiVersion: cluster.x-k8s.io/v1beta1
|
||||
kind: MachineHealthCheck
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-cert-manager-crds
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cert-manager-crds
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-cert-manager
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cert-manager
|
||||
@@ -30,11 +30,9 @@ spec:
|
||||
upgrade:
|
||||
remediation:
|
||||
retries: -1
|
||||
{{- if .Values.addons.certManager.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-cert-manager-values-override
|
||||
valuesKey: values
|
||||
{{- with .Values.addons.certManager.valuesOverride }}
|
||||
values:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
dependsOn:
|
||||
@@ -47,13 +45,3 @@ spec:
|
||||
- name: {{ .Release.Name }}-cert-manager-crds
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
{{- if .Values.addons.certManager.valuesOverride }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-cert-manager-values-override
|
||||
stringData:
|
||||
values: |
|
||||
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-cilium
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: cilium
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-csi
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: csi
|
||||
|
||||
@@ -20,7 +20,7 @@ spec:
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: docker.io/clastix/kubectl:v1.30.1
|
||||
image: docker.io/clastix/kubectl:v1.32
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
@@ -32,9 +32,13 @@ spec:
|
||||
{{ .Release.Name }}-cilium
|
||||
{{ .Release.Name }}-csi
|
||||
{{ .Release.Name }}-cert-manager
|
||||
{{ .Release.Name }}-cert-manager-crds
|
||||
{{ .Release.Name }}-vertical-pod-autoscaler
|
||||
{{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||
{{ .Release.Name }}-ingress-nginx
|
||||
{{ .Release.Name }}-fluxcd-operator
|
||||
{{ .Release.Name }}-fluxcd
|
||||
{{ .Release.Name }}-gpu-operator
|
||||
-p '{"spec": {"suspend": true}}'
|
||||
--type=merge --field-manager=flux-client-side-apply || true
|
||||
---
|
||||
@@ -67,9 +71,13 @@ rules:
|
||||
- {{ .Release.Name }}-cilium
|
||||
- {{ .Release.Name }}-csi
|
||||
- {{ .Release.Name }}-cert-manager
|
||||
- {{ .Release.Name }}-cert-manager-crds
|
||||
- {{ .Release.Name }}-vertical-pod-autoscaler
|
||||
- {{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||
- {{ .Release.Name }}-ingress-nginx
|
||||
- {{ .Release.Name }}-fluxcd-operator
|
||||
- {{ .Release.Name }}-fluxcd
|
||||
- {{ .Release.Name }}-gpu-operator
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-fluxcd-operator
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: fluxcd-operator
|
||||
@@ -49,7 +49,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-fluxcd
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: fluxcd
|
||||
@@ -73,11 +73,9 @@ spec:
|
||||
upgrade:
|
||||
remediation:
|
||||
retries: -1
|
||||
{{- if .Values.addons.fluxcd.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-fluxcd-values-override
|
||||
valuesKey: values
|
||||
{{- with .Values.addons.fluxcd.valuesOverride }}
|
||||
values:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
@@ -89,14 +87,3 @@ spec:
|
||||
- name: {{ .Release.Name }}-fluxcd-operator
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.fluxcd.valuesOverride }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-fluxcd-values-override
|
||||
stringData:
|
||||
values: |
|
||||
{{- toYaml .Values.addons.fluxcd.valuesOverride | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
{{- if .Values.addons.gpuOperator.enabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-gpu-operator
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: gpu-operator
|
||||
chart:
|
||||
spec:
|
||||
chart: cozy-gpu-operator
|
||||
reconcileStrategy: Revision
|
||||
sourceRef:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
kubeConfig:
|
||||
secretRef:
|
||||
name: {{ .Release.Name }}-admin-kubeconfig
|
||||
key: super-admin.svc
|
||||
targetNamespace: cozy-gpu-operator
|
||||
storageNamespace: cozy-gpu-operator
|
||||
install:
|
||||
createNamespace: true
|
||||
remediation:
|
||||
retries: -1
|
||||
upgrade:
|
||||
remediation:
|
||||
retries: -1
|
||||
{{- with .Values.addons.gpuOperator.valuesOverride }}
|
||||
values:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
- name: {{ .Release.Name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
- name: {{ .Release.Name }}-cilium
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -1,3 +1,15 @@
|
||||
{{- define "cozystack.defaultIngressValues" -}}
|
||||
ingress-nginx:
|
||||
fullnameOverride: ingress-nginx
|
||||
controller:
|
||||
kind: DaemonSet
|
||||
hostNetwork: true
|
||||
service:
|
||||
enabled: false
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/ingress-nginx: ""
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.ingressNginx.enabled }}
|
||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||
kind: HelmRelease
|
||||
@@ -5,7 +17,7 @@ metadata:
|
||||
name: {{ .Release.Name }}-ingress-nginx
|
||||
labels:
|
||||
cozystack.io/repository: system
|
||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
||||
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||
spec:
|
||||
interval: 5m
|
||||
releaseName: ingress-nginx
|
||||
@@ -31,21 +43,7 @@ spec:
|
||||
remediation:
|
||||
retries: -1
|
||||
values:
|
||||
ingress-nginx:
|
||||
fullnameOverride: ingress-nginx
|
||||
controller:
|
||||
kind: DaemonSet
|
||||
hostNetwork: true
|
||||
service:
|
||||
enabled: false
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/ingress-nginx: ""
|
||||
{{- if .Values.addons.ingressNginx.valuesOverride }}
|
||||
valuesFrom:
|
||||
- kind: Secret
|
||||
name: {{ .Release.Name }}-ingress-nginx-values-override
|
||||
valuesKey: values
|
||||
{{- end }}
|
||||
{{- toYaml (deepCopy .Values.addons.ingressNginx.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultIngressValues" .))) | nindent 4 }}
|
||||
dependsOn:
|
||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||
- name: {{ .Release.Name }}
|
||||
@@ -54,14 +52,3 @@ spec:
|
||||
- name: {{ .Release.Name }}-cilium
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.addons.ingressNginx.valuesOverride }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-ingress-nginx-values-override
|
||||
stringData:
|
||||
values: |
|
||||
{{- toYaml .Values.addons.ingressNginx.valuesOverride | nindent 4 }}
|
||||
{{- end }}
|
||||
|
||||
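A hedged example of feeding `valuesOverride` from the CLI so it lands in the mergeOverwrite above (release name and values are hypothetical; `--set-json` needs Helm >= 3.10):

    helm upgrade my-cluster ./packages/apps/kubernetes -n tenant-demo \
      --set addons.ingressNginx.enabled=true \
      --set-json 'addons.ingressNginx.valuesOverride={"ingress-nginx":{"controller":{"kind":"Deployment"}}}'
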
@@ -7,7 +7,7 @@ metadata:
   name: {{ .Release.Name }}-monitoring-agents
   labels:
     cozystack.io/repository: system
-    coztstack.io/target-cluster-name: {{ .Release.Name }}
+    cozystack.io/target-cluster-name: {{ .Release.Name }}
 spec:
   interval: 5m
   releaseName: cozy-monitoring-agents
@@ -38,10 +38,10 @@ spec:
   - name: {{ .Release.Name }}
     namespace: {{ .Release.Namespace }}
 {{- end }}
   - name: {{ .Release.Name }}-cilium
     namespace: {{ .Release.Namespace }}
-  - name: {{ .Release.Name }}-cozy-victoria-metrics-operator
-    namespace: {{ .Release.Namespace }}
+  - name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
+    namespace: {{ .Release.Namespace }}
   values:
     vmagent:
       externalLabels:

@@ -0,0 +1,41 @@
+{{- if .Values.addons.monitoringAgents.enabled }}
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
+  labels:
+    cozystack.io/repository: system
+    cozystack.io/target-cluster-name: {{ .Release.Name }}
+spec:
+  interval: 5m
+  releaseName: vertical-pod-autoscaler-crds
+  chart:
+    spec:
+      chart: cozy-vertical-pod-autoscaler-crds
+      reconcileStrategy: Revision
+      sourceRef:
+        kind: HelmRepository
+        name: cozystack-system
+        namespace: cozy-system
+  kubeConfig:
+    secretRef:
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
+  targetNamespace: cozy-vertical-pod-autoscaler-crds
+  storageNamespace: cozy-vertical-pod-autoscaler-crds
+  install:
+    createNamespace: true
+    remediation:
+      retries: -1
+  upgrade:
+    remediation:
+      retries: -1
+
+  dependsOn:
+  {{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
+  - name: {{ .Release.Name }}
+    namespace: {{ .Release.Namespace }}
+  {{- end }}
+  - name: {{ .Release.Name }}-cilium
+    namespace: {{ .Release.Namespace }}
+{{- end }}
@@ -0,0 +1,67 @@
+{{- define "cozystack.defaultVPAValues" -}}
+{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
+{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
+vertical-pod-autoscaler:
+  recommender:
+    extraArgs:
+      container-name-label: container
+      container-namespace-label: namespace
+      container-pod-name-label: pod
+      storage: prometheus
+      memory-saver: true
+      pod-label-prefix: label_
+      metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
+      pod-name-label: pod
+      pod-namespace-label: namespace
+      prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
+      prometheus-cadvisor-job-name: cadvisor
+    resources:
+      limits:
+        memory: 1600Mi
+      requests:
+        cpu: 100m
+        memory: 1600Mi
+{{- end }}
+
+{{- if .Values.addons.monitoringAgents.enabled }}
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+  name: {{ .Release.Name }}-vertical-pod-autoscaler
+  labels:
+    cozystack.io/repository: system
+    cozystack.io/target-cluster-name: {{ .Release.Name }}
+spec:
+  interval: 5m
+  releaseName: vertical-pod-autoscaler
+  chart:
+    spec:
+      chart: cozy-vertical-pod-autoscaler
+      reconcileStrategy: Revision
+      sourceRef:
+        kind: HelmRepository
+        name: cozystack-system
+        namespace: cozy-system
+  kubeConfig:
+    secretRef:
+      name: {{ .Release.Name }}-admin-kubeconfig
+      key: super-admin.svc
+  targetNamespace: cozy-vertical-pod-autoscaler
+  storageNamespace: cozy-vertical-pod-autoscaler
+  install:
+    createNamespace: true
+    remediation:
+      retries: -1
+  upgrade:
+    remediation:
+      retries: -1
+  values:
+    {{- toYaml (deepCopy .Values.addons.verticalPodAutoscaler.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultVPAValues" .))) | nindent 4 }}
+  dependsOn:
+  {{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
+  - name: {{ .Release.Name }}
+    namespace: {{ .Release.Namespace }}
+  {{- end }}
+  - name: {{ .Release.Name }}-monitoring-agents
+    namespace: {{ .Release.Namespace }}
+{{- end }}

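A hedged smoke test for the Prometheus address the recommender is pointed at (namespace and service names assumed from the template above; requires in-cluster DNS):

    kubectl run curl --rm -it --restart=Never --image=curlimages/curl -- \
      curl -s 'http://vmselect-shortterm.tenant-root.svc.cozy.local:8481/select/0/prometheus/api/v1/query?query=up'
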
@@ -5,7 +5,7 @@ metadata:
   name: {{ .Release.Name }}-cozy-victoria-metrics-operator
   labels:
     cozystack.io/repository: system
-    coztstack.io/target-cluster-name: {{ .Release.Name }}
+    cozystack.io/target-cluster-name: {{ .Release.Name }}
 spec:
   interval: 5m
   releaseName: cozy-victoria-metrics-operator

@@ -1,97 +1,227 @@
 {
-  "title": "Chart Values",
-  "type": "object",
-  "properties": {
-    "host": {
-      "type": "string",
-      "description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
-      "default": ""
-    },
-    "controlPlane": {
-      "type": "object",
-      "properties": {
-        "replicas": {
-          "type": "number",
-          "description": "Number of replicas for Kubernetes contorl-plane components",
-          "default": 2
-        }
-      }
-    },
-    "storageClass": {
-      "type": "string",
-      "description": "StorageClass used to store user data",
-      "default": "replicated"
-    },
-    "addons": {
-      "type": "object",
-      "properties": {
-        "certManager": {
-          "type": "object",
-          "properties": {
-            "enabled": {
-              "type": "boolean",
-              "description": "Enables the cert-manager",
-              "default": false
-            },
-            "valuesOverride": {
-              "type": "object",
-              "description": "Custom values to override",
-              "default": {}
-            }
-          }
-        },
-        "ingressNginx": {
-          "type": "object",
-          "properties": {
-            "enabled": {
-              "type": "boolean",
-              "description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
-              "default": false
-            },
-            "valuesOverride": {
-              "type": "object",
-              "description": "Custom values to override",
-              "default": {}
-            },
-            "hosts": {
-              "type": "array",
-              "description": "List of domain names that should be passed through to the cluster by upper cluster",
-              "default": [],
-              "items": {}
-            }
-          }
-        },
-        "fluxcd": {
-          "type": "object",
-          "properties": {
-            "enabled": {
-              "type": "boolean",
-              "description": "Enables Flux CD",
-              "default": false
-            },
-            "valuesOverride": {
-              "type": "object",
-              "description": "Custom values to override",
-              "default": {}
-            }
-          }
-        },
-        "monitoringAgents": {
-          "type": "object",
-          "properties": {
-            "enabled": {
-              "type": "boolean",
-              "description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
-              "default": false
-            },
-            "valuesOverride": {
-              "type": "object",
-              "description": "Custom values to override",
-              "default": {}
-            }
-          }
-        }
-      }
-    }
-  }
+  "title": "Chart Values",
+  "type": "object",
+  "properties": {
+    "host": {
+      "type": "string",
+      "description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
+      "default": ""
+    },
+    "controlPlane": {
+      "type": "object",
+      "properties": {
+        "replicas": {
+          "type": "number",
+          "description": "Number of replicas for Kubernetes control-plane components",
+          "default": 2
+        },
+        "apiServer": {
+          "type": "object",
+          "properties": {
+            "resourcesPreset": {
+              "type": "string",
+              "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+              "default": "small",
+              "enum": [
+                "none",
+                "nano",
+                "micro",
+                "small",
+                "medium",
+                "large",
+                "xlarge",
+                "2xlarge"
+              ]
+            },
+            "resources": {
+              "type": "object",
+              "description": "Resources",
+              "default": {}
+            }
+          }
+        },
+        "controllerManager": {
+          "type": "object",
+          "properties": {
+            "resources": {
+              "type": "object",
+              "description": "Resources",
+              "default": {}
+            },
+            "resourcesPreset": {
+              "type": "string",
+              "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+              "default": "micro",
+              "enum": [
+                "none",
+                "nano",
+                "micro",
+                "small",
+                "medium",
+                "large",
+                "xlarge",
+                "2xlarge"
+              ]
+            }
+          }
+        },
+        "scheduler": {
+          "type": "object",
+          "properties": {
+            "resourcesPreset": {
+              "type": "string",
+              "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+              "default": "micro",
+              "enum": [
+                "none",
+                "nano",
+                "micro",
+                "small",
+                "medium",
+                "large",
+                "xlarge",
+                "2xlarge"
+              ]
+            },
+            "resources": {
+              "type": "object",
+              "description": "Resources",
+              "default": {}
+            }
+          }
+        },
+        "konnectivity": {
+          "type": "object",
+          "properties": {
+            "server": {
+              "type": "object",
+              "properties": {
+                "resourcesPreset": {
+                  "type": "string",
+                  "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
+                  "default": "micro",
+                  "enum": [
+                    "none",
+                    "nano",
+                    "micro",
+                    "small",
+                    "medium",
+                    "large",
+                    "xlarge",
+                    "2xlarge"
+                  ]
+                },
+                "resources": {
+                  "type": "object",
+                  "description": "Resources",
+                  "default": {}
+                }
+              }
+            }
+          }
+        }
+      }
+    },
+    "storageClass": {
+      "type": "string",
+      "description": "StorageClass used to store user data",
+      "default": "replicated"
+    },
+    "addons": {
+      "type": "object",
+      "properties": {
+        "certManager": {
+          "type": "object",
+          "properties": {
+            "enabled": {
+              "type": "boolean",
+              "description": "Enables the cert-manager",
+              "default": false
+            },
+            "valuesOverride": {
+              "type": "object",
+              "description": "Custom values to override",
+              "default": {}
+            }
+          }
+        },
+        "ingressNginx": {
+          "type": "object",
+          "properties": {
+            "enabled": {
+              "type": "boolean",
+              "description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
+              "default": false
+            },
+            "valuesOverride": {
+              "type": "object",
+              "description": "Custom values to override",
+              "default": {}
+            },
+            "hosts": {
+              "type": "array",
+              "description": "List of domain names that should be passed through to the cluster by upper cluster",
+              "default": [],
+              "items": {}
+            }
+          }
+        },
+        "gpuOperator": {
+          "type": "object",
+          "properties": {
+            "enabled": {
+              "type": "boolean",
+              "description": "Enables the gpu-operator",
+              "default": false
+            },
+            "valuesOverride": {
+              "type": "object",
+              "description": "Custom values to override",
+              "default": {}
+            }
+          }
+        },
+        "fluxcd": {
+          "type": "object",
+          "properties": {
+            "enabled": {
+              "type": "boolean",
+              "description": "Enables Flux CD",
+              "default": false
+            },
+            "valuesOverride": {
+              "type": "object",
+              "description": "Custom values to override",
+              "default": {}
+            }
+          }
+        },
+        "monitoringAgents": {
+          "type": "object",
+          "properties": {
+            "enabled": {
+              "type": "boolean",
+              "description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
+              "default": false
+            },
+            "valuesOverride": {
+              "type": "object",
+              "description": "Custom values to override",
+              "default": {}
+            }
+          }
+        },
+        "verticalPodAutoscaler": {
+          "type": "object",
+          "properties": {
+            "valuesOverride": {
+              "type": "object",
+              "description": "Custom values to override",
+              "default": {}
+            }
+          }
+        }
+      }
+    }
+  }
 }

@@ -1,12 +1,10 @@
 ## @section Common parameters

 ## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
-## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
+## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
 ## @param storageClass StorageClass used to store user data
 ##
 host: ""
-controlPlane:
-  replicas: 2
 storageClass: replicated

 ## @param nodeGroups [object] nodeGroups configuration
@@ -24,6 +22,14 @@ nodeGroups:
       cpu: ""
       memory: ""

+    ## List of GPUs to attach (WARN: NVIDIA driver requires at least 4 GiB of RAM)
+    ## e.g:
+    ## instanceType: "u1.xlarge"
+    ## gpus:
+    ## - name: nvidia.com/AD102GL_L40S
+    gpus: []
+
+
 ## @section Cluster Addons
 ##
 addons:
@@ -52,6 +58,14 @@ addons:
     hosts: []
     valuesOverride: {}

+  ## GPU-operator: NVIDIA GPU Operator
+  ##
+  gpuOperator:
+    ## @param addons.gpuOperator.enabled Enables the gpu-operator
+    ## @param addons.gpuOperator.valuesOverride Custom values to override
+    enabled: false
+    valuesOverride: {}
+
   ## Flux CD
   ##
   fluxcd:
@@ -70,62 +84,49 @@ addons:
     enabled: false
     valuesOverride: {}

-## @section Kamaji control plane
+  ## VerticalPodAutoscaler
+  ##
+  verticalPodAutoscaler:
+    ## @param addons.verticalPodAutoscaler.valuesOverride Custom values to override
+    ##
+    valuesOverride: {}
+
+## @section Kubernetes control plane configuration
 ##
-kamajiControlPlane:
+controlPlane:
+  replicas: 2

   apiServer:
-    ## @param kamajiControlPlane.apiServer.resources Resources
-    resources: {}
-    # resources:
-    #   limits:
-    #     cpu: 4000m
-    #     memory: 4Gi
-    #   requests:
-    #     cpu: 100m
-    #     memory: 512Mi
-
-    ## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+    ## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+    ## @param controlPlane.apiServer.resources Resources
+    ## e.g:
+    ## resources:
+    ##   limits:
+    ##     cpu: 4000m
+    ##     memory: 4Gi
+    ##   requests:
+    ##     cpu: 100m
+    ##     memory: 512Mi
+    ##
     resourcesPreset: "small"
+    resources: {}

   controllerManager:
-    ## @param kamajiControlPlane.controllerManager.resources Resources
-    resources: {}
-    # resources:
-    #   limits:
-    #     cpu: 4000m
-    #     memory: 4Gi
-    #   requests:
-    #     cpu: 100m
-    #     memory: 512Mi
-
-    ## @param kamajiControlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+    ## @param controlPlane.controllerManager.resources Resources
+    ## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
    resourcesPreset: "micro"
+    resources: {}

   scheduler:
-    ## @param kamajiControlPlane.scheduler.resources Resources
-    resources: {}
-    # resources:
-    #   limits:
-    #     cpu: 4000m
-    #     memory: 4Gi
-    #   requests:
-    #     cpu: 100m
-    #     memory: 512Mi
-
-    ## @param kamajiControlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+    ## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+    ## @param controlPlane.scheduler.resources Resources
     resourcesPreset: "micro"
-  addons:
-    konnectivity:
-      server:
-        ## @param kamajiControlPlane.addons.konnectivity.server.resources Resources
-        resources: {}
-        # resources:
-        #   limits:
-        #     cpu: 4000m
-        #     memory: 4Gi
-        #   requests:
-        #     cpu: 100m
-        #     memory: 512Mi
-
-        ## @param kamajiControlPlane.addons.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-        resourcesPreset: "micro"
+    resources: {}

+  konnectivity:
+    server:
+      ## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
+      ## @param controlPlane.konnectivity.server.resources Resources
+      resourcesPreset: "micro"
+      resources: {}

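These `## @param` annotations are what the package's `generate` target consumes; a sketch of regenerating the docs and schema (paths assumed, enum patch mirroring the Makefile above):

    readme-generator -v values.yaml -s values.schema.json -r README.md
    yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
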
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.10.0
+version: 0.10.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -13,9 +13,6 @@ spec:
   jobTemplate:
     spec:
       backoffLimit: 2
-      template:
-        spec:
-          restartPolicy: OnFailure
       template:
         metadata:
           annotations:
@@ -24,7 +21,7 @@ spec:
         spec:
          imagePullSecrets:
           - name: {{ .Release.Name }}-regsecret
-          restartPolicy: Never
+          restartPolicy: OnFailure
           containers:
           - name: pgdump
             image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"

@@ -4,4 +4,4 @@ description: Separated tenant namespace
 icon: /logos/tenant.svg

 type: application
-version: 1.9.1
+version: 1.9.2

@@ -24,6 +24,7 @@ spec:
   ingress:
   - fromEntities:
     - world
+    - cluster
   egress:
   - toEntities:
     - world

@@ -57,7 +57,9 @@ kubernetes 0.15.1 160e4e2a
 kubernetes 0.15.2 8267072d
 kubernetes 0.16.0 077045b0
 kubernetes 0.17.0 1fbbfcd0
-kubernetes 0.17.1 HEAD
+kubernetes 0.17.1 fd240701
+kubernetes 0.18.0 721c12a7
+kubernetes 0.19.0 HEAD
 mysql 0.1.0 263e47be
 mysql 0.2.0 c24a103f
 mysql 0.3.0 53f2365e
@@ -87,7 +89,8 @@ postgres 0.7.0 4b90bf5a
 postgres 0.7.1 1ec10165
 postgres 0.8.0 4e68e65c
 postgres 0.9.0 8267072d
-postgres 0.10.0 HEAD
+postgres 0.10.0 721c12a7
+postgres 0.10.1 HEAD
 rabbitmq 0.1.0 263e47be
 rabbitmq 0.2.0 53f2365e
 rabbitmq 0.3.0 6c5cf5bf
@@ -128,7 +131,8 @@ tenant 1.6.8 bc95159a
 tenant 1.7.0 24fa7222
 tenant 1.8.0 160e4e2a
 tenant 1.9.0 728743db
-tenant 1.9.1 HEAD
+tenant 1.9.1 721c12a7
+tenant 1.9.2 HEAD
 virtual-machine 0.1.4 f2015d65
 virtual-machine 0.1.5 263e47be
 virtual-machine 0.2.0 c0685f43
@@ -141,7 +145,8 @@ virtual-machine 0.7.1 0ab39f20
 virtual-machine 0.8.0 3fa4dd3a
 virtual-machine 0.8.1 93c46161
 virtual-machine 0.8.2 de19450f
-virtual-machine 0.9.0 HEAD
+virtual-machine 0.9.0 721c12a7
+virtual-machine 0.9.1 HEAD
 vm-disk 0.1.0 d971f2ff
 vm-disk 0.1.1 HEAD
 vm-instance 0.1.0 1ec10165
@@ -151,7 +156,8 @@ vm-instance 0.4.0 e23286a3
 vm-instance 0.4.1 0ab39f20
 vm-instance 0.5.0 3fa4dd3a
 vm-instance 0.5.1 de19450f
-vm-instance 0.6.0 HEAD
+vm-instance 0.6.0 721c12a7
+vm-instance 0.6.1 HEAD
 vpn 0.1.0 263e47be
 vpn 0.2.0 53f2365e
 vpn 0.3.0 6c5cf5bf

@@ -17,7 +17,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.9.0
+version: 0.9.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -74,7 +74,8 @@ spec:
 {{- if .Values.gpus }}
       gpus:
 {{- range $i, $gpu := .Values.gpus }}
-      - deviceName: {{ $gpu.name }}
+      - name: gpu{{ add $i 1 }}
+        deviceName: {{ $gpu.name }}
 {{- end }}
 {{- end }}
       disks:

@@ -17,7 +17,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.0
+version: 0.6.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to

@@ -46,7 +46,8 @@ spec:
 {{- if .Values.gpus }}
       gpus:
 {{- range $i, $gpu := .Values.gpus }}
-      - deviceName: {{ $gpu.name }}
+      - name: gpu{{ add $i 1 }}
+        deviceName: {{ $gpu.name }}
 {{- end }}
 {{- end }}
       disks:

@@ -30,6 +30,8 @@ FROM alpine:3.21

 RUN apk add --no-cache make
 RUN apk add helm kubectl --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
+RUN apk add yq
+RUN apk add coreutils

 COPY scripts /cozystack/scripts
 COPY --from=builder /src/packages/core /cozystack/packages/core

@@ -1,2 +1,2 @@
 cozystack:
-  image: ghcr.io/cozystack/cozystack/installer:v0.29.1@sha256:d63b1cc791ca75d53a7270940189d1401bbeb08f0d54d8ae29dae0ab8a6ef230
+  image: ghcr.io/cozystack/cozystack/installer:v0.30.2@sha256:59996588b5d59b5593fb34442b2f2ed8ef466d138b229a8d37beb6f70141a690

@@ -7,7 +7,11 @@ show:
 	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS)

 apply:
-	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) | kubectl apply -f-
+	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) \
+		| kubectl apply -f-
+	kubectl delete helmreleases.helm.toolkit.fluxcd.io -l cozystack.io/marked-for-deletion=true -A

 reconcile: apply

 namespaces-show:
 	helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml

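A hedged pre-flight check for the new delete step (the label is set by the bundle template further below):

    # list the HelmReleases that `make apply` would prune
    kubectl get helmreleases.helm.toolkit.fluxcd.io -A -l cozystack.io/marked-for-deletion=true
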
@@ -270,7 +270,10 @@ releases:
 {{- end }}
 {{- end }}
 {{- end }}
+    frontend:
+      resourcesPreset: "none"
+    dashboard:
+      resourcesPreset: "none"
 {{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
 {{- $branding := dig "data" "branding" "" $cozystackBranding }}
 {{- if $branding }}

@@ -168,7 +168,10 @@ releases:
 {{- end }}
 {{- end }}
 {{- end }}
+    frontend:
+      resourcesPreset: "none"
+    dashboard:
+      resourcesPreset: "none"
 {{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
 {{- $branding := dig "data" "branding" "" $cozystackBranding }}
 {{- if $branding }}

@@ -7,12 +7,23 @@

 {{/* collect dependency namespaces from releases */}}
 {{- range $x := $bundle.releases }}
-{{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
+  {{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
 {{- end }}

 {{- range $x := $bundle.releases }}
-{{- if not (has $x.name $disabledComponents) }}
-{{- if or (not $x.optional) (and ($x.optional) (has $x.name $enabledComponents)) }}
+
+{{- $shouldInstall := true }}
+{{- $shouldDelete := false }}
+{{- if or (has $x.name $disabledComponents) (and ($x.optional) (not (has $x.name $enabledComponents))) }}
+  {{- $shouldInstall = false }}
+  {{- if $.Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
+    {{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" $x.namespace $x.name }}
+      {{- $shouldDelete = true }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+
+{{- if or $shouldInstall $shouldDelete }}
 ---
 apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease
@@ -22,6 +33,9 @@ metadata:
   labels:
     cozystack.io/repository: system
     cozystack.io/system-app: "true"
+{{- if $shouldDelete }}
+    cozystack.io/marked-for-deletion: "true"
+{{- end }}
 spec:
   interval: 5m
   releaseName: {{ $x.releaseName | default $x.name }}
@@ -47,10 +61,10 @@ spec:
 {{- end }}
 {{- $values := dict }}
 {{- with $x.values }}
-{{- $values = merge . $values }}
+  {{- $values = merge . $values }}
 {{- end }}
 {{- with index $cozyConfig.data (printf "values-%s" $x.name) }}
-{{- $values = merge (fromYaml .) $values }}
+  {{- $values = merge (fromYaml .) $values }}
 {{- end }}
 {{- with $values }}
   values:
@@ -70,13 +84,12 @@ spec:

 {{- with $x.dependsOn }}
   dependsOn:
-  {{- range $dep := . }}
-  {{- if not (has $dep $disabledComponents) }}
+    {{- range $dep := . }}
+      {{- if not (has $dep $disabledComponents) }}
   - name: {{ $dep }}
     namespace: {{ index $dependencyNamespaces $dep }}
-  {{- end }}
-  {{- end }}
-  {{- end }}
+      {{- end }}
+    {{- end }}
 {{- end }}
 {{- end }}
 {{- end }}

@@ -2,7 +2,7 @@ NAMESPACE=cozy-e2e-tests
 NAME := sandbox
 CLEAN := 1
 TESTING_APPS := $(shell find ../../apps -maxdepth 1 -mindepth 1 -type d | awk -F/ '{print $$NF}')
-SANDBOX_NAME := cozy-e2e-sandbox
+SANDBOX_NAME := cozy-e2e-sandbox-$(shell echo "$$(hostname):$$(pwd)" | sha256sum | cut -c -6)

 ROOT_DIR = $(dir $(abspath $(firstword $(MAKEFILE_LIST))/../../..))

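The suffix scheme above keeps parallel checkouts on one host from colliding; a standalone sketch of the same derivation:

    # stable six-character suffix per host + working-directory pair
    suffix=$(echo "$(hostname):$(pwd)" | sha256sum | cut -c -6)
    echo "cozy-e2e-sandbox-${suffix}"
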
@@ -14,3 +14,4 @@ RUN curl -LO "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kube
  && mv kubectl /usr/local/bin/kubectl
RUN curl -sSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -s - --version "v${HELM_VERSION}"
RUN wget https://github.com/mikefarah/yq/releases/download/v4.44.3/yq_linux_amd64 -O /usr/local/bin/yq && chmod +x /usr/local/bin/yq
RUN curl -s https://fluxcd.io/install.sh | bash

@@ -1,2 +1,2 @@
e2e:
  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.29.1@sha256:f239dc2d06dfe43fb3192531e994bdb10414d42d56d8659b10951bb4fe434f80
  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.2@sha256:31273d6b42dc88c2be2ff9ba64564d1b12e70ae8a5480953341b0d113ac7d4bd

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.29.1@sha256:f0c1d531af04ffde003755df2b6fb2fef9ba0d8355aa55d728de523c623b08a0
ghcr.io/cozystack/cozystack/matchbox:v0.30.2@sha256:307d382f75f1dcb39820c73b93b2ce576cdb6d58032679bda7d926999c677900

@@ -3,4 +3,4 @@ name: ingress
description: NGINX Ingress Controller
icon: /logos/ingress-nginx.svg
type: application
version: 1.4.0
version: 1.5.0

@@ -4,12 +4,13 @@

### Common parameters

| Name             | Description                                                        | Value   |
| ---------------- | ------------------------------------------------------------------ | ------- |
| `replicas`       | Number of ingress-nginx replicas                                   | `2`     |
| `externalIPs`    | List of externalIPs for service.                                   | `[]`    |
| `whitelist`      | List of client networks                                            | `[]`    |
| `clouflareProxy` | Restoring original visitor IPs when Cloudflare proxied is enabled  | `false` |
| `dashboard`      | Should ingress serve Cozystack service dashboard                   | `false` |
| `cdiUploadProxy` | Should ingress serve CDI upload proxy                              | `false` |

| Name              | Description                                                        | Value   |
| ----------------- | ------------------------------------------------------------------ | ------- |
| `replicas`        | Number of ingress-nginx replicas                                   | `2`     |
| `externalIPs`     | List of externalIPs for service.                                   | `[]`    |
| `whitelist`       | List of client networks                                            | `[]`    |
| `clouflareProxy`  | Restoring original visitor IPs when Cloudflare proxied is enabled  | `false` |
| `dashboard`       | Should ingress serve Cozystack service dashboard                   | `false` |
| `cdiUploadProxy`  | Should ingress serve CDI upload proxy                              | `false` |
| `virtExportProxy` | Should ingress serve KubeVirt export proxy                         | `false` |

@@ -35,6 +35,11 @@
      "type": "boolean",
      "description": "Should ingress serve CDI upload proxy",
      "default": false
    },
    "virtExportProxy": {
      "type": "boolean",
      "description": "Should ingress serve KubeVirt export proxy",
      "default": false
    }
  }
}
@@ -30,3 +30,6 @@ dashboard: false

## @param cdiUploadProxy Should ingress serve CDI upload proxy
cdiUploadProxy: false

## @param virtExportProxy Should ingress serve KubeVirt export proxy
virtExportProxy: false
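A hedged usage sketch: enabling the new flag in an ingress release's values renders the export-proxy Ingress added in the new file below; `virtExportProxy` is the only new knob, the rest are unchanged defaults:

    replicas: 2
    dashboard: false
    cdiUploadProxy: false
    virtExportProxy: true   # new in 1.5.0; serves the KubeVirt export proxy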
packages/extra/ingress/vm-exportproxy.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $issuerType := (index $cozyConfig.data "clusterissuer") | default "http01" }}

{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}

{{- if .Values.virtExportProxy }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    cert-manager.io/cluster-issuer: letsencrypt-prod
    {{- if eq $issuerType "cloudflare" }}
    {{- else }}
    acme.cert-manager.io/http01-ingress-class: {{ .Release.Namespace }}
    {{- end }}
  name: virt-exportproxy-{{ .Release.Namespace }}
  namespace: cozy-kubevirt
spec:
  ingressClassName: {{ .Release.Namespace }}
  rules:
  - host: virt-exportproxy.{{ $host }}
    http:
      paths:
      - backend:
          service:
            name: virt-exportproxy
            port:
              number: 443
        path: /
        pathType: ImplementationSpecific
  tls:
  - hosts:
    - virt-exportproxy.{{ $host }}
    secretName: virt-exportproxy-{{ .Release.Namespace }}-tls
{{- end }}
@@ -3,4 +3,4 @@ name: monitoring
description: Monitoring and observability stack
icon: /logos/monitoring.svg
type: application
version: 1.9.1
version: 1.9.2

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/grafana:1.9.1@sha256:24382d445bf7a39ed988ef4dc7a0d9f084db891fcb5f42fd2e64622710b9457e
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399

@@ -4,6 +4,8 @@ kind: VLogs
metadata:
  name: {{ .name }}
spec:
  image:
    tag: v1.17.0-victorialogs
  storage:
    resources:
      requests:
@@ -16,7 +16,8 @@ ingress 1.0.0 d7cfa53c
ingress 1.1.0 5bbc488e
ingress 1.2.0 28fca4ef
ingress 1.3.0 fde4bcfa
ingress 1.4.0 HEAD
ingress 1.4.0 fd240701
ingress 1.5.0 HEAD
monitoring 1.0.0 d7cfa53c
monitoring 1.1.0 25221fdc
monitoring 1.2.0 f81be075
@@ -34,7 +35,8 @@ monitoring 1.7.0 2a976afe
monitoring 1.8.0 8c460528
monitoring 1.8.1 8267072d
monitoring 1.9.0 45a7416c
monitoring 1.9.1 HEAD
monitoring 1.9.1 fd240701
monitoring 1.9.2 HEAD
seaweedfs 0.1.0 71514249
seaweedfs 0.2.0 5fb9cfe3
seaweedfs 0.2.1 fde4bcfa

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:6e0a47fb639b27181848d38575577a3cc145486828f50d5fb899e167a3b46c84
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:a47d2743d01bff0ce60aa745fdff54f9b7184dff8679b11ab4ecd08ac663012b

@@ -1,6 +1,6 @@
apiVersion: v2
appVersion: 0.17.0
appVersion: 0.19.0
description: Cluster API Operator
name: cluster-api-operator
type: application
version: 0.17.0
version: 0.19.0

@@ -1,33 +1,17 @@
# Addon provider
{{- if .Values.addon }}
{{- $addons := split ";" .Values.addon }}
{{- $addonNamespace := "" }}
{{- $addonName := "" }}
{{- $addonVersion := "" }}
{{- range $addon := $addons }}
{{- $addonArgs := split ":" $addon }}
{{- $addonArgsLen := len $addonArgs }}
{{- if eq $addonArgsLen 3 }}
{{- $addonNamespace = $addonArgs._0 }}
{{- $addonName = $addonArgs._1 }}
{{- $addonVersion = $addonArgs._2 }}
{{- else if eq $addonArgsLen 2 }}
{{- $addonNamespace = print $addonArgs._0 "-addon-system" }}
{{- $addonName = $addonArgs._0 }}
{{- $addonVersion = $addonArgs._1 }}
{{- else if eq $addonArgsLen 1 }}
{{- $addonNamespace = print $addonArgs._0 "-addon-system" }}
{{- $addonName = $addonArgs._0 }}
{{- else }}
{{- fail "addon provider argument should have the following format helm:v1.0.0 or mynamespace:helm:v1.0.0" }}
{{- end }}
{{- range $name, $addon := $.Values.addon }}
{{- $addonNamespace := default ( printf "%s-%s" $name "addon-system" ) (get $addon "namespace") }}
{{- $addonName := $name }}
{{- $addonVersion := get $addon "version" }}
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: {{ $addonNamespace }}
---
@@ -37,8 +21,10 @@ metadata:
  name: {{ $addonName }}
  namespace: {{ $addonNamespace }}
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- if or $addonVersion $.Values.secretName }}
spec:
@@ -52,5 +38,24 @@ spec:
{{- if $.Values.secretNamespace }}
  secretNamespace: {{ $.Values.secretNamespace }}
{{- end }}
{{- if $addon.manifestPatches }}
  manifestPatches: {{ toYaml $addon.manifestPatches | nindent 4 }}
{{- end }}
{{- if $addon.additionalManifests }}
  additionalManifests:
    name: {{ $addon.additionalManifests.name }}
    {{- if $addon.additionalManifests.namespace }}
    namespace: {{ $addon.additionalManifests.namespace }}
    {{- end }} {{/* if $addon.additionalManifests.namespace */}}
{{- end }}
{{- if $addon.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $addon.additionalManifests.name }}
  namespace: {{ default $addonNamespace $addon.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $addon.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $addon := .Values.addon */}}

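The rewrite above replaces the old `namespace:name:version` string format with a map keyed by provider name. A minimal values sketch under the new scheme; the version and manifest names are illustrative assumptions, not taken from the diff:

    addon:
      helm:                               # provider name is the map key
        version: v0.2.6                   # illustrative; omit to use the operator default
        # namespace defaults to "helm-addon-system" when not set
        additionalManifests:
          name: helm-addon-extra          # hypothetical ConfigMap name
          manifests: |
            # raw manifests embedded into the generated ConfigMap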
@@ -1,33 +1,18 @@
# Bootstrap provider
{{- if .Values.bootstrap }}
{{- $bootstraps := split ";" .Values.bootstrap }}
{{- $bootstrapNamespace := "" }}
{{- $bootstrapName := "" }}
{{- $bootstrapVersion := "" }}
{{- range $bootstrap := $bootstraps }}
{{- $bootstrapArgs := split ":" $bootstrap }}
{{- $bootstrapArgsLen := len $bootstrapArgs }}
{{- if eq $bootstrapArgsLen 3 }}
{{- $bootstrapNamespace = $bootstrapArgs._0 }}
{{- $bootstrapName = $bootstrapArgs._1 }}
{{- $bootstrapVersion = $bootstrapArgs._2 }}
{{- else if eq $bootstrapArgsLen 2 }}
{{- $bootstrapNamespace = print $bootstrapArgs._0 "-bootstrap-system" }}
{{- $bootstrapName = $bootstrapArgs._0 }}
{{- $bootstrapVersion = $bootstrapArgs._1 }}
{{- else if eq $bootstrapArgsLen 1 }}
{{- $bootstrapNamespace = print $bootstrapArgs._0 "-bootstrap-system" }}
{{- $bootstrapName = $bootstrapArgs._0 }}
{{- else }}
{{- fail "bootstrap provider argument should have the following format kubeadm:v1.0.0 or mynamespace:kubeadm:v1.0.0" }}
{{- end }}
{{- range $name, $bootstrap := $.Values.bootstrap }}
{{- $bootstrapNamespace := default ( printf "%s-%s" $name "bootstrap-system" ) (get $bootstrap "namespace") }}
{{- $bootstrapName := $name }}
{{- $bootstrapVersion := get $bootstrap "version" }}
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: {{ $bootstrapNamespace }}
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -36,8 +21,11 @@ metadata:
  name: {{ $bootstrapName }}
  namespace: {{ $bootstrapNamespace }}
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- if or $bootstrapVersion $.Values.configSecret.name }}
spec:
{{- end}}
@@ -51,5 +39,24 @@ spec:
  namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $bootstrap.manifestPatches }}
  manifestPatches: {{ toYaml $bootstrap.manifestPatches | nindent 4 }}
{{- end }}
{{- if $bootstrap.additionalManifests }}
  additionalManifests:
    name: {{ $bootstrap.additionalManifests.name }}
    {{- if $bootstrap.additionalManifests.namespace }}
    namespace: {{ $bootstrap.additionalManifests.namespace }}
    {{- end }} {{/* if $bootstrap.additionalManifests.namespace */}}
{{- end }}
{{- if $bootstrap.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $bootstrap.additionalManifests.name }}
  namespace: {{ default $bootstrapNamespace $bootstrap.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $bootstrap.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $bootstrap := .Values.bootstrap */}}

@@ -1,33 +1,18 @@
# Control plane provider
{{- if .Values.controlPlane }}
{{- $controlPlanes := split ";" .Values.controlPlane }}
{{- $controlPlaneNamespace := "" }}
{{- $controlPlaneName := "" }}
{{- $controlPlaneVersion := "" }}
{{- range $controlPlane := $controlPlanes }}
{{- $controlPlaneArgs := split ":" $controlPlane }}
{{- $controlPlaneArgsLen := len $controlPlaneArgs }}
{{- if eq $controlPlaneArgsLen 3 }}
{{- $controlPlaneNamespace = $controlPlaneArgs._0 }}
{{- $controlPlaneName = $controlPlaneArgs._1 }}
{{- $controlPlaneVersion = $controlPlaneArgs._2 }}
{{- else if eq $controlPlaneArgsLen 2 }}
{{- $controlPlaneNamespace = print $controlPlaneArgs._0 "-control-plane-system" }}
{{- $controlPlaneName = $controlPlaneArgs._0 }}
{{- $controlPlaneVersion = $controlPlaneArgs._1 }}
{{- else if eq $controlPlaneArgsLen 1 }}
{{- $controlPlaneNamespace = print $controlPlaneArgs._0 "-control-plane-system" }}
{{- $controlPlaneName = $controlPlaneArgs._0 }}
{{- else }}
{{- fail "controlplane provider argument should have the following format kubeadm:v1.0.0 or mynamespace:kubeadm:v1.0.0" }}
{{- end }}
{{- range $name, $controlPlane := $.Values.controlPlane }}
{{- $controlPlaneNamespace := default ( printf "%s-%s" $name "control-plane-system" ) (get $controlPlane "namespace") }}
{{- $controlPlaneName := $name }}
{{- $controlPlaneVersion := get $controlPlane "version" }}
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: {{ $controlPlaneNamespace }}
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -36,8 +21,11 @@ metadata:
  name: {{ $controlPlaneName }}
  namespace: {{ $controlPlaneNamespace }}
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- if or $controlPlaneVersion $.Values.configSecret.name $.Values.manager }}
spec:
{{- end}}
@@ -64,5 +52,24 @@ spec:
  namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $controlPlane.manifestPatches }}
  manifestPatches: {{ toYaml $controlPlane.manifestPatches | nindent 4 }}
{{- end }}
{{- if $controlPlane.additionalManifests }}
  additionalManifests:
    name: {{ $controlPlane.additionalManifests.name }}
    {{- if $controlPlane.additionalManifests.namespace }}
    namespace: {{ $controlPlane.additionalManifests.namespace }}
    {{- end }} {{/* if $controlPlane.additionalManifests.namespace */}}
{{- end }}
{{- if $controlPlane.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $controlPlane.additionalManifests.name }}
  namespace: {{ default $controlPlaneNamespace $controlPlane.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $controlPlane.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $controlPlane := .Values.controlPlane */}}

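The bootstrap and control-plane templates follow the same string-to-map migration as the addon template. A hedged sketch of equivalent values; the versions are illustrative assumptions:

    bootstrap:
      kubeadm:
        version: v1.9.5                   # illustrative
    controlPlane:
      kubeadm:
        version: v1.9.5                   # illustrative
        # namespace defaults to "kubeadm-control-plane-system" when omitted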
@@ -1,4 +1,4 @@
{{- if or .Values.addon .Values.bootstrap .Values.controlPlane .Values.infrastructure }}
{{- if or .Values.addon .Values.bootstrap .Values.controlPlane .Values.infrastructure .Values.ipam }}
# Deploy core components if not specified
{{- if not .Values.core }}
---
@@ -6,8 +6,11 @@ apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: capi-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -16,8 +19,11 @@ metadata:
  name: cluster-api
  namespace: capi-system
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- with .Values.configSecret }}
spec:
  configSecret:
@@ -28,4 +34,3 @@ spec:
{{- end }}
{{- end }}
{{- end }}

@@ -1,32 +1,18 @@
# Core provider
{{- if .Values.core }}
{{- $coreArgs := split ":" .Values.core }}
{{- $coreArgsLen := len $coreArgs }}
{{- $coreVersion := "" }}
{{- $coreNamespace := "" }}
{{- $coreName := "" }}
{{- $coreVersion := "" }}
{{- if eq $coreArgsLen 3 }}
{{- $coreNamespace = $coreArgs._0 }}
{{- $coreName = $coreArgs._1 }}
{{- $coreVersion = $coreArgs._2 }}
{{- else if eq $coreArgsLen 2 }}
{{- $coreNamespace = "capi-system" }}
{{- $coreName = $coreArgs._0 }}
{{- $coreVersion = $coreArgs._1 }}
{{- else if eq $coreArgsLen 1 }}
{{- $coreNamespace = "capi-system" }}
{{- $coreName = $coreArgs._0 }}
{{- else }}
{{- fail "core provider argument should have the following format cluster-api:v1.0.0 or mynamespace:cluster-api:v1.0.0" }}
{{- end }}
{{- range $name, $core := $.Values.core }}
{{- $coreNamespace := default "capi-system" (get $core "namespace") }}
{{- $coreName := $name }}
{{- $coreVersion := get $core "version" }}
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: {{ $coreNamespace }}
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -35,8 +21,10 @@ metadata:
  name: {{ $coreName }}
  namespace: {{ $coreNamespace }}
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- if or $coreVersion $.Values.configSecret.name $.Values.manager }}
spec:
@@ -45,8 +33,8 @@ spec:
  version: {{ $coreVersion }}
{{- end }}
{{- if $.Values.manager }}
  manager:
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.core }}
  manager:
    featureGates:
      {{- range $key, $value := $.Values.manager.featureGates.core }}
      {{ $key }}: {{ $value }}
@@ -60,4 +48,24 @@ spec:
  namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $core.manifestPatches }}
  manifestPatches: {{ toYaml $core.manifestPatches | nindent 4 }}
{{- end }}
{{- if $core.additionalManifests }}
  additionalManifests:
    name: {{ $core.additionalManifests.name }}
    {{- if $core.additionalManifests.namespace }}
    namespace: {{ $core.additionalManifests.namespace }}
    {{- end }}
{{- end }}
{{- if $core.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $core.additionalManifests.name }}
  namespace: {{ default $coreNamespace $core.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $core.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $core := .Values.core */}}

@@ -7,8 +7,10 @@ apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: capi-kubeadm-bootstrap-system
---
@@ -18,8 +20,10 @@ metadata:
  name: kubeadm
  namespace: capi-kubeadm-bootstrap-system
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- with .Values.configSecret }}
spec:
@@ -37,8 +41,10 @@ apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: capi-kubeadm-control-plane-system
---
@@ -48,14 +54,16 @@ metadata:
  name: kubeadm
  namespace: capi-kubeadm-control-plane-system
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- with .Values.configSecret }}
spec:
{{- if $.Values.manager }}
  manager:
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.kubeadm }}
  manager:
    featureGates:
      {{- range $key, $value := $.Values.manager.featureGates.kubeadm }}
      {{ $key }}: {{ $value }}

@@ -1,33 +1,17 @@
# Infrastructure providers
{{- if .Values.infrastructure }}
{{- $infrastructures := split ";" .Values.infrastructure }}
{{- $infrastructureNamespace := "" }}
{{- $infrastructureName := "" }}
{{- $infrastructureVersion := "" }}
{{- range $infrastructure := $infrastructures }}
{{- $infrastructureArgs := split ":" $infrastructure }}
{{- $infrastructureArgsLen := len $infrastructureArgs }}
{{- if eq $infrastructureArgsLen 3 }}
{{- $infrastructureNamespace = $infrastructureArgs._0 }}
{{- $infrastructureName = $infrastructureArgs._1 }}
{{- $infrastructureVersion = $infrastructureArgs._2 }}
{{- else if eq $infrastructureArgsLen 2 }}
{{- $infrastructureNamespace = print $infrastructureArgs._0 "-infrastructure-system" }}
{{- $infrastructureName = $infrastructureArgs._0 }}
{{- $infrastructureVersion = $infrastructureArgs._1 }}
{{- else if eq $infrastructureArgsLen 1 }}
{{- $infrastructureNamespace = print $infrastructureArgs._0 "-infrastructure-system" }}
{{- $infrastructureName = $infrastructureArgs._0 }}
{{- else }}
{{- fail "infrastructure provider argument should have the following format aws:v1.0.0 or mynamespace:aws:v1.0.0" }}
{{- end }}
{{- range $name, $infra := $.Values.infrastructure }}
{{- $infrastructureNamespace := default ( printf "%s-%s" $name "infrastructure-system" ) (get $infra "namespace") }}
{{- $infrastructureName := $name }}
{{- $infrastructureVersion := get $infra "version" }}
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: {{ $infrastructureNamespace }}
---
@@ -37,8 +21,10 @@ metadata:
  name: {{ $infrastructureName }}
  namespace: {{ $infrastructureNamespace }}
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- if or $infrastructureVersion $.Values.configSecret.name $.Values.manager $.Values.additionalDeployments }}
spec:
@@ -47,8 +33,8 @@ spec:
  version: {{ $infrastructureVersion }}
{{- end }}
{{- if $.Values.manager }}
  manager:
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $infrastructureName) }}
  manager:
    {{- range $key, $value := $.Values.manager.featureGates }}
    {{- if eq $key $infrastructureName }}
    featureGates:
@@ -79,5 +65,24 @@ spec:
{{- if $.Values.additionalDeployments }}
  additionalDeployments: {{ toYaml $.Values.additionalDeployments | nindent 4 }}
{{- end }}
{{- if $infra.manifestPatches }}
  manifestPatches: {{- toYaml $infra.manifestPatches | nindent 4 }}
{{- end }} {{/* if $infra.manifestPatches */}}
{{- if $infra.additionalManifests }}
  additionalManifests:
    name: {{ $infra.additionalManifests.name }}
    {{- if $infra.additionalManifests.namespace }}
    namespace: {{ $infra.additionalManifests.namespace }}
    {{- end }} {{/* if $infra.additionalManifests.namespace */}}
{{- end }} {{/* if $infra.additionalManifests */}}
{{- if $infra.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $infra.additionalManifests.name }}
  namespace: {{ default $infrastructureNamespace $infra.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $infra.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $infra := .Values.infrastructure */}}

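Since the infrastructure template now keys `manager.featureGates` by provider name (the `hasKey ... $infrastructureName` check above), per-provider gates can be expressed as in this sketch; the provider name, version, and gate are illustrative assumptions:

    infrastructure:
      proxmox:                            # illustrative provider name
        version: v0.6.2                   # illustrative
    manager:
      featureGates:
        proxmox:                          # must match the provider key above
          ClusterTopology: true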
@@ -1,33 +1,17 @@
# IPAM providers
{{- if .Values.ipam }}
{{- $ipams := split ";" .Values.ipam }}
{{- $ipamNamespace := "" }}
{{- $ipamName := "" }}
{{- $ipamVersion := "" }}
{{- range $ipam := $ipams }}
{{- $ipamArgs := split ":" $ipam }}
{{- $ipamArgsLen := len $ipamArgs }}
{{- if eq $ipamArgsLen 3 }}
{{- $ipamNamespace = $ipamArgs._0 }}
{{- $ipamName = $ipamArgs._1 }}
{{- $ipamVersion = $ipamArgs._2 }}
{{- else if eq $ipamArgsLen 2 }}
{{- $ipamNamespace = print $ipamArgs._0 "-ipam-system" }}
{{- $ipamName = $ipamArgs._0 }}
{{- $ipamVersion = $ipamArgs._1 }}
{{- else if eq $ipamArgsLen 1 }}
{{- $ipamNamespace = print $ipamArgs._0 "-ipam-system" }}
{{- $ipamName = $ipamArgs._0 }}
{{- else }}
{{- fail "ipam provider argument should have the following format in-cluster:v1.0.0 or mynamespace:in-cluster:v1.0.0" }}
{{- end }}
{{- range $name, $ipam := $.Values.ipam }}
{{- $ipamNamespace := default ( printf "%s-%s" $name "ipam-system" ) (get $ipam "namespace") }}
{{- $ipamName := $name }}
{{- $ipamVersion := get $ipam "version" }}
---
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "1"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "1"
  name: {{ $ipamNamespace }}
---
@@ -37,8 +21,10 @@ metadata:
  name: {{ $ipamName }}
  namespace: {{ $ipamNamespace }}
  annotations:
    {{- if $.Values.enableHelmHook }}
    "helm.sh/hook": "post-install,post-upgrade"
    "helm.sh/hook-weight": "2"
    {{- end }}
    "argocd.argoproj.io/sync-wave": "2"
{{- if or $ipamVersion $.Values.configSecret.name $.Values.manager $.Values.additionalDeployments }}
spec:
@@ -47,8 +33,8 @@ spec:
  version: {{ $ipamVersion }}
{{- end }}
{{- if $.Values.manager }}
  manager:
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $ipamName) }}
  manager:
    {{- range $key, $value := $.Values.manager.featureGates }}
    {{- if eq $key $ipamName }}
    featureGates:
@@ -66,8 +52,27 @@ spec:
  namespace: {{ $.Values.configSecret.namespace }}
{{- end }}
{{- end }}
{{- if $ipam.manifestPatches }}
  manifestPatches: {{ toYaml $ipam.manifestPatches | nindent 4 }}
{{- end }}
{{- if $.Values.additionalDeployments }}
  additionalDeployments: {{ toYaml $.Values.additionalDeployments | nindent 4 }}
{{- end }}
{{- if $ipam.additionalManifests }}
  additionalManifests:
    name: {{ $ipam.additionalManifests.name }}
    {{- if $ipam.additionalManifests.namespace }}
    namespace: {{ $ipam.additionalManifests.namespace }}
    {{- end }} {{/* if $ipam.additionalManifests.namespace */}}
{{- end }}
{{- if $ipam.additionalManifests }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ $ipam.additionalManifests.name }}
  namespace: {{ default $ipamNamespace $ipam.additionalManifests.namespace }}
data:
  manifests: {{- toYaml $ipam.additionalManifests.manifests | nindent 4 }}
{{- end }}
{{- end }} {{/* range $name, $ipam := .Values.ipam */}}

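The IPAM template mirrors the same pattern; reading the defaulting logic above, an empty map should be enough to install a provider with defaults, as in this sketch:

    ipam:
      in-cluster: {}    # installs into "in-cluster-ipam-system" with the default version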
@@ -1305,6 +1305,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -2836,6 +2843,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -3048,27 +3062,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -3078,6 +3097,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -4711,27 +4732,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -4741,6 +4767,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -6043,6 +6071,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -7574,6 +7609,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -7786,27 +7828,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -7816,6 +7863,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -9450,27 +9499,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -9480,6 +9534,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -10783,6 +10839,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -12314,6 +12377,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -12527,27 +12597,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -12557,6 +12632,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -14190,27 +14267,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -14220,6 +14302,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -15522,6 +15606,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -17053,6 +17144,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -17265,27 +17363,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -17295,6 +17398,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -18929,27 +19034,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -18959,6 +19069,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -20262,6 +20374,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -21793,6 +21912,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -22006,27 +22132,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -22036,6 +22167,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -23371,6 +23504,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -24902,6 +25042,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -25114,27 +25261,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -25144,6 +25296,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -26481,6 +26635,13 @@ spec:
description: Manager defines the properties that can be enabled
  on the controller manager for the additional provider deployment.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -28012,6 +28173,13 @@ spec:
description: Manager defines the properties that can be enabled on
  the controller manager for the provider.
properties:
  additionalArgs:
    additionalProperties:
      type: string
    description: |-
      AdditionalArgs is a map of additional options that will be passed
      in as container args to the provider's controller manager.
    type: object
  cacheNamespace:
    description: |-
      CacheNamespace if specified restricts the manager's cache to watch objects in

@@ -28225,27 +28393,32 @@ spec:
properties:
  lastTransitionTime:
    description: |-
      Last time the condition transitioned from one status to another.
      lastTransitionTime is the last time the condition transitioned from one status to another.
      This should be when the underlying condition changed. If that is not known, then using the time when
      the API field changed is acceptable.
    format: date-time
    type: string
  message:
    description: |-
      A human readable message indicating details about the transition.
      message is a human readable message indicating details about the transition.
      This field may be empty.
    maxLength: 10240
    minLength: 1
    type: string
  reason:
    description: |-
      The reason for the condition's last transition in CamelCase.
      reason is the reason for the condition's last transition in CamelCase.
      The specific API may choose whether or not this field is considered a guaranteed API.
      This field may be empty.
    maxLength: 256
    minLength: 1
    type: string
  severity:
    description: |-
      severity provides an explicit classification of Reason code, so the users or machines can immediately
      understand the current situation and act accordingly.
      The Severity field MUST be set only when Status=False.
    maxLength: 32
    type: string
  status:
    description: status of the condition, one of True, False, Unknown.

@@ -28255,6 +28428,8 @@ spec:
      type of condition in CamelCase or in foo.example.com/CamelCase.
      Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
      can be useful (see .node.status.conditions), the ability to deconflict is important.
    maxLength: 256
    minLength: 1
    type: string
required:
- lastTransitionTime

@@ -0,0 +1,47 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"core": {
|
||||
"oneOf": [
|
||||
{ "type": "object" },
|
||||
{ "type": "null" }
|
||||
]
|
||||
},
|
||||
"bootstrap": {
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{ "type": "object" },
|
||||
{ "type": "null" }
|
||||
]
|
||||
},
|
||||
"controlPlane": {
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{ "type": "object" },
|
||||
{ "type": "null" }
|
||||
]
|
||||
},
|
||||
"infrastructure": {
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{ "type": "object" },
|
||||
{ "type": "null" }
|
||||
]
|
||||
},
|
||||
"addon": {
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{ "type": "object" },
|
||||
{ "type": "null" }
|
||||
]
|
||||
},
|
||||
"ipam": {
|
||||
"type": "object",
|
||||
"oneOf": [
|
||||
{ "type": "object" },
|
||||
{ "type": "null" }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,30 @@
---
# ---
# Cluster API provider options
core: ""
bootstrap: ""
controlPlane: ""
infrastructure: ""
ipam: ""
addon: ""
core: {}
# cluster-api: {} # Name, required
# namespace: "" # Optional
# version: "" # Optional
bootstrap: {}
# kubeadm: {} # Name, required
# namespace: "" # Optional
# version: "" # Optional
controlPlane: {}
# kubeadm: {} # Name, required
# namespace: "" # Optional
# version: "" # Optional
infrastructure: {}
# docker: {} # Name, required
# namespace: "" # Optional
# version: "" # Optional
addon: {}
# helm: {} # Name, required
# namespace: "" # Optional
# version: "" # Optional
ipam: {}
# in-cluster: {} # Name, required
# namespace: "" # Optional
# version: "" # Optional
manager.featureGates: {}
fetchConfig: {}
# ---
@@ -21,7 +39,7 @@ leaderElection:
image:
manager:
repository: registry.k8s.io/capi-operator/cluster-api-operator
tag: v0.17.0
tag: v0.19.0
pullPolicy: IfNotPresent
env:
manager: []
@@ -69,3 +87,4 @@ volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
enableHelmHook: true

@@ -5,7 +5,7 @@ metadata:
name: cluster-api
spec:
# https://github.com/kubernetes-sigs/cluster-api
version: v1.9.5
version: v1.10.0
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: ControlPlaneProvider
@@ -13,7 +13,7 @@ metadata:
name: kamaji
spec:
# https://github.com/clastix/cluster-api-control-plane-provider-kamaji
version: v0.14.1
version: v0.14.2
deployment:
containers:
- name: manager
@@ -21,6 +21,9 @@ spec:
limits:
cpu: "1"
memory: 1024Mi
requests:
cpu: "10m"
memory: 128Mi
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: BootstrapProvider
@@ -28,7 +31,7 @@ metadata:
name: kubeadm
spec:
# https://github.com/kubernetes-sigs/cluster-api
version: v1.9.5
version: v1.10.0
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
kind: InfrastructureProvider
@@ -36,4 +39,4 @@ metadata:
name: kubevirt
spec:
# https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
version: v0.1.9
version: v0.1.10

@@ -79,7 +79,7 @@ annotations:
Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can
be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n"
apiVersion: v2
appVersion: 1.17.1
appVersion: 1.17.3
description: eBPF-based Networking, Security, and Observability
home: https://cilium.io/
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
@@ -95,4 +95,4 @@ kubeVersion: '>= 1.21.0-0'
name: cilium
sources:
- https://github.com/cilium/cilium
version: 1.17.1
version: 1.17.3

@@ -1,6 +1,6 @@
# cilium

![Version: 1.17.1](https://img.shields.io/badge/Version-1.17.1-informational?style=flat-square) ![AppVersion: 1.17.1](https://img.shields.io/badge/AppVersion-1.17.1-informational?style=flat-square)
![Version: 1.17.3](https://img.shields.io/badge/Version-1.17.3-informational?style=flat-square) ![AppVersion: 1.17.3](https://img.shields.io/badge/AppVersion-1.17.3-informational?style=flat-square)

Cilium is open source software for providing and transparently securing
network connectivity and loadbalancing between application workloads such as
@@ -85,7 +85,7 @@ contributors across the globe, there is almost always someone available to help.
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:a5d0ce49aa801d475da48f8cb163c354ab95cab073cd3c138bd458fc8257fbf1","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:37f7b378a29ceb4c551b1b5582e27747b855bbfaa73fa11914fe0df028dc581f","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
| authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations |
@@ -131,6 +131,8 @@ contributors across the globe, there is almost always someone available to help.
| bpf.ctTcpMax | int | `524288` | Configure the maximum number of entries in the TCP connection tracking table. |
| bpf.datapathMode | string | `veth` | Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) |
| bpf.disableExternalIPMitigation | bool | `false` | Disable ExternalIP mitigation (CVE-2020-8554) |
| bpf.distributedLRU | object | `{"enabled":false}` | Control to use a distributed per-CPU backend memory for the core BPF LRU maps which Cilium uses. This improves performance significantly, but it is also recommended to increase BPF map sizing along with that. |
| bpf.distributedLRU.enabled | bool | `false` | Enable distributed LRU backend memory. For compatibility with existing installations it is off by default. |
| bpf.enableTCX | bool | `true` | Attach endpoint programs using tcx instead of legacy tc hooks on supported kernels. |
| bpf.events | object | `{"default":{"burstLimit":null,"rateLimit":null},"drop":{"enabled":true},"policyVerdict":{"enabled":true},"trace":{"enabled":true}}` | Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. Helm configuration for BPF events map rate limiting is experimental and might change in upcoming releases. |
| bpf.events.default | object | `{"burstLimit":null,"rateLimit":null}` | Default settings for all types of events except dbg and pcap. |
@@ -195,7 +197,7 @@ contributors across the globe, there is almost always someone available to help.
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
| clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. |
| clustermesh.apiserver.image | object | `{"digest":"sha256:1de22f46bfdd638de72c2224d5223ddc3bbeacda1803cb75799beca3d4bf7a4c","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.1","useDigest":true}` | Clustermesh API server image. |
| clustermesh.apiserver.image | object | `{"digest":"sha256:98d5feaf67dd9b5d8d219ff5990de10539566eedc5412bcf52df75920896ad42","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.3","useDigest":true}` | Clustermesh API server image. |
| clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
@@ -375,7 +377,7 @@ contributors across the globe, there is almost always someone available to help.
| envoy.healthPort | int | `9878` | TCP port for the health API. |
| envoy.httpRetryCount | int | `3` | Maximum number of retries for each HTTP request |
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
| envoy.image | object | `{"digest":"sha256:fc708bd36973d306412b2e50c924cd8333de67e0167802c9b48506f9d772f521","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae","useDigest":true}` | Envoy container image. |
| envoy.image | object | `{"digest":"sha256:a01cadf7974409b5c5c92ace3d6afa298408468ca24cab1cb413c04f89d3d1f9","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.32.5-1744305768-f9ddca7dcd91f7ca25a505560e655c47d3dec2cf","useDigest":true}` | Envoy container image. |
| envoy.initialFetchTimeoutSeconds | int | `30` | Time in seconds after which the initial fetch on an xDS stream is considered timed out |
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
@@ -392,6 +394,7 @@ contributors across the globe, there is almost always someone available to help.
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
| envoy.podSecurityContext | object | `{"appArmorProfile":{"type":"Unconfined"}}` | Security Context for cilium-envoy pods. |
| envoy.podSecurityContext.appArmorProfile | object | `{"type":"Unconfined"}` | AppArmorProfile options for the `cilium-agent` and init containers |
| envoy.policyRestoreTimeoutDuration | string | `nil` | Max duration to wait for endpoint policies to be restored on restart. Default "3m". |
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. |
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
@@ -515,7 +518,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
| hubble.relay.image | object | `{"digest":"sha256:397e8fbb188157f744390a7b272a1dec31234e605bcbe22d8919a166d202a3dc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.1","useDigest":true}` | Hubble-relay container image. |
| hubble.relay.image | object | `{"digest":"sha256:f8674b5139111ac828a8818da7f2d344b4a5bfbaeb122c5dc9abed3e74000c55","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.3","useDigest":true}` | Hubble-relay container image. |
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
@@ -582,7 +585,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
| hubble.ui.backend.image | object | `{"digest":"sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.1","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.image | object | `{"digest":"sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.2","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.livenessProbe.enabled | bool | `false` | Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
| hubble.ui.backend.readinessProbe.enabled | bool | `false` | Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
@@ -592,7 +595,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
| hubble.ui.frontend.image | object | `{"digest":"sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.1","useDigest":true}` | Hubble-ui frontend image. |
| hubble.ui.frontend.image | object | `{"digest":"sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.2","useDigest":true}` | Hubble-ui frontend image. |
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
@@ -622,7 +625,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). |
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
| image | object | `{"digest":"sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.1","useDigest":true}` | Agent container image. |
| image | object | `{"digest":"sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.3","useDigest":true}` | Agent container image. |
| imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images |
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
@@ -759,7 +762,7 @@ contributors across the globe, there is almost always someone available to help.
| operator.hostNetwork | bool | `true` | HostNetwork setting |
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
| operator.image | object | `{"alibabacloudDigest":"sha256:034b479fba340f9d98510e509c7ce1c36e8889a109d5f1c2240fcb0942bc772c","awsDigest":"sha256:da74748057c836471bfdc0e65bb29ba0edb82916ec4b99f6a4f002b2fcc849d6","azureDigest":"sha256:b9e3e3994f5fcf1832e1f344f3b3b544832851b1990f124b2c2c68e3ffe04a9b","genericDigest":"sha256:628becaeb3e4742a1c36c4897721092375891b58bae2bfcae48bbf4420aaee97","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.1","useDigest":true}` | cilium-operator image. |
| operator.image | object | `{"alibabacloudDigest":"sha256:e9a9ab227c6e833985bde6537b4d1540b0907f21a84319de4b7d62c5302eed5c","awsDigest":"sha256:40f235111fb2bca209ee65b12f81742596e881a0a3ee4d159776d78e3091ba7f","azureDigest":"sha256:6a3294ec8a2107048254179c3ac5121866f90d20fccf12f1d70960e61f304713","genericDigest":"sha256:8bd38d0e97a955b2d725929d60df09d712fb62b60b930551a29abac2dd92e597","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.3","useDigest":true}` | cilium-operator image. |
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
@@ -809,7 +812,7 @@ contributors across the globe, there is almost always someone available to help.
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
| preflight.image | object | `{"digest":"sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.1","useDigest":true}` | Cilium pre-flight image. |
| preflight.image | object | `{"digest":"sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.3","useDigest":true}` | Cilium pre-flight image. |
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
@@ -883,7 +886,7 @@ contributors across the globe, there is almost always someone available to help.
| tls.caBundle.useSecret | bool | `false` | Use a Secret instead of a ConfigMap. |
| tls.readSecretsOnlyFromSecretsNamespace | string | `nil` | Configure if the Cilium Agent will only look in `tls.secretsNamespace` for CiliumNetworkPolicy relevant Secrets. If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access to _all_ secrets in the entire cluster. This is not recommended and is included for backwards compatibility. This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old setting, and `false` == `k8s`. |
| tls.secretSync | object | `{"enabled":null}` | Configures settings for synchronization of TLS Interception Secrets |
| tls.secretSync.enabled | string | `nil` | Enable synchronization of Secrets for TLS Interception. If disabled and tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent. |
| tls.secretSync.enabled | string | `nil` | Enable synchronization of Secrets for TLS Interception. If disabled and tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent. |
| tls.secretsBackend | string | `nil` | This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies (namely the secrets referenced by terminatingTLS and originatingTLS). This value is DEPRECATED and will be removed in a future version. Use `tls.readSecretsOnlyFromSecretsNamespace` instead. Possible values: - local - k8s |
| tls.secretsNamespace | object | `{"create":true,"name":"cilium-secrets"}` | Configures where secrets used in CiliumNetworkPolicies will be looked for |
| tls.secretsNamespace.create | bool | `true` | Create secrets namespace for TLS Interception secrets. |
@@ -891,6 +894,7 @@ contributors across the globe, there is almost always someone available to help.
| tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for agent scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| tunnelPort | int | Port 8472 for VXLAN, Port 6081 for Geneve | Configure VXLAN and Geneve tunnel port. |
| tunnelProtocol | string | `"vxlan"` | Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. Possible values: - "" - vxlan - geneve |
| tunnelSourcePortRange | string | 0-0 to let the kernel driver decide the range | Configure VXLAN and Geneve tunnel source port range hint. |
| updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":2},"type":"RollingUpdate"}` | Cilium agent update strategy |
| upgradeCompatibility | string | `nil` | upgradeCompatibility helps users upgrading to ensure that the configMap for Cilium will not change critical values to ensure continued operation This flag is not required for new installations. For example: '1.7', '1.8', '1.9' |
| vtep.cidr | string | `""` | A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" |

@@ -7,8 +7,15 @@ staticResources:
- name: "envoy-prometheus-metrics-listener"
address:
socketAddress:
address: "0.0.0.0"
address: {{ .Values.ipv4.enabled | ternary "0.0.0.0" "::" | quote }}
portValue: {{ .Values.envoy.prometheus.port }}
{{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
additionalAddresses:
- address:
socketAddress:
address: "::"
portValue: {{ .Values.envoy.prometheus.port }}
{{- end }}
filterChains:
- filters:
- name: "envoy.filters.network.http_connection_manager"
@@ -289,7 +296,7 @@ overloadManager:
applicationLogConfig:
logFormat:
{{- if .Values.envoy.log.format_json }}
jsonFormat: "{{ .Values.envoy.log.format_json | toJson }}"
jsonFormat: {{ .Values.envoy.log.format_json | toJson }}
{{- else }}
textFormat: "{{ .Values.envoy.log.format }}"
{{- end }}

@@ -232,7 +232,7 @@ spec:
resources:
{{- toYaml . | trim | nindent 10 }}
{{- end }}
{{- if or .Values.prometheus.enabled .Values.hubble.metrics.enabled }}
{{- if or .Values.prometheus.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) }}
ports:
- name: peer-service
containerPort: {{ .Values.hubble.peerService.targetPort }}
@@ -364,7 +364,7 @@ spec:
mountPath: {{ .Values.kubeConfigPath }}
readOnly: true
{{- end }}
{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }}
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.tls.enabled }}
- name: hubble-metrics-tls
mountPath: /var/lib/cilium/tls/hubble-metrics
readOnly: true
@@ -999,7 +999,7 @@ spec:
path: client-ca.crt
{{- end }}
{{- end }}
{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }}
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.tls.enabled }}
- name: hubble-metrics-tls
projected:
# note: the leading zero means this number is in octal representation: do not remove it

@@ -39,6 +39,9 @@ metadata:
{{- end }}
labels:
app.kubernetes.io/part-of: cilium
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -62,6 +65,9 @@ metadata:
{{- end }}
labels:
app.kubernetes.io/part-of: cilium
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -85,6 +91,9 @@ metadata:
{{- end }}
labels:
app.kubernetes.io/part-of: cilium
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -104,6 +113,9 @@ metadata:
namespace: {{ .Values.bgpControlPlane.secretsNamespace.name | quote }}
labels:
app.kubernetes.io/part-of: cilium
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -123,6 +135,9 @@ metadata:
namespace: {{ .Values.tls.secretsNamespace.name | quote }}
labels:
app.kubernetes.io/part-of: cilium
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role

@@ -46,6 +46,9 @@ metadata:
k8s-app: cilium
app.kubernetes.io/name: cilium-agent
app.kubernetes.io/part-of: cilium
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
clusterIP: None
type: ClusterIP

@@ -403,7 +403,7 @@ data:

{{- if .Values.bpf.authMapMax }}
# bpf-auth-map-max specifies the maximum number of entries in the auth map
bpf-auth-map-max: {{ .Values.bpf.authMapMax | quote }}
bpf-auth-map-max: "{{ .Values.bpf.authMapMax | int }}"
{{- end }}
{{- if or $bpfCtTcpMax $bpfCtAnyMax }}
# bpf-ct-global-*-max specifies the maximum number of connections
@@ -419,34 +419,34 @@ data:
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
# during the upgrade process, set bpf-ct-global-tcp-max to 1000000.
{{- if $bpfCtTcpMax }}
bpf-ct-global-tcp-max: {{ $bpfCtTcpMax | quote }}
bpf-ct-global-tcp-max: "{{ $bpfCtTcpMax | int }}"
{{- end }}
{{- if $bpfCtAnyMax }}
bpf-ct-global-any-max: {{ $bpfCtAnyMax | quote }}
bpf-ct-global-any-max: "{{ $bpfCtAnyMax | int }}"
{{- end }}
{{- end }}
{{- if .Values.bpf.ctAccounting }}
bpf-conntrack-accounting: "{{ .Values.bpf.ctAccounting }}"
bpf-conntrack-accounting: "{{ .Values.bpf.ctAccounting | int }}"
{{- end }}
{{- if .Values.bpf.natMax }}
# bpf-nat-global-max specified the maximum number of entries in the
# BPF NAT table.
bpf-nat-global-max: "{{ .Values.bpf.natMax }}"
bpf-nat-global-max: "{{ .Values.bpf.natMax | int }}"
{{- end }}
{{- if .Values.bpf.neighMax }}
# bpf-neigh-global-max specified the maximum number of entries in the
# BPF neighbor table.
bpf-neigh-global-max: "{{ .Values.bpf.neighMax }}"
bpf-neigh-global-max: "{{ .Values.bpf.neighMax | int }}"
{{- end }}
{{- if hasKey .Values.bpf "policyMapMax" }}
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "{{ .Values.bpf.policyMapMax }}"
bpf-policy-map-max: "{{ .Values.bpf.policyMapMax | int }}"
{{- end }}
{{- if hasKey .Values.bpf "lbMapMax" }}
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "{{ .Values.bpf.lbMapMax }}"
bpf-lb-map-max: "{{ .Values.bpf.lbMapMax | int }}"
{{- end }}
{{- if hasKey .Values.bpf "lbExternalClusterIP" }}
bpf-lb-external-clusterip: {{ .Values.bpf.lbExternalClusterIP | quote }}
@@ -461,6 +461,7 @@ data:
bpf-lb-mode-annotation: {{ .Values.bpf.lbModeAnnotation | quote }}
{{- end }}

bpf-distributed-lru: {{ .Values.bpf.distributedLRU.enabled | quote }}
bpf-events-drop-enabled: {{ .Values.bpf.events.drop.enabled | quote }}
bpf-events-policy-verdict-enabled: {{ .Values.bpf.events.policyVerdict.enabled | quote }}
bpf-events-trace-enabled: {{ .Values.bpf.events.trace.enabled | quote }}
@@ -513,6 +514,9 @@ data:
{{- if .Values.tunnelPort }}
tunnel-port: {{ .Values.tunnelPort | quote }}
{{- end }}
{{- if .Values.tunnelSourcePortRange }}
tunnel-source-port-range: {{ .Values.tunnelSourcePortRange | quote }}
{{- end }}

{{- if .Values.serviceNoBackendResponse }}
service-no-backend-response: "{{ .Values.serviceNoBackendResponse }}"
@@ -927,9 +931,8 @@ data:
operator-api-serve-addr: {{ $defaultOperatorApiServeAddr | quote }}
{{- end }}

{{- if .Values.hubble.enabled }}
# Enable Hubble gRPC service.
enable-hubble: {{ .Values.hubble.enabled | quote }}
{{- if .Values.hubble.enabled }}
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: {{ .Values.hubble.socketPath | quote }}
{{- if hasKey .Values.hubble "eventQueueSize" }}
@@ -941,7 +944,7 @@ data:
# Capacity of the buffer to store recent events.
hubble-event-buffer-capacity: {{ .Values.hubble.eventBufferCapacity | quote }}
{{- end }}
{{- if .Values.hubble.metrics.enabled }}
{{- if or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled}}
# Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this
# field is not set.
hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"
@@ -953,14 +956,20 @@ data:
hubble-metrics-server-tls-client-ca-files: /var/lib/cilium/tls/hubble-metrics/client-ca.crt
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.hubble.metrics.enabled }}
# A space separated list of metrics to enable. See [0] for available metrics.
#
# https://github.com/cilium/hubble/blob/master/Documentation/metrics.md
hubble-metrics: {{- range .Values.hubble.metrics.enabled }}
{{.}}
{{- end}}
{{- if .Values.hubble.metrics.dynamic.enabled }}
hubble-dynamic-metrics-config-path: /dynamic-metrics-config/dynamic-metrics.yaml
{{- end }}
enable-hubble-open-metrics: {{ .Values.hubble.metrics.enableOpenMetrics | quote }}
{{- end }}

{{- if .Values.hubble.redact }}
{{- if eq .Values.hubble.redact.enabled true }}
# Enables hubble redact capabilities
@@ -1004,10 +1013,6 @@ data:
hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml
{{- end }}
{{- end }}
{{- if .Values.hubble.metrics.dynamic.enabled }}
hubble-dynamic-metrics-config-path: /dynamic-metrics-config/dynamic-metrics.yaml
hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"
{{- end }}
{{- if hasKey .Values.hubble "listenAddress" }}
# An additional address for Hubble server to listen to (e.g. ":4244").
hubble-listen-address: {{ .Values.hubble.listenAddress | quote }}
@@ -1041,8 +1046,8 @@ data:
{{- else }}
ipam: {{ $ipam | quote }}
{{- end }}
{{- if hasKey .Values.ipam "multiPoolPreAllocation" }}
ipam-multi-pool-pre-allocation: {{ .Values.ipam.multiPoolPreAllocation }}
{{- if .Values.ipam.multiPoolPreAllocation }}
ipam-multi-pool-pre-allocation: {{ .Values.ipam.multiPoolPreAllocation | quote }}
{{- end }}

{{- if .Values.ipam.ciliumNodeUpdateRate }}
@@ -1335,6 +1340,10 @@ data:
external-envoy-proxy: {{ include "envoyDaemonSetEnabled" . | quote }}
envoy-base-id: {{ .Values.envoy.baseID | quote }}

{{- if .Values.envoy.policyRestoreTimeoutDuration }}
envoy-policy-restore-timeout: {{ .Values.envoy.policyRestoreTimeoutDuration | quote }}
{{- end }}

{{- if .Values.envoy.log.path }}
envoy-log: {{ .Values.envoy.log.path | quote }}
{{- end }}

|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
@@ -66,6 +69,9 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
||||
@@ -7,24 +7,23 @@ kind: RoleBinding
|
||||
metadata:
|
||||
name: cilium-operator-ingress-secrets
|
||||
namespace: {{ .Values.ingressController.secretsNamespace.name | quote }}
|
||||
{{- with .Values.commonLabels }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: cilium-operator-ingress-secrets
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceAccounts.operator.name | quote }}
|
||||
namespace: {{ include "cilium.namespace" . }}
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceAccounts.operator.name | quote }}
|
||||
namespace: {{ include "cilium.namespace" . }}
|
||||
{{- end }}
|
||||
|
||||
{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create .Values.gatewayAPI.enabled .Values.gatewayAPI.secretsNamespace.sync .Values.gatewayAPI.secretsNamespace.name }}
|
||||
@@ -34,12 +33,15 @@ kind: RoleBinding
|
||||
metadata:
|
||||
name: cilium-operator-gateway-secrets
|
||||
namespace: {{ .Values.gatewayAPI.secretsNamespace.name | quote }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -57,12 +59,15 @@ kind: RoleBinding
|
||||
metadata:
|
||||
name: cilium-operator-tlsinterception-secrets
|
||||
namespace: {{ .Values.tls.secretsNamespace.name | quote }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.serviceMonitor.enabled }}
|
||||
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.serviceMonitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
|
||||
@@ -4,10 +4,13 @@ kind: Service
|
||||
metadata:
|
||||
name: spire-server
|
||||
namespace: {{ .Values.authentication.mutual.spire.install.namespace }}
|
||||
{{- with .Values.commonLabels }}
|
||||
labels:
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.service.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.authentication.mutual.spire.install.server.service.annotations .Values.authentication.mutual.spire.annotations }}
|
||||
annotations:
|
||||
{{- with .Values.authentication.mutual.spire.annotations }}
|
||||
@@ -17,10 +20,6 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.service.labels }}
|
||||
labels:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.authentication.mutual.spire.install.server.service.type }}
|
||||
ports:
|
||||
|
||||
@@ -4,10 +4,6 @@ kind: StatefulSet
|
||||
metadata:
|
||||
name: spire-server
|
||||
namespace: {{ .Values.authentication.mutual.spire.install.namespace }}
|
||||
{{- with .Values.commonLabels }}
|
||||
labels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.authentication.mutual.spire.install.server.annotations .Values.authentication.mutual.spire.annotations }}
|
||||
annotations:
|
||||
{{- with .Values.authentication.mutual.spire.annotations }}
|
||||
@@ -19,9 +15,12 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app: spire-server
|
||||
{{- with .Values.authentication.mutual.spire.install.server.labels }}
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
|
||||
@@ -519,6 +519,14 @@
|
||||
"disableExternalIPMitigation": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"distributedLRU": {
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"enableTCX": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -2110,6 +2118,12 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"policyRestoreTimeoutDuration": {
|
||||
"type": [
|
||||
"null",
|
||||
"string"
|
||||
]
|
||||
},
|
||||
"priorityClassName": {
|
||||
"type": [
|
||||
"null",
|
||||
@@ -5462,6 +5476,9 @@
|
||||
"tunnelProtocol": {
|
||||
"type": "string"
|
||||
},
|
||||
"tunnelSourcePortRange": {
|
||||
"type": "string"
|
||||
},
|
||||
"updateStrategy": {
|
||||
"properties": {
|
||||
"rollingUpdate": {
|
||||
|
||||
@@ -191,10 +191,10 @@ image:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium"
|
||||
tag: "v1.17.1"
|
||||
tag: "v1.17.3"
|
||||
pullPolicy: "IfNotPresent"
|
||||
# cilium-digest
|
||||
digest: "sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866"
|
||||
digest: "sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
|
||||
useDigest: true
|
||||
# -- Scheduling configurations for cilium pods
|
||||
scheduling:
|
||||
@@ -495,6 +495,13 @@ bpf:
|
||||
# tracking table.
|
||||
# @default -- `262144`
|
||||
ctAnyMax: ~
|
||||
# -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps
|
||||
# which Cilium uses. This improves performance significantly, but it is also
|
||||
# recommended to increase BPF map sizing along with that.
|
||||
distributedLRU:
|
||||
# -- Enable distributed LRU backend memory. For compatibility with existing
|
||||
# installations it is off by default.
|
||||
enabled: false
|
||||
# -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
|
||||
# Helm configuration for BPF events map rate limiting is experimental and might change
|
||||
# in upcoming releases.
|
||||
@@ -1433,9 +1440,9 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-relay"
|
||||
tag: "v1.17.1"
|
||||
tag: "v1.17.3"
|
||||
# hubble-relay-digest
|
||||
digest: "sha256:397e8fbb188157f744390a7b272a1dec31234e605bcbe22d8919a166d202a3dc"
|
||||
digest: "sha256:f8674b5139111ac828a8818da7f2d344b4a5bfbaeb122c5dc9abed3e74000c55"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Specifies the resources for the hubble-relay pods
|
||||
@@ -1684,8 +1691,8 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-ui-backend"
|
||||
tag: "v0.13.1"
|
||||
digest: "sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b"
|
||||
tag: "v0.13.2"
|
||||
digest: "sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Hubble-ui backend security context.
|
||||
@@ -1718,8 +1725,8 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-ui"
|
||||
tag: "v0.13.1"
|
||||
digest: "sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6"
|
||||
tag: "v0.13.2"
|
||||
digest: "sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Hubble-ui frontend security context.
|
||||
@@ -2332,6 +2339,11 @@ envoy:
|
||||
xffNumTrustedHopsL7PolicyIngress: 0
|
||||
# -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners.
|
||||
xffNumTrustedHopsL7PolicyEgress: 0
|
||||
# @schema
|
||||
# type: [null, string]
|
||||
# @schema
|
||||
# -- Max duration to wait for endpoint policies to be restored on restart. Default "3m".
|
||||
policyRestoreTimeoutDuration: null
|
||||
# -- Envoy container image.
|
||||
image:
|
||||
# @schema
|
||||
@@ -2339,9 +2351,9 @@ envoy:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium-envoy"
|
||||
tag: "v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae"
|
||||
tag: "v1.32.5-1744305768-f9ddca7dcd91f7ca25a505560e655c47d3dec2cf"
|
||||
pullPolicy: "IfNotPresent"
|
||||
digest: "sha256:fc708bd36973d306412b2e50c924cd8333de67e0167802c9b48506f9d772f521"
|
||||
digest: "sha256:a01cadf7974409b5c5c92ace3d6afa298408468ca24cab1cb413c04f89d3d1f9"
|
||||
useDigest: true
|
||||
# -- Additional containers added to the cilium Envoy DaemonSet.
|
||||
extraContainers: []
|
||||
@@ -2605,7 +2617,7 @@ tls:
|
||||
# type: [null, boolean]
|
||||
# @schema
|
||||
# -- Enable synchronization of Secrets for TLS Interception. If disabled and
|
||||
# tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent.
|
||||
# tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent.
|
||||
enabled: ~
|
||||
# -- Base64 encoded PEM values for the CA certificate and private key.
|
||||
# This can be used as common CA to generate certificates used by hubble and clustermesh components.
|
||||
@@ -2658,6 +2670,9 @@ routingMode: ""
|
||||
# -- Configure VXLAN and Geneve tunnel port.
|
||||
# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
|
||||
tunnelPort: 0
|
||||
# -- Configure VXLAN and Geneve tunnel source port range hint.
|
||||
# @default -- 0-0 to let the kernel driver decide the range
|
||||
tunnelSourcePortRange: 0-0
|
||||
# -- Configure what the response should be to traffic for a service without backends.
|
||||
# Possible values:
|
||||
# - reject (default)
|
||||
@@ -2693,15 +2708,15 @@ operator:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/operator"
|
||||
tag: "v1.17.1"
|
||||
tag: "v1.17.3"
|
||||
# operator-generic-digest
|
||||
genericDigest: "sha256:628becaeb3e4742a1c36c4897721092375891b58bae2bfcae48bbf4420aaee97"
|
||||
genericDigest: "sha256:8bd38d0e97a955b2d725929d60df09d712fb62b60b930551a29abac2dd92e597"
|
||||
# operator-azure-digest
|
||||
azureDigest: "sha256:b9e3e3994f5fcf1832e1f344f3b3b544832851b1990f124b2c2c68e3ffe04a9b"
|
||||
azureDigest: "sha256:6a3294ec8a2107048254179c3ac5121866f90d20fccf12f1d70960e61f304713"
|
||||
# operator-aws-digest
|
||||
awsDigest: "sha256:da74748057c836471bfdc0e65bb29ba0edb82916ec4b99f6a4f002b2fcc849d6"
|
||||
awsDigest: "sha256:40f235111fb2bca209ee65b12f81742596e881a0a3ee4d159776d78e3091ba7f"
|
||||
# operator-alibabacloud-digest
|
||||
alibabacloudDigest: "sha256:034b479fba340f9d98510e509c7ce1c36e8889a109d5f1c2240fcb0942bc772c"
|
||||
alibabacloudDigest: "sha256:e9a9ab227c6e833985bde6537b4d1540b0907f21a84319de4b7d62c5302eed5c"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
suffix: ""
|
||||
@@ -2976,9 +2991,9 @@ preflight:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium"
|
||||
tag: "v1.17.1"
|
||||
tag: "v1.17.3"
|
||||
# cilium-digest
|
||||
digest: "sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866"
|
||||
digest: "sha256:1782794aeac951af139315c10eff34050aa7579c12827ee9ec376bb719b82873"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- The priority class to use for the preflight pod.
|
||||
@@ -3125,9 +3140,9 @@ clustermesh:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/clustermesh-apiserver"
|
||||
tag: "v1.17.1"
|
||||
tag: "v1.17.3"
|
||||
# clustermesh-apiserver-digest
|
||||
digest: "sha256:1de22f46bfdd638de72c2224d5223ddc3bbeacda1803cb75799beca3d4bf7a4c"
|
||||
digest: "sha256:98d5feaf67dd9b5d8d219ff5990de10539566eedc5412bcf52df75920896ad42"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- TCP port for the clustermesh-apiserver health API.
|
||||
@@ -3634,7 +3649,7 @@ authentication:
|
||||
override: ~
|
||||
repository: "docker.io/library/busybox"
|
||||
tag: "1.37.0"
|
||||
digest: "sha256:a5d0ce49aa801d475da48f8cb163c354ab95cab073cd3c138bd458fc8257fbf1"
|
||||
digest: "sha256:37f7b378a29ceb4c551b1b5582e27747b855bbfaa73fa11914fe0df028dc581f"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# SPIRE agent configuration
|
||||
|
||||
@@ -500,6 +500,13 @@ bpf:
|
||||
# tracking table.
|
||||
# @default -- `262144`
|
||||
ctAnyMax: ~
|
||||
# -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps
|
||||
# which Cilium uses. This improves performance significantly, but it is also
|
||||
# recommended to increase BPF map sizing along with that.
|
||||
distributedLRU:
|
||||
# -- Enable distributed LRU backend memory. For compatibility with existing
|
||||
# installations it is off by default.
|
||||
enabled: false
|
||||
# -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
|
||||
# Helm configuration for BPF events map rate limiting is experimental and might change
|
||||
# in upcoming releases.
|
||||
@@ -2351,6 +2358,11 @@ envoy:
|
||||
xffNumTrustedHopsL7PolicyIngress: 0
|
||||
# -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners.
|
||||
xffNumTrustedHopsL7PolicyEgress: 0
|
||||
# @schema
|
||||
# type: [null, string]
|
||||
# @schema
|
||||
# -- Max duration to wait for endpoint policies to be restored on restart. Default "3m".
|
||||
policyRestoreTimeoutDuration: null
|
||||
# -- Envoy container image.
|
||||
image:
|
||||
# @schema
|
||||
@@ -2626,7 +2638,7 @@ tls:
|
||||
# type: [null, boolean]
|
||||
# @schema
|
||||
# -- Enable synchronization of Secrets for TLS Interception. If disabled and
|
||||
# tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent.
|
||||
# tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent.
|
||||
enabled: ~
|
||||
# -- Base64 encoded PEM values for the CA certificate and private key.
|
||||
# This can be used as common CA to generate certificates used by hubble and clustermesh components.
|
||||
@@ -2679,6 +2691,9 @@ routingMode: ""
|
||||
# -- Configure VXLAN and Geneve tunnel port.
|
||||
# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
|
||||
tunnelPort: 0
|
||||
# -- Configure VXLAN and Geneve tunnel source port range hint.
|
||||
# @default -- 0-0 to let the kernel driver decide the range
|
||||
tunnelSourcePortRange: 0-0
|
||||
# -- Configure what the response should be to traffic for a service without backends.
|
||||
# Possible values:
|
||||
# - reject (default)
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
ARG VERSION=v1.17.1
|
||||
ARG VERSION=v1.17.3
|
||||
FROM quay.io/cilium/cilium:${VERSION}
|
||||
|
||||
@@ -14,7 +14,7 @@ cilium:
mode: "kubernetes"
image:
repository: ghcr.io/cozystack/cozystack/cilium
tag: 1.17.1
digest: "sha256:ac154cd13711444f9fd1a7c6e947f504c769cc654039b93630ccc0479111f2a3"
tag: 1.17.2
digest: "sha256:bc6a8ec326188960ac36584873e07801bcbc56cb862e2ec8bf87a7926f66abf1"
envoy:
enabled: false

@@ -1,2 +1,2 @@
cozystackAPI:
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.29.1@sha256:3ce1cd4a9c74999b08ee477811bdc048a8b3fc79f214d92db2e81bb3ae0bd516
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.30.2@sha256:7ef370dc8aeac0a6b2a50b7d949f070eb21d267ba0a70e7fc7c1564bfe6d4f83
Some files were not shown because too many files have changed in this diff.