Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-02-05 08:17:59 +00:00)

Compare commits: upd-testin ... ci-wait-ca (197 commits)

Commits (SHA1):
1c3d140448, 894cb14d49, a0935e9ae4, f2c248acbd, 590f14a614, 4c8dba880a, de0c7b94f4, 2682a6e674, e3e0b21612, 455d66fbe4,
7db7277636, 7be5db8cff, 249950d94b, 44565dca88, cefcd24ebb, 13d7df47d7, 1ccd3074dc, 70d3591ed2, 700991f4fa, d89acbf44d,
59ef3296f0, 3ed0cdee1c, 9f5230a342, b895ccfdeb, d54a407d68, f9ec630509, 3f47181c10, 19409d801d, 8a4793d571, 0fc3fdcb3d,
04e2b3952b, b56624a781, 07d7fadb1a, 8db92d53d1, 7537235f43, 4bb524e53d, e7ded52f93, 8547dc3b21, c22603bf7e, 89525dedb5,
1c53a6f9f6, 16ee0f2c3a, 72d0394475, 0a998c8b49, 7bfad655c2, e81cbf780c, e8cc44450a, d3a8a4a7de, fc2c5a0f6b, 0f8b8e1744,
197434ff94, 703073a164, 6a0fc64475, f1624353ef, 277b438f68, 405863cb11, 63ebab5c2a, 0ddaff9380, a6b02bf381, 39ede77fec,
e505857832, d8f3547db7, 6d8a99269b, b9112a398e, 719fdd29cc, 9e1376f709, 7a9a1fcba4, 2def9f4e83, c1046aae6a, 53cf1c537c,
ccedcb7419, f94a01febd, 495e584313, 172e660cd1, 14262cdd2a, 80576cb757, fde6e9cc73, 57ca60c5a5, 1d0ee15948, eeaa1b4517,
a14bcf98dd, be84fc6e4e, 73a3f481bc, 5903bbc64a, f204809e43, fe4806ce49, 8f535acc3f, 53cbb4ae12, 4e9446d934, acbfb6ad64,
8570449080, ffe6109dfb, 7dbb8a1d75, 86210c1fc1, e96f15773d, bc5635dd8e, 5d71c90f0a, 05d6ab9516, ccb001ee97, 5a5cf91742,
6a0d4913f2, 685e50bf6c, f90fc6f681, d8f3f2dee1, da8100965f, 6d2ea1295e, d7914ff9aa, 7f4af5ebbc, 8f575c455c, 819166eb35,
f507802ec9, 62267811cb, 12bedef2d3, fd240701f8, 60b96e0a62, 1d377bab9d, 799690dc07, aa02d0c5e6, 6b8ecf3953, e5b81f367e,
ba4798464d, 655e8be382, fd9a5b0d7b, d09314bbb5, 371791215a, 4c220bb443, e27611d45a, a3e647c547, be52fe5461, 1d639fda0d,
bbdde79428, 1966f86120, fa7c98bc38, 6671869acc, 434c5d1b9c, cc9abfe03f, e02fd14a3c, 559eb8dea9, 9e6478b9c9, 3a295c4474,
f8dfc43cae, 3e19bc74d4, 2966922c0b, 991c7e1943, c31a7710ad, f4cace093c, 01e417d436, 261ce4278f, 785898b507, 47a2cf7cd5,
1f19793613, a0df2989af, bdb538ab42, c844a4fb2b, fea142774a, 4b575299bc, 4eec016f7d, 4078b21ac6, 1721d397a7, 558a0572f5,
d60b81c8a0, cc14c1fbab, 80aee1354b, 332d69259b, 9ad6b0d726, ea9df9e371, d69a9c4862, 6270a11bb1, 18726483a6, aed184f6ef,
f688a57132, e954ab7f8b, c9c8235c64, 8e2e77da56, 1e27dedde5, e947805c15, 7a1c3b6209, 49b5b510ee, 3cf850c2c4, 1fbbfcd063,
de19450f44, 09c94cc1a0, da301373fa, 1f558baa9b, 3c511023f3, d10a9ad4e6, 9ff9f8f601, 05a1099fd0, b2980afcd1, 6980dc59c5,
a9c8133fd4, 065abdd95a, cd8c6a8b9a, 459673f764, c795e4fb68, 7c98248e45, 16c771aa77

.github/CODEOWNERS (vendored, 2 lines changed)

@@ -1 +1 @@
-* @kvaps @lllamnyp
+* @kvaps @lllamnyp @klinch0

.github/workflows/backport.yaml (vendored, new file, 53 lines)

@@ -0,0 +1,53 @@
name: Automatic Backport

on:
  pull_request_target:
    types: [closed] # fires when PR is closed (merged)

concurrency:
  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'backport')
    runs-on: [self-hosted]

    steps:
      # 1. Decide which maintenance branch should receive the back‑port
      - name: Determine target maintenance branch
        id: target
        uses: actions/github-script@v7
        with:
          script: |
            let rel;
            try {
              rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
            } catch (e) {
              core.setFailed('No existing releases found; cannot determine backport target.');
              return;
            }
            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
            const branch = `release-${maj}.${min}`;
            core.setOutput('branch', branch);
            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);
      # 2. Checkout (required by backport‑action)
      - name: Checkout repository
        uses: actions/checkout@v4

      # 3. Create the back‑port pull request
      - name: Create back‑port PR
        uses: korthout/backport-action@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          label_pattern: '' # don't read labels for targets
          target_branches: ${{ steps.target.outputs.branch }}
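
The "Determine target maintenance branch" step above keys the backport target off the latest published release: it strips the leading "v" from the tag and keeps only MAJOR.MINOR to form `release-MAJOR.MINOR`. A minimal Go sketch of the same derivation, assuming the google/go-github client as a dependency and placeholder owner/repo values:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/google/go-github/v60/github"
)

// targetBranch mirrors the workflow logic: latest release tag "v0.31.5"
// becomes the maintenance branch name "release-0.31".
func targetBranch(ctx context.Context, gh *github.Client, owner, repo string) (string, error) {
	rel, _, err := gh.Repositories.GetLatestRelease(ctx, owner, repo)
	if err != nil {
		return "", fmt.Errorf("no existing releases found: %w", err)
	}
	parts := strings.Split(strings.TrimPrefix(rel.GetTagName(), "v"), ".")
	if len(parts) < 2 {
		return "", fmt.Errorf("unexpected tag %q", rel.GetTagName())
	}
	return fmt.Sprintf("release-%s.%s", parts[0], parts[1]), nil
}

func main() {
	// Placeholder owner/repo; an authenticated client would be needed for private repos.
	branch, err := targetBranch(context.Background(), github.NewClient(nil), "cozystack", "cozystack")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("backport target:", branch) // e.g. release-0.31
}
```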

.github/workflows/pre-commit.yml (vendored, 13 lines changed)

@@ -1,6 +1,14 @@
 name: Pre-Commit Checks

-on: [push, pull_request]
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, reopened]
+    paths-ignore:
+      - '**.md'
+
+concurrency:
+  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true

 jobs:
   pre-commit:
@@ -8,6 +16,9 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          fetch-tags: true

       - name: Set up Python
         uses: actions/setup-python@v4
|||||||
172
.github/workflows/pull-requests-release.yaml
vendored
Normal file
172
.github/workflows/pull-requests-release.yaml
vendored
Normal file
@@ -0,0 +1,172 @@
|
|||||||
|
name: Releasing PR
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
types: [labeled, opened, synchronize, reopened, closed]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: pull-requests-release-${{ github.workflow }}-${{ github.event.pull_request.number }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
verify:
|
||||||
|
name: Test Release
|
||||||
|
runs-on: [self-hosted]
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
# Run only when the PR carries the "release" label and not closed.
|
||||||
|
if: |
|
||||||
|
contains(github.event.pull_request.labels.*.name, 'release') &&
|
||||||
|
github.event.action != 'closed'
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
fetch-tags: true
|
||||||
|
|
||||||
|
- name: Login to GitHub Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
registry: ghcr.io
|
||||||
|
|
||||||
|
- name: Run tests
|
||||||
|
run: make test
|
||||||
|
|
||||||
|
finalize:
|
||||||
|
name: Finalize Release
|
||||||
|
runs-on: [self-hosted]
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
|
||||||
|
if: |
|
||||||
|
github.event.pull_request.merged == true &&
|
||||||
|
contains(github.event.pull_request.labels.*.name, 'release')
|
||||||
|
|
||||||
|
steps:
|
||||||
|
# Extract tag from branch name (branch = release-X.Y.Z*)
|
||||||
|
- name: Extract tag from branch name
|
||||||
|
id: get_tag
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const branch = context.payload.pull_request.head.ref;
|
||||||
|
const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
|
||||||
|
if (!m) {
|
||||||
|
core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const tag = `v${m[1]}`;
|
||||||
|
core.setOutput('tag', tag);
|
||||||
|
console.log(`✅ Tag to publish: ${tag}`);
|
||||||
|
|
||||||
|
# Checkout repo & create / push annotated tag
|
||||||
|
- name: Checkout repo
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Create tag on merge commit
|
||||||
|
run: |
|
||||||
|
git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
|
||||||
|
git push -f origin ${{ steps.get_tag.outputs.tag }}
|
||||||
|
|
||||||
|
# Ensure maintenance branch release-X.Y
|
||||||
|
- name: Ensure maintenance branch release-X.Y
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
|
||||||
|
const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
|
||||||
|
if (!match) {
|
||||||
|
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const line = `${match[1]}.${match[2]}`;
|
||||||
|
const branch = `release-${line}`;
|
||||||
|
try {
|
||||||
|
await github.rest.repos.getBranch({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
branch
|
||||||
|
});
|
||||||
|
console.log(`Branch '${branch}' already exists`);
|
||||||
|
} catch (_) {
|
||||||
|
await github.rest.git.createRef({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
ref: `refs/heads/${branch}`,
|
||||||
|
sha: context.sha
|
||||||
|
});
|
||||||
|
console.log(`✅ Branch '${branch}' created at ${context.sha}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
# Get the latest published release
|
||||||
|
- name: Get the latest published release
|
||||||
|
id: latest_release
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
try {
|
||||||
|
const rel = await github.rest.repos.getLatestRelease({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo
|
||||||
|
});
|
||||||
|
core.setOutput('tag', rel.data.tag_name);
|
||||||
|
} catch (_) {
|
||||||
|
core.setOutput('tag', '');
|
||||||
|
}
|
||||||
|
|
||||||
|
# Compare current tag vs latest using semver-utils
|
||||||
|
- name: Semver compare
|
||||||
|
id: semver
|
||||||
|
uses: madhead/semver-utils@v4.3.0
|
||||||
|
with:
|
||||||
|
version: ${{ steps.get_tag.outputs.tag }}
|
||||||
|
compare-to: ${{ steps.latest_release.outputs.tag }}
|
||||||
|
|
||||||
|
# Derive flags: prerelease? make_latest?
|
||||||
|
- name: Calculate publish flags
|
||||||
|
id: flags
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc1
|
||||||
|
const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
|
||||||
|
if (!m) {
|
||||||
|
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc1
|
||||||
|
const isRc = Boolean(m[2]);
|
||||||
|
core.setOutput('is_rc', isRc);
|
||||||
|
const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
|
||||||
|
core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');
|
||||||
|
|
||||||
|
# Publish draft release with correct flags
|
||||||
|
- name: Publish draft release
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const tag = '${{ steps.get_tag.outputs.tag }}';
|
||||||
|
const releases = await github.rest.repos.listReleases({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo
|
||||||
|
});
|
||||||
|
const draft = releases.data.find(r => r.tag_name === tag && r.draft);
|
||||||
|
if (!draft) throw new Error(`Draft release for ${tag} not found`);
|
||||||
|
await github.rest.repos.updateRelease({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
release_id: draft.id,
|
||||||
|
draft: false,
|
||||||
|
prerelease: ${{ steps.flags.outputs.is_rc }},
|
||||||
|
make_latest: '${{ steps.flags.outputs.make_latest }}'
|
||||||
|
});
|
||||||
|
|
||||||
|
console.log(`🚀 Published release for ${tag}`);
|
||||||
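
The "Calculate publish flags" step encodes a small policy: an `-rcN` tag is published as a prerelease, and `make_latest` stays `false` whenever the tag is an RC or compares lower (`<`) than the latest published release; otherwise GitHub's `legacy` behaviour is kept. A minimal, stdlib-only Go sketch of that decision (the sample tags and comparison results below are illustrative, not from the source):

```go
package main

import (
	"fmt"
	"regexp"
)

var tagRe = regexp.MustCompile(`^v(\d+\.\d+\.\d+)(-rc\d+)?$`)

// publishFlags mirrors the workflow step: prerelease for -rcN tags, and
// make_latest "false" for RCs or tags older ("<") than the latest release.
func publishFlags(tag, comparisonResult string) (isRC bool, makeLatest string, err error) {
	m := tagRe.FindStringSubmatch(tag)
	if m == nil {
		return false, "", fmt.Errorf("tag %q must match 'vX.Y.Z' or 'vX.Y.Z-rcN'", tag)
	}
	isRC = m[2] != ""
	if isRC || comparisonResult == "<" {
		return isRC, "false", nil
	}
	return isRC, "legacy", nil
}

func main() {
	cases := []struct{ tag, cmp string }{
		{"v0.31.5", ">"},     // newer stable release: becomes latest
		{"v0.31.5-rc1", ">"}, // release candidate: prerelease, never latest
		{"v0.30.9", "<"},     // patch on an older maintenance line: not latest
	}
	for _, c := range cases {
		rc, latest, _ := publishFlags(c.tag, c.cmp)
		fmt.Printf("%-12s prerelease=%-5v make_latest=%s\n", c.tag, rc, latest)
	}
}
```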

.github/workflows/pull-requests.yaml (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
name: Pull Request

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

concurrency:
  group: pull-requests-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  e2e:
    name: Build and Test
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Build
        run: make build

      - name: Test
        run: make test

.github/workflows/tags.yaml (vendored, new file, 224 lines)

@@ -0,0 +1,224 @@
name: Versioned Tag

on:
  push:
    tags:
      - 'v*.*.*' # vX.Y.Z or vX.Y.Z-rcN

concurrency:
  group: tags-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  prepare-release:
    name: Prepare Release
    runs-on: [self-hosted]
    permissions:
      contents: write
      packages: write
      pull-requests: write

    steps:
      # Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
            core.setOutput('skip', exists);
            console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);

      # If a published release already exists, skip the rest of the workflow
      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      # Parse tag meta‑data (rc?, maintenance line, etc.)
      - name: Parse tag
        if: steps.check_release.outputs.skip == 'false'
        id: tag
        uses: actions/github-script@v7
        with:
          script: |
            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc1
            const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\d+)?$/);
            if (!m) {
              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rcN'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5‑rc1
            const isRc = Boolean(m[2]);
            const [maj, min] = m[1].split('.');
            core.setOutput('tag', ref);
            core.setOutput('version', version);
            core.setOutput('is_rc', isRc);
            core.setOutput('line', `${maj}.${min}`); // 0.31

      # Detect base branch (main or release‑X.Y) the tag was pushed from
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
              return;
            }
            const branch = baseRef.replace('refs/heads/', '');
            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
            if (!ok) {
              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
              return;
            }
            core.setOutput('branch', branch);

      # Checkout & login once
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GHCR
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      # Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

      # Commit built artifacts
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
          git push origin HEAD || true

      # Get `latest_version` from latest published release
      - name: Get latest published release
        if: steps.check_release.outputs.skip == 'false'
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare tag (A) with latest (B)
      - name: Semver compare
        if: steps.check_release.outputs.skip == 'false'
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.tag.outputs.tag }} # A
          compare-to: ${{ steps.latest_release.outputs.tag }} # B

      # Create or reuse DRAFT GitHub Release
      - name: Create / reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.tag.outputs.tag }}';
            const isRc = ${{ steps.tag.outputs.is_rc }};
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            const makeLatest = outdated ? false : 'legacy';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            let rel = releases.data.find(r => r.tag_name === tag);
            if (!rel) {
              rel = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: tag,
                draft: true,
                prerelease: isRc,
                make_latest: makeLatest
              });
              console.log(`Draft release created for ${tag}`);
            } else {
              console.log(`Re‑using existing release ${tag}`);
            }
            core.setOutput('upload_url', rel.upload_url);

      # Build + upload assets (optional)
      - name: Build & upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: |
          make assets
          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Create release‑X.Y.Z branch and push (force‑update)
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH"
          git push -f origin "$BRANCH"

      # Create pull request into original base branch (if absent)
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });
            if (prs.data.length === 0) {
              const pr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
                body: `This PR prepares the release \`v${version}\`.`,
                draft: false
              });
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: pr.data.number,
                labels: ['release']
              });
              console.log(`Created PR #${pr.data.number}`);
            } else {
              console.log(`PR already exists from ${head} to ${base}`);
            }
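
The "Parse tag" and "Get base branch" steps above turn a pushed tag like `v0.31.5-rc1` into the outputs the rest of the job consumes: the bare version, an RC flag, and the `MAJOR.MINOR` maintenance line, while the tagged commit must sit on `main` or `release-X.Y`. A minimal, stdlib-only Go sketch of that parsing and validation (the sample ref below is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	versionRe = regexp.MustCompile(`^v(\d+\.\d+\.\d+)(-rc\d+)?$`)
	baseRe    = regexp.MustCompile(`^release-\d+\.\d+$`)
)

// parsedTag holds the same outputs the workflow step emits.
type parsedTag struct {
	Tag     string // v0.31.5-rc1
	Version string // 0.31.5-rc1
	IsRC    bool
	Line    string // 0.31, i.e. maintenance branch release-0.31
}

func parseTag(ref string) (parsedTag, error) {
	tag := strings.TrimPrefix(ref, "refs/tags/")
	m := versionRe.FindStringSubmatch(tag)
	if m == nil {
		return parsedTag{}, fmt.Errorf("tag %q must match 'vX.Y.Z' or 'vX.Y.Z-rcN'", tag)
	}
	majMin := strings.Join(strings.Split(m[1], ".")[:2], ".")
	return parsedTag{Tag: tag, Version: m[1] + m[2], IsRC: m[2] != "", Line: majMin}, nil
}

// validBase mirrors the base-branch check: only main or release-X.Y may be tagged.
func validBase(branch string) bool {
	return branch == "main" || baseRe.MatchString(branch)
}

func main() {
	p, err := parseTag("refs/tags/v0.31.5-rc1")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p)                      // {Tag:v0.31.5-rc1 Version:0.31.5-rc1 IsRC:true Line:0.31}
	fmt.Println(validBase("release-0.31"))      // true
	fmt.Println(validBase("feature/something")) // false
}
```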

@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas of

 * Problems found while setting up the development environment
 * Gaps in our documentation
-* Bugs in our Github actions
+* Bugs in our GitHub actions

-First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).
+First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

 The guidelines below are a starting point. We don't want to limit your
 creativity, passion, and initiative. If you think there's a better way, please
-feel free to bring it up in a Github discussion, or open a pull request. We're
+feel free to bring it up in a GitHub discussion, or open a pull request. We're
 certain there are always better ways to do things, we just need to start some
 constructive dialogue!

@@ -23,9 +23,9 @@ We welcome many types of contributions including:
 * New features
 * Builds, CI/CD
 * Bug fixes
-* [Documentation](https://github.com/cozystack/cozystack-website/tree/main)
+* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
 * Issue Triage
-* Answering questions on Slack or Github Discussions
+* Answering questions on Slack or GitHub Discussions
 * Web design
 * Communications / Social Media / Blog Posts
 * Events participation
@@ -34,7 +34,7 @@ We welcome many types of contributions including:
 ## Ask for Help

 The best way to reach us with a question when contributing is to drop a line in
-our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.
+our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.

 ## Raising Issues

Makefile (23 lines changed)

@@ -1,6 +1,13 @@
 .PHONY: manifests repos assets

-build:
+build-deps:
+	@command -V find docker skopeo jq gh helm > /dev/null
+	@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
+	@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
+	@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
+	@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)
+
+build: build-deps
 	make -C packages/apps/http-cache image
 	make -C packages/apps/postgres image
 	make -C packages/apps/mysql image
@@ -19,10 +26,6 @@ build:
 	make -C packages/core/installer image
 	make manifests

-manifests:
-	mkdir -p _out/assets
-	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
-
 repos:
 	rm -rf _out
 	make -C packages/apps check-version-map
@@ -33,17 +36,21 @@ repos:
 	mkdir -p _out/logos
 	cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/

+
+manifests:
+	mkdir -p _out/assets
+	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
+
 assets:
 	make -C packages/core/installer/ assets

 test:
-	test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
-	make -C packages/core/testing test-applications
+	#make -C packages/core/testing test-applications

 generate:
 	hack/update-codegen.sh

-upload_assets: assets
+upload_assets: manifests
 	hack/upload-assets.sh

README.md (33 lines changed)

@@ -12,20 +12,21 @@

 **Cozystack** is a free PaaS platform and framework for building clouds.

-With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
+Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.

-You can use Cozystack to build your own cloud or to provide a cost-effective development environments.
+Use Cozystack to build your own cloud or provide a cost-effective development environment.

 ## Use-Cases

-* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
+* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
-You can use Cozystack as backend for a public cloud
+You can use Cozystack as a backend for a public cloud

-* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
+* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
-You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach
+You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach

-* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
+* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
-You can use Cozystack as Kubernetes distribution for Bare Metal
+You can use Cozystack as a Kubernetes distribution for Bare Metal

 ## Screenshot

@@ -33,11 +34,11 @@ You can use Cozystack as Kubernetes distribution for Bare Metal

 ## Documentation

-The documentation is located on official [cozystack.io](https://cozystack.io) website.
+The documentation is located on the [cozystack.io](https://cozystack.io) website.

-Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
+Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.

-If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.
+If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.

 ## Versioning

@@ -50,15 +51,15 @@ A full list of the available releases is available in the GitHub repository's [R

 Contributions are highly appreciated and very welcomed!

-In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
+In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
-In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
+If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.

-You can express your intention in working on the fix on your own.
+You can express your intention to on the fix on your own.
 Commits are used to generate the changelog, and their author will be referenced in it.

-In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
+If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).

-You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
+You are welcome to join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).

 ## License

@@ -178,6 +178,15 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
 		os.Exit(1)
 	}

+	if err = (&controller.WorkloadReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "Workload")
+		os.Exit(1)
+	}
+
 	// +kubebuilder:scaffold:builder

 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
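
The hunk above wires a new `WorkloadReconciler` into the controller-runtime manager alongside the existing `WorkloadMonitor` controller. The actual reconciler lives elsewhere in the repository and is not shown here; the following is only a minimal sketch of what a controller-runtime reconciler with those fields looks like. The watched type is assumed (a core/v1 Pod is used so the sketch compiles on its own); the real controller watches the repository's own Workload API type.

```go
package controller

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// WorkloadReconciler carries the same fields the diff wires up in main().
type WorkloadReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

// Reconcile fetches the object for the request and would normally drive it
// toward the desired state; here it only logs the event.
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	var pod corev1.Pod // placeholder for the real Workload type
	if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
		// The object may have been deleted between the event and this call.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	logger.Info("reconciling", "namespace", req.Namespace, "name", req.Name)
	return ctrl.Result{}, nil
}

// SetupWithManager registers the reconciler, matching the call added in main().
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Pod{}). // placeholder watched type
		Complete(r)
}
```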
@@ -626,7 +626,7 @@
|
|||||||
"datasource": {
|
"datasource": {
|
||||||
"uid": "${DS_PROMETHEUS}"
|
"uid": "${DS_PROMETHEUS}"
|
||||||
},
|
},
|
||||||
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"POD\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
|
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"interval": "",
|
"interval": "",
|
||||||
"legendFormat": "{{pod}}",
|
"legendFormat": "{{pod}}",
|
||||||
|
|||||||
@@ -450,7 +450,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
|
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"legendFormat": "Total",
|
"legendFormat": "Total",
|
||||||
"range": true,
|
"range": true,
|
||||||
@@ -520,7 +520,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
|
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"legendFormat": "Total",
|
"legendFormat": "Total",
|
||||||
"range": true,
|
"range": true,
|
||||||
@@ -590,7 +590,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
|
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"legendFormat": "Total",
|
"legendFormat": "Total",
|
||||||
"range": true,
|
"range": true,
|
||||||
@@ -660,7 +660,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
|
"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"legendFormat": "__auto",
|
"legendFormat": "__auto",
|
||||||
"range": true,
|
"range": true,
|
||||||
@@ -1128,7 +1128,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
|
"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1143,7 +1143,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
|
"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"legendFormat": "Total",
|
"legendFormat": "Total",
|
||||||
"range": true,
|
"range": true,
|
||||||
@@ -1527,7 +1527,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
|
"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1542,7 +1542,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
|
"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"legendFormat": "Total",
|
"legendFormat": "Total",
|
||||||
"range": true,
|
"range": true,
|
||||||
@@ -1909,7 +1909,7 @@
|
|||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"exemplar": false,
|
"exemplar": false,
|
||||||
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
|
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"range": false,
|
"range": false,
|
||||||
@@ -2037,7 +2037,7 @@
|
|||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"exemplar": false,
|
"exemplar": false,
|
||||||
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
|
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"range": false,
|
"range": false,
|
||||||
@@ -2160,7 +2160,7 @@
|
|||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"exemplar": false,
|
"exemplar": false,
|
||||||
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
|
"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"range": false,
|
"range": false,
|
||||||
@@ -2288,7 +2288,7 @@
|
|||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"exemplar": false,
|
"exemplar": false,
|
||||||
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
|
"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"range": false,
|
"range": false,
|
||||||
|
|||||||
@@ -684,7 +684,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -710,7 +710,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -723,7 +723,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -736,7 +736,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -762,7 +762,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -786,7 +786,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -798,7 +798,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -810,7 +810,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -848,7 +848,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -860,7 +860,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1315,7 +1315,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -1488,7 +1488,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1502,7 +1502,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -1642,7 +1642,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -1779,7 +1779,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2095,7 +2095,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -2306,7 +2306,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2468,7 +2468,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2837,7 +2837,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3290,56 +3290,56 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "G"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3834,7 +3834,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3972,7 +3972,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -656,7 +656,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -680,7 +680,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -692,7 +692,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -704,7 +704,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -728,7 +728,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -740,7 +740,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -752,7 +752,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -764,7 +764,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -776,7 +776,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -814,7 +814,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -826,7 +826,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -877,7 +877,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1475,7 +1475,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1657,7 +1657,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1798,7 +1798,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2257,28 +2257,28 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
"refId": "D"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "C"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2458,7 +2458,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2470,7 +2470,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2622,7 +2622,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2814,7 +2814,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2822,14 +2822,14 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2955,7 +2955,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3091,7 +3091,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3408,14 +3408,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3423,7 +3423,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3431,35 +3431,35 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "E"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "G"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3910,7 +3910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -4049,7 +4049,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -869,7 +869,7 @@
"refId": "A"
},
{
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -878,7 +878,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -895,7 +895,7 @@
"refId": "D"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -903,7 +903,7 @@
"refId": "E"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -919,7 +919,7 @@
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -935,7 +935,7 @@
"refId": "I"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -943,7 +943,7 @@
"refId": "J"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -968,7 +968,7 @@
"refId": "M"
},
{
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -977,7 +977,7 @@
"refId": "N"
},
{
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -1449,7 +1449,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1627,7 +1627,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1764,7 +1764,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2218,21 +2218,21 @@
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2419,7 +2419,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2572,7 +2572,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2754,14 +2754,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2769,7 +2769,7 @@
"refId": "B"
},
{
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2777,14 +2777,14 @@
"refId": "C"
},
{
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2910,7 +2910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3046,7 +3046,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -3370,14 +3370,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3385,7 +3385,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3393,35 +3393,35 @@
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "F"
},
{
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3873,7 +3873,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -4008,7 +4008,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -686,7 +686,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -759,7 +759,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -847,7 +847,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
|
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -860,7 +860,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
|
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -899,7 +899,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1503,7 +1503,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 1,

@@ -1669,7 +1669,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 1,

@@ -1681,7 +1681,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "User",

@@ -1820,7 +1820,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
+          "expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
           "format": "time_series",
           "hide": false,
           "intervalFactor": 1,

@@ -2269,7 +2269,7 @@
       "repeatDirection": "h",
       "targets": [
         {
-          "expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Usage",

@@ -2476,7 +2476,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 1,

@@ -2488,7 +2488,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "User",

@@ -2639,7 +2639,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 1,
@@ -2816,7 +2816,7 @@
       "pluginVersion": "8.5.13",
       "targets": [
         {
-          "expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 1,

@@ -2824,28 +2824,28 @@
           "refId": "A"
         },
         {
-          "expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Cache",
           "refId": "B"
         },
         {
-          "expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Swap",
           "refId": "C"
         },
         {
-          "expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Working set bytes without kmem",
           "refId": "D"
         },
         {
-          "expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+          "expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Kmem",

@@ -2974,7 +2974,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
+          "expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
           "format": "time_series",
           "hide": false,
           "intervalFactor": 1,

@@ -3110,7 +3110,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
+          "expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
           "format": "time_series",
           "hide": false,
           "intervalFactor": 1,
@@ -3431,7 +3431,7 @@
       "repeatDirection": "h",
       "targets": [
         {
-          "expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "instant": false,
           "intervalFactor": 1,

@@ -3439,7 +3439,7 @@
           "refId": "A"
         },
         {
-          "expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "interval": "",
           "intervalFactor": 1,

@@ -3447,28 +3447,28 @@
           "refId": "B"
         },
         {
-          "expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Swap",
           "refId": "C"
         },
         {
-          "expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Working set bytes without kmem",
           "refId": "D"
         },
         {
-          "expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Limits",
           "refId": "E"
         },
         {
-          "expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Requests",

@@ -3482,7 +3482,7 @@
           "refId": "G"
         },
         {
-          "expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+          "expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "Kmem",
@@ -3930,7 +3930,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{ container }}",

@@ -4068,7 +4068,7 @@
           "type": "prometheus",
           "uid": "$ds_prometheus"
         },
-          "expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+          "expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
           "format": "time_series",
           "intervalFactor": 1,
           "legendFormat": "{{ container }}",
docs/release.md (new file, 139 lines)
@@ -0,0 +1,139 @@
# Release Workflow

This section explains how Cozystack builds and releases are made.

## Regular Releases

When making regular releases, we take a commit in `main` and decide to make it a release `x.y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub.
2. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
    2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews PR, tests build artifacts, and edits changelogs on the release draft page.
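A minimal sketch of step 1, assuming the GitHub repository is configured as the `origin` remote:

```bash
# Tag the chosen commit on main and push the tag to GitHub;
# pushing the tag is what triggers the release CI workflow.
git checkout main
git pull origin main
git tag v0.42.0
git push origin v0.42.0
```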

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

When testing and editing are completed, the sequence goes on.

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on merge:
    1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing a tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```
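The tag move in step 5.1 is performed by CI; a minimal sketch of what it effectively runs (the merge commit SHA is a placeholder):

```bash
# Re-point the existing release tag at the merge commit and force-push it;
# force is required because v0.42.0 already exists on GitHub.
git tag --force v0.42.0 <merge-commit-sha>
git push --force origin v0.42.0
```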

## Patch Releases

Making a patch release has a lot in common with a regular release, with a couple of differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that we have released
in `v0.42.0`.

Once problems were found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```

1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
   These must be only patches to features that were present in version `v0.42.0`.

   Cherry-picking can be done as soon as each patch is merged into `main`,
   or directly before the release.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```

When all relevant patch commits are cherry-picked, the branch is ready for release.
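A minimal sketch of step 1, assuming the patch commits are already merged into `main` (commit hashes are placeholders):

```bash
# Create the release branch from the release tag and cherry-pick the fixes.
git checkout -b release-0.42 v0.42.0
git cherry-pick <patch-1-sha>
git cherry-pick <patch-2-sha>
git push origin release-0.42
```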

2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and then pushes it to GitHub.
3. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
    2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, when the release is confirmed, the release sequence goes on.

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on merge:
    1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing a tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.
hack/e2e.sh (26 changed lines)
@@ -60,7 +60,7 @@ done

 # Prepare system drive
 if [ ! -f nocloud-amd64.raw ]; then
-  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
+  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
   rm -f nocloud-amd64.raw
   xz --decompress nocloud-amd64.raw.xz
 fi

@@ -84,7 +84,7 @@ done

 # Start VMs
 for i in 1 2 3; do
-  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
+  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
     -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
     -drive file=srv$i/system.img,if=virtio,format=raw \
     -drive file=srv$i/seed.img,if=virtio,format=raw \

@@ -113,6 +113,11 @@ machine:
         - usermode_helper=disabled
     - name: zfs
     - name: spl
+  registries:
+    mirrors:
+      docker.io:
+        endpoints:
+          - https://mirror.gcr.io
   files:
   - content: |
       [plugins]

@@ -226,11 +231,11 @@ timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'

 sleep 5

-kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
+kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=20m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

 # Wait for Cluster-API providers
-timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
+timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
-kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
+kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

 # Wait for linstor controller
 kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

@@ -313,7 +318,12 @@ kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"s
 timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

 # Wait for HelmReleases be installed
-kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
+kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
+
+if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
+  flux reconcile hr monitoring -n tenant-root --force
+  kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
+fi

 kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
   "dashboard": true

@@ -328,7 +338,7 @@ kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root

 # Wait for Victoria metrics
 kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
-kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
+kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
 kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

 # Wait for grafana

@@ -347,5 +357,5 @@ kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
   "oidc-enabled": "true"
 }}'

-timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
+timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
 kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
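For context, the `kubectl get hr -A | awk ... | sh -x` pipeline above generates one backgrounded `kubectl wait` per HelmRelease plus a final `wait`; a sketch of what the generated script might look like (namespaces and release names are illustrative):

```bash
kubectl wait --timeout=20m --for=condition=ready -n cozy-example hr/example &
kubectl wait --timeout=20m --for=condition=ready -n cozy-example-2 hr/example-2 &
wait
```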
@@ -19,21 +19,19 @@ fi
 miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")

 # search accross all tags sorted by version
-search_commits=$(git ls-remote --tags origin | grep 'refs/tags/v' | sort -k2,2 -rV | awk '{print $1}')
+search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
-# add latest main commit to search
-search_commits="${search_commits} $(git rev-parse "origin/main")"

 resolved_miss_map=$(
   echo "$miss_map" | while read -r chart version commit; do
     # if version is found in HEAD, it's HEAD
-    if grep -q "^version: $version$" ./${chart}/Chart.yaml; then
+    if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
       echo "$chart $version HEAD"
       continue
     fi

     # if commit is not HEAD, check if it's valid
-    if [ $commit != "HEAD" ]; then
+    if [ "$commit" != "HEAD" ]; then
-      if ! git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
+      if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
         echo "Commit $commit for $chart $version is not valid" >&2
         exit 1
       fi

@@ -46,15 +44,15 @@ resolved_miss_map=(
     # if commit is HEAD, but version is not found in HEAD, check all tags
     found_tag=""
     for tag in $search_commits; do
-      if git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
+      if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
         found_tag=$(git rev-parse --short "${tag}")
         break
       fi
     done

     if [ -z "$found_tag" ]; then
-      echo "Can't find $chart $version in any version tag or in the latest main commit" >&2
-      exit 1
+      echo "Can't find $chart $version in any version tag, removing it" >&2
+      continue
     fi

     echo "$chart $version $found_tag"
@@ -1,8 +1,11 @@
 #!/bin/bash
 set -xe

-version=$(git describe --tags)
-gh release upload $version _out/assets/cozystack-installer.yaml
-gh release upload $version _out/assets/metal-amd64.iso
-gh release upload $version _out/assets/metal-amd64.raw.xz
-gh release upload $version _out/assets/nocloud-amd64.raw.xz
+version=${VERSION:-$(git describe --tags)}
+
+gh release upload --clobber $version _out/assets/cozystack-installer.yaml
+gh release upload --clobber $version _out/assets/metal-amd64.iso
+gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
+gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
+gh release upload --clobber $version _out/assets/kernel-amd64
+gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
internal/controller/workload_controller.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package controller

import (
    "context"
    "strings"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/log"

    cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadReconciler reconciles a Workload object
type WorkloadReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    logger := log.FromContext(ctx)
    w := &cozyv1alpha1.Workload{}
    err := r.Get(ctx, req.NamespacedName, w)
    if err != nil {
        if apierrors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        logger.Error(err, "Unable to fetch Workload")
        return ctrl.Result{}, err
    }

    // it's being deleted, nothing to handle
    if w.DeletionTimestamp != nil {
        return ctrl.Result{}, nil
    }

    t := getMonitoredObject(w)
    err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

    // found object, nothing to do
    if err == nil {
        return ctrl.Result{}, nil
    }

    // error getting object but not 404 -- requeue
    if !apierrors.IsNotFound(err) {
        logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
        return ctrl.Result{}, err
    }

    err = r.Delete(ctx, w)
    if err != nil {
        logger.Error(err, "failed to delete workload")
    }
    return ctrl.Result{}, err
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        // Watch Workload objects
        For(&cozyv1alpha1.Workload{}).
        Complete(r)
}

// getMonitoredObject maps a Workload back to the object it represents,
// based on the "pvc-" / "svc-" name prefixes; everything else is a Pod.
func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
    if strings.HasPrefix(w.Name, "pvc-") {
        obj := &corev1.PersistentVolumeClaim{}
        obj.Name = strings.TrimPrefix(w.Name, "pvc-")
        obj.Namespace = w.Namespace
        return obj
    }
    if strings.HasPrefix(w.Name, "svc-") {
        obj := &corev1.Service{}
        obj.Name = strings.TrimPrefix(w.Name, "svc-")
        obj.Namespace = w.Namespace
        return obj
    }
    obj := &corev1.Pod{}
    obj.Name = w.Name
    obj.Namespace = w.Namespace
    return obj
}
@@ -3,6 +3,7 @@ package controller
 import (
     "context"
     "encoding/json"
+    "fmt"
     "sort"

     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +34,17 @@ type WorkloadMonitorReconciler struct {
 // +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
+// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch
+
+// isServiceReady checks if the service has an external IP bound
+func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
+    return len(svc.Status.LoadBalancer.Ingress) > 0
+}
+
+// isPVCReady checks if the PVC is bound
+func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
+    return pvc.Status.Phase == corev1.ClaimBound
+}
+
 // isPodReady checks if the Pod is in the Ready condition.
 func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
@@ -88,6 +100,110 @@ func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
     obj.SetOwnerReferences(owners)
 }
+
+// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
+func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
+    ctx context.Context,
+    monitor *cozyv1alpha1.WorkloadMonitor,
+    svc corev1.Service,
+) error {
+    logger := log.FromContext(ctx)
+    workload := &cozyv1alpha1.Workload{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      fmt.Sprintf("svc-%s", svc.Name),
+            Namespace: svc.Namespace,
+        },
+    }
+
+    resources := make(map[string]resource.Quantity)
+
+    // Count one IP-address unit per external IP assigned to the LoadBalancer.
+    quantity := resource.MustParse("0")
+    for _, ing := range svc.Status.LoadBalancer.Ingress {
+        if ing.IP != "" {
+            quantity.Add(resource.MustParse("1"))
+        }
+    }
+
+    var resourceLabel string
+    if svc.Annotations != nil {
+        var ok bool
+        resourceLabel, ok = svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]
+        if !ok {
+            resourceLabel = "default"
+        }
+    }
+    resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
+    resources[resourceLabel] = quantity
+
+    _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
+        // Update owner references with the new monitor
+        updateOwnerReferences(workload.GetObjectMeta(), monitor)
+
+        workload.Labels = svc.Labels
+
+        // Fill Workload status fields:
+        workload.Status.Kind = monitor.Spec.Kind
+        workload.Status.Type = monitor.Spec.Type
+        workload.Status.Resources = resources
+        workload.Status.Operational = r.isServiceReady(&svc)
+
+        return nil
+    })
+    if err != nil {
+        logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
+        return err
+    }
+
+    return nil
+}
+
+// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
+func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
+    ctx context.Context,
+    monitor *cozyv1alpha1.WorkloadMonitor,
+    pvc corev1.PersistentVolumeClaim,
+) error {
+    logger := log.FromContext(ctx)
+    workload := &cozyv1alpha1.Workload{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      fmt.Sprintf("pvc-%s", pvc.Name),
+            Namespace: pvc.Namespace,
+        },
+    }
+
+    resources := make(map[string]resource.Quantity)
+
+    for resourceName, resourceQuantity := range pvc.Status.Capacity {
+        storageClass := "default"
+        if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName != "" {
+            storageClass = *pvc.Spec.StorageClassName
+        }
+        resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
+        resources[resourceLabel] = resourceQuantity
+    }
+
+    _, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
+        // Update owner references with the new monitor
+        updateOwnerReferences(workload.GetObjectMeta(), monitor)
+
+        workload.Labels = pvc.Labels
+
+        // Fill Workload status fields:
+        workload.Status.Kind = monitor.Spec.Kind
+        workload.Status.Type = monitor.Spec.Type
+        workload.Status.Resources = resources
+        workload.Status.Operational = r.isPVCReady(&pvc)
+
+        return nil
+    })
+    if err != nil {
+        logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
+        return err
+    }
+
+    return nil
+}
+
 // reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
 func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
     ctx context.Context,
@@ -205,6 +321,45 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
         }
     }
+
+    pvcList := &corev1.PersistentVolumeClaimList{}
+    if err := r.List(
+        ctx,
+        pvcList,
+        client.InNamespace(monitor.Namespace),
+        client.MatchingLabels(monitor.Spec.Selector),
+    ); err != nil {
+        logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
+        return ctrl.Result{}, err
+    }
+
+    for _, pvc := range pvcList.Items {
+        if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
+            logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
+            continue
+        }
+    }
+
+    svcList := &corev1.ServiceList{}
+    if err := r.List(
+        ctx,
+        svcList,
+        client.InNamespace(monitor.Namespace),
+        client.MatchingLabels(monitor.Spec.Selector),
+    ); err != nil {
+        logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
+        return ctrl.Result{}, err
+    }
+
+    for _, svc := range svcList.Items {
+        if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
+            continue
+        }
+        if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
+            logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
+            continue
+        }
+    }
+
     // Update WorkloadMonitor status based on observed pods
     monitor.Status.ObservedReplicas = observedReplicas
     monitor.Status.AvailableReplicas = availableReplicas
@@ -233,41 +388,51 @@ func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
         // Also watch Pod objects and map them back to WorkloadMonitor if labels match
         Watches(
             &corev1.Pod{},
-            handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
-                pod, ok := obj.(*corev1.Pod)
-                if !ok {
-                    return nil
-                }
-
-                var monitorList cozyv1alpha1.WorkloadMonitorList
-                // List all WorkloadMonitors in the same namespace
-                if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
-                    return nil
-                }
-
-                // Match each monitor's selector with the Pod's labels
-                var requests []reconcile.Request
-                for _, m := range monitorList.Items {
-                    matches := true
-                    for k, v := range m.Spec.Selector {
-                        if podVal, exists := pod.Labels[k]; !exists || podVal != v {
-                            matches = false
-                            break
-                        }
-                    }
-                    if matches {
-                        requests = append(requests, reconcile.Request{
-                            NamespacedName: types.NamespacedName{
-                                Namespace: m.Namespace,
-                                Name:      m.Name,
-                            },
-                        })
-                    }
-                }
-                return requests
-            }),
+            handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
+        ).
+        // Watch PVCs as well
+        Watches(
+            &corev1.PersistentVolumeClaim{},
+            handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
         ).
         // Watch for changes to Workload objects we create (owned by WorkloadMonitor)
         Owns(&cozyv1alpha1.Workload{}).
         Complete(r)
 }
+
+// mapObjectToMonitor returns a map function that enqueues every WorkloadMonitor
+// in the object's namespace whose selector matches the object's labels.
+func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
+    return func(ctx context.Context, obj client.Object) []reconcile.Request {
+        concrete, ok := obj.(T)
+        if !ok {
+            return nil
+        }
+
+        var monitorList cozyv1alpha1.WorkloadMonitorList
+        // List all WorkloadMonitors in the same namespace
+        if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
+            return nil
+        }
+
+        labels := concrete.GetLabels()
+        // Match each monitor's selector with the object's labels
+        var requests []reconcile.Request
+        for _, m := range monitorList.Items {
+            matches := true
+            for k, v := range m.Spec.Selector {
+                if labelVal, exists := labels[k]; !exists || labelVal != v {
+                    matches = false
+                    break
+                }
+            }
+            if matches {
+                requests = append(requests, reconcile.Request{
+                    NamespacedName: types.NamespacedName{
+                        Namespace: m.Namespace,
+                        Name:      m.Name,
+                    },
+                })
+            }
+        }
+        return requests
+    }
+}
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/clickhouse-backup:0.6.2@sha256:67dd53efa86b704fc5cb876aca055fef294b31ab67899b683a4821ea12582ea7
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.3.1@sha256:2b82eae28239ca0f9968602c69bbb752cd2a5818e64934ccd06cb91d95d019c7
+ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.17.0
+version: 0.19.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1,4 +1,4 @@
|
|||||||
UBUNTU_CONTAINER_DISK_TAG = v1.30.1
|
KUBERNETES_VERSION = v1.32
|
||||||
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
|
KUBERNETES_PKG_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
|
||||||
|
|
||||||
include ../../../scripts/common-envs.mk
|
include ../../../scripts/common-envs.mk
|
||||||
@@ -6,21 +6,26 @@ include ../../../scripts/package.mk
|
|||||||
|
|
||||||
generate:
|
generate:
|
||||||
readme-generator -v values.yaml -s values.schema.json -r README.md
|
readme-generator -v values.yaml -s values.schema.json -r README.md
|
||||||
|
yq -o json -i '.properties.controlPlane.properties.apiServer.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
|
yq -o json -i '.properties.controlPlane.properties.controllerManager.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
|
yq -o json -i '.properties.controlPlane.properties.scheduler.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
|
yq -o json -i '.properties.controlPlane.properties.konnectivity.properties.server.properties.resourcesPreset.enum = ["none","nano","micro","small","medium","large","xlarge","2xlarge"]' values.schema.json
|
||||||
|
|
||||||
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler
|
image: image-ubuntu-container-disk image-kubevirt-cloud-provider image-kubevirt-csi-driver image-cluster-autoscaler
|
||||||
|
|
||||||
image-ubuntu-container-disk:
|
image-ubuntu-container-disk:
|
||||||
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
|
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
|
||||||
--provenance false \
|
--provenance false \
|
||||||
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
|
--build-arg KUBERNETES_VERSION=${KUBERNETES_VERSION} \
|
||||||
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
|
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)) \
|
||||||
|
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION)-$(TAG)) \
|
||||||
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
|
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
|
||||||
--cache-to type=inline \
|
--cache-to type=inline \
|
||||||
--metadata-file images/ubuntu-container-disk.json \
|
--metadata-file images/ubuntu-container-disk.json \
|
||||||
--push=$(PUSH) \
|
--push=$(PUSH) \
|
||||||
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
|
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
|
||||||
--load=$(LOAD)
|
--load=$(LOAD)
|
||||||
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
|
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(KUBERNETES_VERSION))@$$(yq e '."containerimage.digest"' images/ubuntu-container-disk.json -o json -r)" \
|
||||||
> images/ubuntu-container-disk.tag
|
> images/ubuntu-container-disk.tag
|
||||||
rm -f images/ubuntu-container-disk.json
|
rm -f images/ubuntu-container-disk.json
|
||||||
|
|
||||||
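For orientation: the extra `yq` calls in this `generate` target constrain each control-plane component's `resourcesPreset` in the generated `values.schema.json` to the fixed preset list, while the build target now derives the container-disk tag from `KUBERNETES_VERSION` instead of a separate Ubuntu tag. A rough sketch of the schema fragment those `yq` edits produce (shown as YAML for readability; the real file is JSON generated by readme-generator):

```yaml
# Approximate shape only, not the literal generated file
properties:
  controlPlane:
    properties:
      apiServer:
        properties:
          resourcesPreset:
            type: string
            enum: [none, nano, micro, small, medium, large, xlarge, 2xlarge]
```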
|
|||||||
@@ -27,20 +27,46 @@ How to access to deployed cluster:
|
|||||||
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
|
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
|
||||||
```
|
```
|
||||||
|
|
||||||
# Series
|
## Parameters
|
||||||
|
|
||||||
<!-- source: https://github.com/kubevirt/common-instancetypes/blob/main/README.md -->
|
### Common parameters
|
||||||
|
|
||||||
. | U | O | CX | M | RT
|
| Name | Description | Value |
|
||||||
----------------------------|-----|-----|------|-----|------
|
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
|
||||||
*Has GPUs* | | | | |
|
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
|
||||||
*Hugepages* | | | ✓ | ✓ | ✓
|
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
|
||||||
*Overcommitted Memory* | | ✓ | | |
|
| `storageClass` | StorageClass used to store user data | `replicated` |
|
||||||
*Dedicated CPU* | | | ✓ | | ✓
|
| `nodeGroups` | nodeGroups configuration | `{}` |
|
||||||
*Burstable CPU performance* | ✓ | ✓ | | ✓ |
|
|
||||||
*Isolated emulator threads* | | | ✓ | | ✓
|
### Cluster Addons
|
||||||
*vNUMA* | | | ✓ | | ✓
|
|
||||||
*vCPU-To-Memory Ratio* | 1:4 | 1:4 | 1:2 | 1:8 | 1:4
|
| Name | Description | Value |
|
||||||
|
| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||||
|
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
|
||||||
|
| `addons.certManager.valuesOverride` | Custom values to override | `{}` |
|
||||||
|
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
|
||||||
|
| `addons.ingressNginx.valuesOverride` | Custom values to override | `{}` |
|
||||||
|
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
|
||||||
|
| `addons.gpuOperator.enabled` | Enables the gpu-operator | `false` |
|
||||||
|
| `addons.gpuOperator.valuesOverride` | Custom values to override | `{}` |
|
||||||
|
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
|
||||||
|
| `addons.fluxcd.valuesOverride` | Custom values to override | `{}` |
|
||||||
|
| `addons.monitoringAgents.enabled` | Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage | `false` |
|
||||||
|
| `addons.monitoringAgents.valuesOverride` | Custom values to override | `{}` |
|
||||||
|
| `addons.verticalPodAutoscaler.valuesOverride` | Custom values to override | `{}` |
|
||||||
|
|
||||||
|
### Kubernetes control plane configuration
|
||||||
|
|
||||||
|
| Name | Description | Value |
|
||||||
|
| -------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||||
|
| `controlPlane.apiServer.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `small` |
|
||||||
|
| `controlPlane.apiServer.resources` | Resources | `{}` |
|
||||||
|
| `controlPlane.controllerManager.resources` | Resources | `{}` |
|
||||||
|
| `controlPlane.controllerManager.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||||
|
| `controlPlane.scheduler.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||||
|
| `controlPlane.scheduler.resources` | Resources | `{}` |
|
||||||
|
| `controlPlane.konnectivity.server.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `micro` |
|
||||||
|
| `controlPlane.konnectivity.server.resources` | Resources | `{}` |
|
||||||
|
|
||||||
|
|
||||||
## U Series
|
## U Series
|
||||||
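Pulling the new parameter tables together, a minimal values snippet for this chart could look like the following; the concrete hostnames and choices are illustrative only, but every key appears in the tables above:

```yaml
# Example only: tenant Kubernetes cluster values assembled from the documented parameters
host: ""                  # empty -> cluster name used as a subdomain of the tenant host
controlPlane:
  replicas: 2
  apiServer:
    resourcesPreset: small
storageClass: replicated
addons:
  certManager:
    enabled: true
  ingressNginx:
    enabled: true         # expects nodes carrying the 'ingress-nginx' role
    hosts:
      - example.org       # hypothetical domain passed through by the upper cluster
```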
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/cluster-autoscaler:0.15.2@sha256:967e51702102d0dbd97f9847de4159d62681b31eb606322d2c29755393c2236e
+ghcr.io/cozystack/cozystack/cluster-autoscaler:0.18.0@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:latest@sha256:47ad85a2bb2b11818df85e80cbc6e07021e97e429d5bb020ce8db002b37a77f1
+ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.18.0@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.15.2@sha256:cb4ab74099662f73e058f7c7495fb403488622c3425c06ad23b687bfa8bc805b
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.18.0@sha256:6f9091c3e7e4951c5e43fdafd505705fcc9f1ead290ee3ae42e97e9ec2b87b20
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:bc08ea0ced2cb7dd98b26d72a9462fc0a3863adb908a5effbfcdf7227656ea65
+ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
# TODO: Here we use ubuntu:22.04, as guestfish has some network issues running in ubuntu:24.04
|
||||||
FROM ubuntu:22.04 as guestfish
|
FROM ubuntu:22.04 as guestfish
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
@@ -5,6 +6,7 @@ RUN apt-get update \
|
|||||||
&& apt-get -y install \
|
&& apt-get -y install \
|
||||||
libguestfs-tools \
|
libguestfs-tools \
|
||||||
linux-image-generic \
|
linux-image-generic \
|
||||||
|
wget \
|
||||||
make \
|
make \
|
||||||
bash-completion \
|
bash-completion \
|
||||||
&& apt-get clean
|
&& apt-get clean
|
||||||
@@ -13,7 +15,10 @@ WORKDIR /build
|
|||||||
|
|
||||||
FROM guestfish as builder
|
FROM guestfish as builder
|
||||||
|
|
||||||
RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
|
# noble is a code name for the Ubuntu 24.04 LTS release
|
||||||
|
RUN wget -O image.img https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
|
||||||
|
|
||||||
|
ARG KUBERNETES_VERSION
|
||||||
|
|
||||||
RUN qemu-img resize image.img 5G \
|
RUN qemu-img resize image.img 5G \
|
||||||
&& eval "$(guestfish --listen --network)" \
|
&& eval "$(guestfish --listen --network)" \
|
||||||
@@ -26,8 +31,8 @@ RUN qemu-img resize image.img 5G \
|
|||||||
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
|
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
|
||||||
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
|
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
|
||||||
# kubernetes repo
|
# kubernetes repo
|
||||||
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
|
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
|
||||||
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
|
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/${KUBERNETES_VERSION}/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
|
||||||
# install containerd
|
# install containerd
|
||||||
&& guestfish --remote command "apt-get update -y" \
|
&& guestfish --remote command "apt-get update -y" \
|
||||||
&& guestfish --remote command "apt-get install -y containerd.io" \
|
&& guestfish --remote command "apt-get install -y containerd.io" \
|
||||||
|
|||||||
@@ -39,6 +39,13 @@ spec:
|
|||||||
sockets: 1
|
sockets: 1
|
||||||
{{- end }}
|
{{- end }}
|
||||||
devices:
|
devices:
|
||||||
|
{{- if .group.gpus }}
|
||||||
|
gpus:
|
||||||
|
{{- range $i, $gpu := .group.gpus }}
|
||||||
|
- name: gpu{{ add $i 1 }}
|
||||||
|
deviceName: {{ $gpu.name }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||
disks:
|
disks:
|
||||||
- name: system
|
- name: system
|
||||||
disk:
|
disk:
|
||||||
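To make the new `gpus` block concrete: the template ranges over `.group.gpus` from the node-group values and emits `gpu1`, `gpu2`, ... devices. A hypothetical node group requesting a single GPU, mirroring the commented example added to values.yaml further down, would look roughly like:

```yaml
# Illustrative nodeGroups entry; "md0" is a made-up group name
nodeGroups:
  md0:
    instanceType: u1.xlarge        # NVIDIA driver needs at least 4 GiB of RAM
    gpus:
      - name: nvidia.com/AD102GL_L40S
```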
@@ -103,22 +110,22 @@ metadata:
|
|||||||
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
|
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
|
||||||
spec:
|
spec:
|
||||||
apiServer:
|
apiServer:
|
||||||
{{- if .Values.kamajiControlPlane.apiServer.resources }}
|
{{- if .Values.controlPlane.apiServer.resources }}
|
||||||
resources: {{- toYaml .Values.kamajiControlPlane.apiServer.resources | nindent 6 }}
|
resources: {{- toYaml .Values.controlPlane.apiServer.resources | nindent 6 }}
|
||||||
{{- else if ne .Values.kamajiControlPlane.apiServer.resourcesPreset "none" }}
|
{{- else if ne .Values.controlPlane.apiServer.resourcesPreset "none" }}
|
||||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
|
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.apiServer.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
controllerManager:
|
controllerManager:
|
||||||
{{- if .Values.kamajiControlPlane.controllerManager.resources }}
|
{{- if .Values.controlPlane.controllerManager.resources }}
|
||||||
resources: {{- toYaml .Values.kamajiControlPlane.controllerManager.resources | nindent 6 }}
|
resources: {{- toYaml .Values.controlPlane.controllerManager.resources | nindent 6 }}
|
||||||
{{- else if ne .Values.kamajiControlPlane.controllerManager.resourcesPreset "none" }}
|
{{- else if ne .Values.controlPlane.controllerManager.resourcesPreset "none" }}
|
||||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
|
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.controllerManager.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
scheduler:
|
scheduler:
|
||||||
{{- if .Values.kamajiControlPlane.scheduler.resources }}
|
{{- if .Values.controlPlane.scheduler.resources }}
|
||||||
resources: {{- toYaml .Values.kamajiControlPlane.scheduler.resources | nindent 6 }}
|
resources: {{- toYaml .Values.controlPlane.scheduler.resources | nindent 6 }}
|
||||||
{{- else if ne .Values.kamajiControlPlane.scheduler.resourcesPreset "none" }}
|
{{- else if ne .Values.controlPlane.scheduler.resourcesPreset "none" }}
|
||||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
|
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.scheduler.resourcesPreset "Release" .Release) | nindent 6 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
dataStoreName: "{{ $etcd }}"
|
dataStoreName: "{{ $etcd }}"
|
||||||
addons:
|
addons:
|
||||||
@@ -128,10 +135,10 @@ spec:
|
|||||||
konnectivity:
|
konnectivity:
|
||||||
server:
|
server:
|
||||||
port: 8132
|
port: 8132
|
||||||
{{- if .Values.kamajiControlPlane.addons.konnectivity.server.resources }}
|
{{- if .Values.controlPlane.konnectivity.server.resources }}
|
||||||
resources: {{- toYaml .Values.kamajiControlPlane.addons.konnectivity.server.resources | nindent 10 }}
|
resources: {{- toYaml .Values.controlPlane.konnectivity.server.resources | nindent 10 }}
|
||||||
{{- else if ne .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "none" }}
|
{{- else if ne .Values.controlPlane.konnectivity.server.resourcesPreset "none" }}
|
||||||
resources: {{- include "resources.preset" (dict "type" .Values.kamajiControlPlane.addons.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
|
resources: {{- include "resources.preset" (dict "type" .Values.controlPlane.konnectivity.server.resourcesPreset "Release" .Release) | nindent 10 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
kubelet:
|
kubelet:
|
||||||
cgroupfs: systemd
|
cgroupfs: systemd
|
||||||
@@ -276,7 +283,7 @@ spec:
|
|||||||
kind: KubevirtMachineTemplate
|
kind: KubevirtMachineTemplate
|
||||||
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
|
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
|
||||||
namespace: {{ $.Release.Namespace }}
|
namespace: {{ $.Release.Namespace }}
|
||||||
version: v1.30.1
|
version: v1.32.3
|
||||||
---
|
---
|
||||||
apiVersion: cluster.x-k8s.io/v1beta1
|
apiVersion: cluster.x-k8s.io/v1beta1
|
||||||
kind: MachineHealthCheck
|
kind: MachineHealthCheck
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-cert-manager-crds
|
name: {{ .Release.Name }}-cert-manager-crds
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: cert-manager-crds
|
releaseName: cert-manager-crds
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-cert-manager
|
name: {{ .Release.Name }}-cert-manager
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: cert-manager
|
releaseName: cert-manager
|
||||||
@@ -30,11 +30,9 @@ spec:
|
|||||||
upgrade:
|
upgrade:
|
||||||
remediation:
|
remediation:
|
||||||
retries: -1
|
retries: -1
|
||||||
{{- if .Values.addons.certManager.valuesOverride }}
|
{{- with .Values.addons.certManager.valuesOverride }}
|
||||||
valuesFrom:
|
values:
|
||||||
- kind: Secret
|
{{- toYaml . | nindent 4 }}
|
||||||
name: {{ .Release.Name }}-cert-manager-values-override
|
|
||||||
valuesKey: values
|
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
dependsOn:
|
dependsOn:
|
||||||
@@ -47,13 +45,3 @@ spec:
|
|||||||
- name: {{ .Release.Name }}-cert-manager-crds
|
- name: {{ .Release.Name }}-cert-manager-crds
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- if .Values.addons.certManager.valuesOverride }}
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: {{ .Release.Name }}-cert-manager-values-override
|
|
||||||
stringData:
|
|
||||||
values: |
|
|
||||||
{{- toYaml .Values.addons.certManager.valuesOverride | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
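These two hunks drop the `valuesFrom` Secret indirection and render the override inline instead. Assuming a user sets a small override, the resulting HelmRelease spec would simply carry it under `values:`; the override content below is a made-up example, not something defined by this chart:

```yaml
spec:
  upgrade:
    remediation:
      retries: -1
  values:                 # taken verbatim from .Values.addons.certManager.valuesOverride
    replicaCount: 2       # hypothetical user override
```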
|
|||||||
@@ -4,7 +4,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-cilium
|
name: {{ .Release.Name }}-cilium
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: cilium
|
releaseName: cilium
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-csi
|
name: {{ .Release.Name }}-csi
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: csi
|
releaseName: csi
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ spec:
|
|||||||
effect: "NoSchedule"
|
effect: "NoSchedule"
|
||||||
containers:
|
containers:
|
||||||
- name: kubectl
|
- name: kubectl
|
||||||
image: docker.io/clastix/kubectl:v1.30.1
|
image: docker.io/clastix/kubectl:v1.32
|
||||||
command:
|
command:
|
||||||
- /bin/sh
|
- /bin/sh
|
||||||
- -c
|
- -c
|
||||||
@@ -32,9 +32,13 @@ spec:
|
|||||||
{{ .Release.Name }}-cilium
|
{{ .Release.Name }}-cilium
|
||||||
{{ .Release.Name }}-csi
|
{{ .Release.Name }}-csi
|
||||||
{{ .Release.Name }}-cert-manager
|
{{ .Release.Name }}-cert-manager
|
||||||
|
{{ .Release.Name }}-cert-manager-crds
|
||||||
|
{{ .Release.Name }}-vertical-pod-autoscaler
|
||||||
|
{{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||||
{{ .Release.Name }}-ingress-nginx
|
{{ .Release.Name }}-ingress-nginx
|
||||||
{{ .Release.Name }}-fluxcd-operator
|
{{ .Release.Name }}-fluxcd-operator
|
||||||
{{ .Release.Name }}-fluxcd
|
{{ .Release.Name }}-fluxcd
|
||||||
|
{{ .Release.Name }}-gpu-operator
|
||||||
-p '{"spec": {"suspend": true}}'
|
-p '{"spec": {"suspend": true}}'
|
||||||
--type=merge --field-manager=flux-client-side-apply || true
|
--type=merge --field-manager=flux-client-side-apply || true
|
||||||
---
|
---
|
||||||
@@ -67,9 +71,13 @@ rules:
|
|||||||
- {{ .Release.Name }}-cilium
|
- {{ .Release.Name }}-cilium
|
||||||
- {{ .Release.Name }}-csi
|
- {{ .Release.Name }}-csi
|
||||||
- {{ .Release.Name }}-cert-manager
|
- {{ .Release.Name }}-cert-manager
|
||||||
|
- {{ .Release.Name }}-cert-manager-crds
|
||||||
|
- {{ .Release.Name }}-vertical-pod-autoscaler
|
||||||
|
- {{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||||
- {{ .Release.Name }}-ingress-nginx
|
- {{ .Release.Name }}-ingress-nginx
|
||||||
- {{ .Release.Name }}-fluxcd-operator
|
- {{ .Release.Name }}-fluxcd-operator
|
||||||
- {{ .Release.Name }}-fluxcd
|
- {{ .Release.Name }}-fluxcd
|
||||||
|
- {{ .Release.Name }}-gpu-operator
|
||||||
---
|
---
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: RoleBinding
|
kind: RoleBinding
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-fluxcd-operator
|
name: {{ .Release.Name }}-fluxcd-operator
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: fluxcd-operator
|
releaseName: fluxcd-operator
|
||||||
@@ -49,7 +49,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-fluxcd
|
name: {{ .Release.Name }}-fluxcd
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: fluxcd
|
releaseName: fluxcd
|
||||||
@@ -73,11 +73,9 @@ spec:
|
|||||||
upgrade:
|
upgrade:
|
||||||
remediation:
|
remediation:
|
||||||
retries: -1
|
retries: -1
|
||||||
{{- if .Values.addons.fluxcd.valuesOverride }}
|
{{- with .Values.addons.fluxcd.valuesOverride }}
|
||||||
valuesFrom:
|
values:
|
||||||
- kind: Secret
|
{{- toYaml . | nindent 4 }}
|
||||||
name: {{ .Release.Name }}-fluxcd-values-override
|
|
||||||
valuesKey: values
|
|
||||||
{{- end }}
|
{{- end }}
|
||||||
dependsOn:
|
dependsOn:
|
||||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||||
@@ -89,14 +87,3 @@ spec:
|
|||||||
- name: {{ .Release.Name }}-fluxcd-operator
|
- name: {{ .Release.Name }}-fluxcd-operator
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- if .Values.addons.fluxcd.valuesOverride }}
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: {{ .Release.Name }}-fluxcd-values-override
|
|
||||||
stringData:
|
|
||||||
values: |
|
|
||||||
{{- toYaml .Values.addons.fluxcd.valuesOverride | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
|
|||||||
@@ -0,0 +1,45 @@
|
|||||||
|
{{- if .Values.addons.gpuOperator.enabled }}
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: {{ .Release.Name }}-gpu-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
|
spec:
|
||||||
|
interval: 5m
|
||||||
|
releaseName: gpu-operator
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-gpu-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
kubeConfig:
|
||||||
|
secretRef:
|
||||||
|
name: {{ .Release.Name }}-admin-kubeconfig
|
||||||
|
key: super-admin.svc
|
||||||
|
targetNamespace: cozy-gpu-operator
|
||||||
|
storageNamespace: cozy-gpu-operator
|
||||||
|
install:
|
||||||
|
createNamespace: true
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
{{- with .Values.addons.gpuOperator.valuesOverride }}
|
||||||
|
values:
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
dependsOn:
|
||||||
|
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||||
|
- name: {{ .Release.Name }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
|
- name: {{ .Release.Name }}-cilium
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
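This new template renders only when the GPU add-on is turned on, so from the tenant's point of view the whole feature reduces to the two values documented above:

```yaml
addons:
  gpuOperator:
    enabled: true
    valuesOverride: {}    # passed through to the cozy-gpu-operator chart unchanged
```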
@@ -1,3 +1,15 @@
|
|||||||
|
{{- define "cozystack.defaultIngressValues" -}}
|
||||||
|
ingress-nginx:
|
||||||
|
fullnameOverride: ingress-nginx
|
||||||
|
controller:
|
||||||
|
kind: DaemonSet
|
||||||
|
hostNetwork: true
|
||||||
|
service:
|
||||||
|
enabled: false
|
||||||
|
nodeSelector:
|
||||||
|
node-role.kubernetes.io/ingress-nginx: ""
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
{{- if .Values.addons.ingressNginx.enabled }}
|
{{- if .Values.addons.ingressNginx.enabled }}
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2
|
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||||
kind: HelmRelease
|
kind: HelmRelease
|
||||||
@@ -5,7 +17,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-ingress-nginx
|
name: {{ .Release.Name }}-ingress-nginx
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: ingress-nginx
|
releaseName: ingress-nginx
|
||||||
@@ -31,21 +43,7 @@ spec:
|
|||||||
remediation:
|
remediation:
|
||||||
retries: -1
|
retries: -1
|
||||||
values:
|
values:
|
||||||
ingress-nginx:
|
{{- toYaml (deepCopy .Values.addons.ingressNginx.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultIngressValues" .))) | nindent 4 }}
|
||||||
fullnameOverride: ingress-nginx
|
|
||||||
controller:
|
|
||||||
kind: DaemonSet
|
|
||||||
hostNetwork: true
|
|
||||||
service:
|
|
||||||
enabled: false
|
|
||||||
nodeSelector:
|
|
||||||
node-role.kubernetes.io/ingress-nginx: ""
|
|
||||||
{{- if .Values.addons.ingressNginx.valuesOverride }}
|
|
||||||
valuesFrom:
|
|
||||||
- kind: Secret
|
|
||||||
name: {{ .Release.Name }}-ingress-nginx-values-override
|
|
||||||
valuesKey: values
|
|
||||||
{{- end }}
|
|
||||||
dependsOn:
|
dependsOn:
|
||||||
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||||
- name: {{ .Release.Name }}
|
- name: {{ .Release.Name }}
|
||||||
@@ -54,14 +52,3 @@ spec:
|
|||||||
- name: {{ .Release.Name }}-cilium
|
- name: {{ .Release.Name }}-cilium
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- if .Values.addons.ingressNginx.valuesOverride }}
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: {{ .Release.Name }}-ingress-nginx-values-override
|
|
||||||
stringData:
|
|
||||||
values: |
|
|
||||||
{{- toYaml .Values.addons.ingressNginx.valuesOverride | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
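The `deepCopy ... | mergeOverwrite ...` pipeline gives the user's `valuesOverride` precedence over the defaults from `cozystack.defaultIngressValues`, so overrides are now merged rather than replacing the whole block. A hypothetical override switching the controller to a Deployment would merge roughly like this:

```yaml
# User input (hypothetical)
addons:
  ingressNginx:
    valuesOverride:
      ingress-nginx:
        controller:
          kind: Deployment

# Effective values handed to the HelmRelease
ingress-nginx:
  fullnameOverride: ingress-nginx
  controller:
    kind: Deployment            # overridden by the user
    hostNetwork: true           # remaining defaults are retained
    service:
      enabled: false
    nodeSelector:
      node-role.kubernetes.io/ingress-nginx: ""
```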
|
|||||||
@@ -7,7 +7,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-monitoring-agents
|
name: {{ .Release.Name }}-monitoring-agents
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: cozy-monitoring-agents
|
releaseName: cozy-monitoring-agents
|
||||||
@@ -38,10 +38,10 @@ spec:
|
|||||||
- name: {{ .Release.Name }}
|
- name: {{ .Release.Name }}
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
- name: {{ .Release.Name }}-cilium
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
- name: {{ .Release.Name }}-cozy-victoria-metrics-operator
|
- name: {{ .Release.Name }}-cozy-victoria-metrics-operator
|
||||||
namespace: {{ .Release.Namespace }}
|
namespace: {{ .Release.Namespace }}
|
||||||
|
- name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
values:
|
values:
|
||||||
vmagent:
|
vmagent:
|
||||||
externalLabels:
|
externalLabels:
|
||||||
|
|||||||
@@ -0,0 +1,41 @@
|
|||||||
|
{{- if .Values.addons.monitoringAgents.enabled }}
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: {{ .Release.Name }}-vertical-pod-autoscaler-crds
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
|
spec:
|
||||||
|
interval: 5m
|
||||||
|
releaseName: vertical-pod-autoscaler-crds
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-vertical-pod-autoscaler-crds
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
kubeConfig:
|
||||||
|
secretRef:
|
||||||
|
name: {{ .Release.Name }}-admin-kubeconfig
|
||||||
|
key: super-admin.svc
|
||||||
|
targetNamespace: cozy-vertical-pod-autoscaler-crds
|
||||||
|
storageNamespace: cozy-vertical-pod-autoscaler-crds
|
||||||
|
install:
|
||||||
|
createNamespace: true
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
|
||||||
|
dependsOn:
|
||||||
|
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||||
|
- name: {{ .Release.Name }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
|
- name: {{ .Release.Name }}-cilium
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
@@ -0,0 +1,67 @@
|
|||||||
|
{{- define "cozystack.defaultVPAValues" -}}
|
||||||
|
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
|
||||||
|
{{- $targetTenant := index $myNS.metadata.annotations "namespace.cozystack.io/monitoring" }}
|
||||||
|
vertical-pod-autoscaler:
|
||||||
|
recommender:
|
||||||
|
extraArgs:
|
||||||
|
container-name-label: container
|
||||||
|
container-namespace-label: namespace
|
||||||
|
container-pod-name-label: pod
|
||||||
|
storage: prometheus
|
||||||
|
memory-saver: true
|
||||||
|
pod-label-prefix: label_
|
||||||
|
metric-for-pod-labels: kube_pod_labels{job="kube-state-metrics", tenant="{{ .Release.Namespace }}", cluster="{{ .Release.Name }}"}[8d]
|
||||||
|
pod-name-label: pod
|
||||||
|
pod-namespace-label: namespace
|
||||||
|
prometheus-address: http://vmselect-shortterm.{{ $targetTenant }}.svc.cozy.local:8481/select/0/prometheus/
|
||||||
|
prometheus-cadvisor-job-name: cadvisor
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
memory: 1600Mi
|
||||||
|
requests:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 1600Mi
|
||||||
|
{{- end }}
|
||||||
|
|
||||||
|
{{- if .Values.addons.monitoringAgents.enabled }}
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: {{ .Release.Name }}-vertical-pod-autoscaler
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
|
spec:
|
||||||
|
interval: 5m
|
||||||
|
releaseName: vertical-pod-autoscaler
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-vertical-pod-autoscaler
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
kubeConfig:
|
||||||
|
secretRef:
|
||||||
|
name: {{ .Release.Name }}-admin-kubeconfig
|
||||||
|
key: super-admin.svc
|
||||||
|
targetNamespace: cozy-vertical-pod-autoscaler
|
||||||
|
storageNamespace: cozy-vertical-pod-autoscaler
|
||||||
|
install:
|
||||||
|
createNamespace: true
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
values:
|
||||||
|
{{- toYaml (deepCopy .Values.addons.verticalPodAutoscaler.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultVPAValues" .))) | nindent 4 }}
|
||||||
|
dependsOn:
|
||||||
|
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
|
||||||
|
- name: {{ .Release.Name }}
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
|
- name: {{ .Release.Name }}-monitoring-agents
|
||||||
|
namespace: {{ .Release.Namespace }}
|
||||||
|
{{- end }}
|
||||||
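The VPA defaults above go through the same `mergeOverwrite` pattern, so `addons.verticalPodAutoscaler.valuesOverride` can adjust individual recommender settings without repeating the rest; for example (purely illustrative):

```yaml
addons:
  verticalPodAutoscaler:
    valuesOverride:
      vertical-pod-autoscaler:
        recommender:
          resources:
            requests:
              memory: 800Mi
```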
@@ -5,7 +5,7 @@ metadata:
|
|||||||
name: {{ .Release.Name }}-cozy-victoria-metrics-operator
|
name: {{ .Release.Name }}-cozy-victoria-metrics-operator
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
coztstack.io/target-cluster-name: {{ .Release.Name }}
|
cozystack.io/target-cluster-name: {{ .Release.Name }}
|
||||||
spec:
|
spec:
|
||||||
interval: 5m
|
interval: 5m
|
||||||
releaseName: cozy-victoria-metrics-operator
|
releaseName: cozy-victoria-metrics-operator
|
||||||
|
|||||||
@@ -1,97 +1,227 @@
|
|||||||
{
|
{
|
||||||
"title": "Chart Values",
|
"title": "Chart Values",
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"host": {
|
"host": {
|
||||||
"type": "string",
|
"type": "string",
|
||||||
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
|
"description": "The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).",
|
||||||
"default": ""
|
"default": ""
|
||||||
|
},
|
||||||
|
"controlPlane": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"replicas": {
|
||||||
|
"type": "number",
|
||||||
|
"description": "Number of replicas for Kubernetes control-plane components",
|
||||||
|
"default": 2
|
||||||
},
|
},
|
||||||
"controlPlane": {
|
"apiServer": {
|
||||||
"type": "object",
|
"type": "object",
|
||||||
"properties": {
|
"properties": {
|
||||||
"replicas": {
|
"resourcesPreset": {
|
||||||
"type": "number",
|
"type": "string",
|
||||||
"description": "Number of replicas for Kubernetes contorl-plane components",
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"default": 2
|
"default": "small",
|
||||||
}
|
"enum": [
|
||||||
|
"none",
|
||||||
|
"nano",
|
||||||
|
"micro",
|
||||||
|
"small",
|
||||||
|
"medium",
|
||||||
|
"large",
|
||||||
|
"xlarge",
|
||||||
|
"2xlarge"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"resources": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Resources",
|
||||||
|
"default": {}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
},
|
},
|
||||||
"storageClass": {
|
"controllerManager": {
|
||||||
"type": "string",
|
"type": "object",
|
||||||
"description": "StorageClass used to store user data",
|
"properties": {
|
||||||
"default": "replicated"
|
"resources": {
|
||||||
},
|
"type": "object",
|
||||||
"addons": {
|
"description": "Resources",
|
||||||
"type": "object",
|
"default": {}
|
||||||
"properties": {
|
},
|
||||||
"certManager": {
|
"resourcesPreset": {
|
||||||
"type": "object",
|
"type": "string",
|
||||||
"properties": {
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
"enabled": {
|
"default": "micro",
|
||||||
"type": "boolean",
|
"enum": [
|
||||||
"description": "Enables the cert-manager",
|
"none",
|
||||||
"default": false
|
"nano",
|
||||||
},
|
"micro",
|
||||||
"valuesOverride": {
|
"small",
|
||||||
"type": "object",
|
"medium",
|
||||||
"description": "Custom values to override",
|
"large",
|
||||||
"default": {}
|
"xlarge",
|
||||||
}
|
"2xlarge"
|
||||||
}
|
]
|
||||||
},
|
|
||||||
"ingressNginx": {
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"enabled": {
|
|
||||||
"type": "boolean",
|
|
||||||
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
|
|
||||||
"default": false
|
|
||||||
},
|
|
||||||
"valuesOverride": {
|
|
||||||
"type": "object",
|
|
||||||
"description": "Custom values to override",
|
|
||||||
"default": {}
|
|
||||||
},
|
|
||||||
"hosts": {
|
|
||||||
"type": "array",
|
|
||||||
"description": "List of domain names that should be passed through to the cluster by upper cluster",
|
|
||||||
"default": [],
|
|
||||||
"items": {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"fluxcd": {
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"enabled": {
|
|
||||||
"type": "boolean",
|
|
||||||
"description": "Enables Flux CD",
|
|
||||||
"default": false
|
|
||||||
},
|
|
||||||
"valuesOverride": {
|
|
||||||
"type": "object",
|
|
||||||
"description": "Custom values to override",
|
|
||||||
"default": {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"monitoringAgents": {
|
|
||||||
"type": "object",
|
|
||||||
"properties": {
|
|
||||||
"enabled": {
|
|
||||||
"type": "boolean",
|
|
||||||
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
|
|
||||||
"default": false
|
|
||||||
},
|
|
||||||
"valuesOverride": {
|
|
||||||
"type": "object",
|
|
||||||
"description": "Custom values to override",
|
|
||||||
"default": {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"scheduler": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"resourcesPreset": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
|
"default": "micro",
|
||||||
|
"enum": [
|
||||||
|
"none",
|
||||||
|
"nano",
|
||||||
|
"micro",
|
||||||
|
"small",
|
||||||
|
"medium",
|
||||||
|
"large",
|
||||||
|
"xlarge",
|
||||||
|
"2xlarge"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"resources": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Resources",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"konnectivity": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"server": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"resourcesPreset": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
|
||||||
|
"default": "micro",
|
||||||
|
"enum": [
|
||||||
|
"none",
|
||||||
|
"nano",
|
||||||
|
"micro",
|
||||||
|
"small",
|
||||||
|
"medium",
|
||||||
|
"large",
|
||||||
|
"xlarge",
|
||||||
|
"2xlarge"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"resources": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Resources",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"storageClass": {
|
||||||
|
"type": "string",
|
||||||
|
"description": "StorageClass used to store user data",
|
||||||
|
"default": "replicated"
|
||||||
|
},
|
||||||
|
"addons": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"certManager": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"enabled": {
|
||||||
|
"type": "boolean",
|
||||||
|
"description": "Enables the cert-manager",
|
||||||
|
"default": false
|
||||||
|
},
|
||||||
|
"valuesOverride": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Custom values to override",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ingressNginx": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"enabled": {
|
||||||
|
"type": "boolean",
|
||||||
|
"description": "Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)",
|
||||||
|
"default": false
|
||||||
|
},
|
||||||
|
"valuesOverride": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Custom values to override",
|
||||||
|
"default": {}
|
||||||
|
},
|
||||||
|
"hosts": {
|
||||||
|
"type": "array",
|
||||||
|
"description": "List of domain names that should be passed through to the cluster by upper cluster",
|
||||||
|
"default": [],
|
||||||
|
"items": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"gpuOperator": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"enabled": {
|
||||||
|
"type": "boolean",
|
||||||
|
"description": "Enables the gpu-operator",
|
||||||
|
"default": false
|
||||||
|
},
|
||||||
|
"valuesOverride": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Custom values to override",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"fluxcd": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"enabled": {
|
||||||
|
"type": "boolean",
|
||||||
|
"description": "Enables Flux CD",
|
||||||
|
"default": false
|
||||||
|
},
|
||||||
|
"valuesOverride": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Custom values to override",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"monitoringAgents": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"enabled": {
|
||||||
|
"type": "boolean",
|
||||||
|
"description": "Enables MonitoringAgents (fluentbit, vmagents for sending logs and metrics to storage) if tenant monitoring enabled, send to tenant storage, else to root storage",
|
||||||
|
"default": false
|
||||||
|
},
|
||||||
|
"valuesOverride": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Custom values to override",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"verticalPodAutoscaler": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"valuesOverride": {
|
||||||
|
"type": "object",
|
||||||
|
"description": "Custom values to override",
|
||||||
|
"default": {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
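Because the schema (and values.yaml below) rename `kamajiControlPlane` to `controlPlane` and move konnectivity out of `addons`, existing user overrides presumably need to be relocated when upgrading; a before/after sketch with example preset values:

```yaml
# Before (chart 0.17.x layout)
kamajiControlPlane:
  apiServer:
    resourcesPreset: medium
  addons:
    konnectivity:
      server:
        resourcesPreset: micro

# After (chart 0.19.0 layout)
controlPlane:
  replicas: 2
  apiServer:
    resourcesPreset: medium
  konnectivity:
    server:
      resourcesPreset: micro
```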
|
|||||||
@@ -1,12 +1,10 @@
|
|||||||
## @section Common parameters
|
## @section Common parameters
|
||||||
|
|
||||||
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
|
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
|
||||||
## @param controlPlane.replicas Number of replicas for Kubernetes contorl-plane components
|
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
|
||||||
## @param storageClass StorageClass used to store user data
|
## @param storageClass StorageClass used to store user data
|
||||||
##
|
##
|
||||||
host: ""
|
host: ""
|
||||||
controlPlane:
|
|
||||||
replicas: 2
|
|
||||||
storageClass: replicated
|
storageClass: replicated
|
||||||
|
|
||||||
## @param nodeGroups [object] nodeGroups configuration
|
## @param nodeGroups [object] nodeGroups configuration
|
||||||
@@ -24,6 +22,14 @@ nodeGroups:
|
|||||||
cpu: ""
|
cpu: ""
|
||||||
memory: ""
|
memory: ""
|
||||||
|
|
||||||
|
## List of GPUs to attach (WARN: NVIDIA driver requires at least 4 GiB of RAM)
|
||||||
|
## e.g:
|
||||||
|
## instanceType: "u1.xlarge"
|
||||||
|
## gpus:
|
||||||
|
## - name: nvidia.com/AD102GL_L40S
|
||||||
|
gpus: []
|
||||||
|
|
||||||
|
|
||||||
## @section Cluster Addons
|
## @section Cluster Addons
|
||||||
##
|
##
|
||||||
addons:
|
addons:
|
||||||
@@ -52,6 +58,14 @@ addons:
|
|||||||
hosts: []
|
hosts: []
|
||||||
valuesOverride: {}
|
valuesOverride: {}
|
||||||
|
|
||||||
|
## GPU-operator: NVIDIA GPU Operator
|
||||||
|
##
|
||||||
|
gpuOperator:
|
||||||
|
## @param addons.gpuOperator.enabled Enables the gpu-operator
|
||||||
|
## @param addons.gpuOperator.valuesOverride Custom values to override
|
||||||
|
enabled: false
|
||||||
|
valuesOverride: {}
|
||||||
|
|
||||||
## Flux CD
|
## Flux CD
|
||||||
##
|
##
|
||||||
fluxcd:
|
fluxcd:
|
||||||
@@ -70,62 +84,49 @@ addons:
|
|||||||
enabled: false
|
enabled: false
|
||||||
valuesOverride: {}
|
valuesOverride: {}
|
||||||
|
|
||||||
## @section Kamaji control plane
|
## VerticalPodAutoscaler
|
||||||
##
|
##
|
||||||
kamajiControlPlane:
|
verticalPodAutoscaler:
|
||||||
apiServer:
|
## @param addons.verticalPodAutoscaler.valuesOverride Custom values to override
|
||||||
## @param kamajiControlPlane.apiServer.resources Resources
|
##
|
||||||
resources: {}
|
valuesOverride: {}
|
||||||
# resources:
|
|
||||||
# limits:
|
|
||||||
# cpu: 4000m
|
|
||||||
# memory: 4Gi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 512Mi
|
|
||||||
|
|
||||||
## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
## @section Kubernetes control plane configuration
|
||||||
resourcesPreset: "micro"
|
##
|
||||||
|
|
||||||
|
controlPlane:
|
||||||
|
replicas: 2
|
||||||
|
|
||||||
|
apiServer:
|
||||||
|
## @param controlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
|
## @param controlPlane.apiServer.resources Resources
|
||||||
|
## e.g:
|
||||||
|
## resources:
|
||||||
|
## limits:
|
||||||
|
## cpu: 4000m
|
||||||
|
## memory: 4Gi
|
||||||
|
## requests:
|
||||||
|
## cpu: 100m
|
||||||
|
## memory: 512Mi
|
||||||
|
##
|
||||||
|
resourcesPreset: "small"
|
||||||
|
resources: {}
|
||||||
|
|
||||||
controllerManager:
|
controllerManager:
|
||||||
## @param kamajiControlPlane.controllerManager.resources Resources
|
## @param controlPlane.controllerManager.resources Resources
|
||||||
resources: {}
|
## @param controlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
# resources:
|
|
||||||
# limits:
|
|
||||||
# cpu: 4000m
|
|
||||||
# memory: 4Gi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 512Mi
|
|
||||||
|
|
||||||
## @param kamajiControlPlane.controllerManager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
|
||||||
resourcesPreset: "micro"
|
resourcesPreset: "micro"
|
||||||
|
resources: {}
|
||||||
|
|
||||||
scheduler:
|
scheduler:
|
||||||
## @param kamajiControlPlane.scheduler.resources Resources
|
## @param controlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
resources: {}
|
## @param controlPlane.scheduler.resources Resources
|
||||||
# resources:
|
|
||||||
# limits:
|
|
||||||
# cpu: 4000m
|
|
||||||
# memory: 4Gi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 512Mi
|
|
||||||
|
|
||||||
## @param kamajiControlPlane.scheduler.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
|
||||||
resourcesPreset: "micro"
|
resourcesPreset: "micro"
|
||||||
addons:
|
resources: {}
|
||||||
konnectivity:
|
|
||||||
server:
|
|
||||||
## @param kamajiControlPlane.addons.konnectivity.server.resources Resources
|
|
||||||
resources: {}
|
|
||||||
# resources:
|
|
||||||
# limits:
|
|
||||||
# cpu: 4000m
|
|
||||||
# memory: 4Gi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 512Mi
|
|
||||||
|
|
||||||
## @param kamajiControlPlane.addons.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
|
||||||
resourcesPreset: "micro"
|
|
||||||
|
|
||||||
|
konnectivity:
|
||||||
|
server:
|
||||||
|
## @param controlPlane.konnectivity.server.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
|
||||||
|
## @param controlPlane.konnectivity.server.resources Resources
|
||||||
|
resourcesPreset: "micro"
|
||||||
|
resources: {}
|
||||||
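One nuance worth restating from the parameter descriptions: an explicit `resources` block always wins, and `resourcesPreset` is then ignored. Using the figures from the commented example:

```yaml
controlPlane:
  apiServer:
    resourcesPreset: small    # ignored because resources below is non-empty
    resources:
      limits:
        cpu: 4000m
        memory: 4Gi
      requests:
        cpu: 100m
        memory: 512Mi
```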
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/mariadb-backup:0.5.3@sha256:8ca1fb01e880d351ee7d984a0b437c1142836963cd079986156ed28750067138
+ghcr.io/cozystack/cozystack/mariadb-backup:0.6.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.10.0
+version: 0.10.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
|
|||||||
@@ -13,9 +13,6 @@ spec:
|
|||||||
jobTemplate:
|
jobTemplate:
|
||||||
spec:
|
spec:
|
||||||
backoffLimit: 2
|
backoffLimit: 2
|
||||||
template:
|
|
||||||
spec:
|
|
||||||
restartPolicy: OnFailure
|
|
||||||
template:
|
template:
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
@@ -24,7 +21,7 @@ spec:
|
|||||||
spec:
|
spec:
|
||||||
imagePullSecrets:
|
imagePullSecrets:
|
||||||
- name: {{ .Release.Name }}-regsecret
|
- name: {{ .Release.Name }}-regsecret
|
||||||
restartPolicy: Never
|
restartPolicy: OnFailure
|
||||||
containers:
|
containers:
|
||||||
- name: pgdump
|
- name: pgdump
|
||||||
image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
|
image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
|
||||||
|
|||||||
@@ -4,4 +4,4 @@ description: Separated tenant namespace
 icon: /logos/tenant.svg
 
 type: application
-version: 1.9.1
+version: 1.9.2
|
|||||||
@@ -24,6 +24,7 @@ spec:
|
|||||||
ingress:
|
ingress:
|
||||||
- fromEntities:
|
- fromEntities:
|
||||||
- world
|
- world
|
||||||
|
- cluster
|
||||||
egress:
|
egress:
|
||||||
- toEntities:
|
- toEntities:
|
||||||
- world
|
- world
|
||||||
|
|||||||
@@ -56,7 +56,10 @@ kubernetes 0.15.0 4e68e65c
|
|||||||
kubernetes 0.15.1 160e4e2a
|
kubernetes 0.15.1 160e4e2a
|
||||||
kubernetes 0.15.2 8267072d
|
kubernetes 0.15.2 8267072d
|
||||||
kubernetes 0.16.0 077045b0
|
kubernetes 0.16.0 077045b0
|
||||||
kubernetes 0.17.0 HEAD
|
kubernetes 0.17.0 1fbbfcd0
|
||||||
|
kubernetes 0.17.1 fd240701
|
||||||
|
kubernetes 0.18.0 721c12a7
|
||||||
|
kubernetes 0.19.0 HEAD
|
||||||
mysql 0.1.0 263e47be
|
mysql 0.1.0 263e47be
|
||||||
mysql 0.2.0 c24a103f
|
mysql 0.2.0 c24a103f
|
||||||
mysql 0.3.0 53f2365e
|
mysql 0.3.0 53f2365e
|
||||||
@@ -86,7 +89,8 @@ postgres 0.7.0 4b90bf5a
|
|||||||
postgres 0.7.1 1ec10165
|
postgres 0.7.1 1ec10165
|
||||||
postgres 0.8.0 4e68e65c
|
postgres 0.8.0 4e68e65c
|
||||||
postgres 0.9.0 8267072d
|
postgres 0.9.0 8267072d
|
||||||
postgres 0.10.0 HEAD
|
postgres 0.10.0 721c12a7
|
||||||
|
postgres 0.10.1 HEAD
|
||||||
rabbitmq 0.1.0 263e47be
|
rabbitmq 0.1.0 263e47be
|
||||||
rabbitmq 0.2.0 53f2365e
|
rabbitmq 0.2.0 53f2365e
|
||||||
rabbitmq 0.3.0 6c5cf5bf
|
rabbitmq 0.3.0 6c5cf5bf
|
||||||
@@ -127,7 +131,8 @@ tenant 1.6.8 bc95159a
|
|||||||
tenant 1.7.0 24fa7222
|
tenant 1.7.0 24fa7222
|
||||||
tenant 1.8.0 160e4e2a
|
tenant 1.8.0 160e4e2a
|
||||||
tenant 1.9.0 728743db
|
tenant 1.9.0 728743db
|
||||||
tenant 1.9.1 HEAD
|
tenant 1.9.1 721c12a7
|
||||||
|
tenant 1.9.2 HEAD
|
||||||
virtual-machine 0.1.4 f2015d65
|
virtual-machine 0.1.4 f2015d65
|
||||||
virtual-machine 0.1.5 263e47be
|
virtual-machine 0.1.5 263e47be
|
||||||
virtual-machine 0.2.0 c0685f43
|
virtual-machine 0.2.0 c0685f43
|
||||||
@@ -139,15 +144,20 @@ virtual-machine 0.7.0 e23286a3
|
|||||||
virtual-machine 0.7.1 0ab39f20
|
virtual-machine 0.7.1 0ab39f20
|
||||||
virtual-machine 0.8.0 3fa4dd3a
|
virtual-machine 0.8.0 3fa4dd3a
|
||||||
virtual-machine 0.8.1 93c46161
|
virtual-machine 0.8.1 93c46161
|
||||||
virtual-machine 0.8.2 HEAD
|
virtual-machine 0.8.2 de19450f
|
||||||
vm-disk 0.1.0 HEAD
|
virtual-machine 0.9.0 721c12a7
|
||||||
|
virtual-machine 0.9.1 HEAD
|
||||||
|
vm-disk 0.1.0 d971f2ff
|
||||||
|
vm-disk 0.1.1 HEAD
|
||||||
vm-instance 0.1.0 1ec10165
|
vm-instance 0.1.0 1ec10165
|
||||||
vm-instance 0.2.0 84f3ccc0
|
vm-instance 0.2.0 84f3ccc0
|
||||||
vm-instance 0.3.0 4e68e65c
|
vm-instance 0.3.0 4e68e65c
|
||||||
vm-instance 0.4.0 e23286a3
|
vm-instance 0.4.0 e23286a3
|
||||||
vm-instance 0.4.1 0ab39f20
|
vm-instance 0.4.1 0ab39f20
|
||||||
vm-instance 0.5.0 3fa4dd3a
|
vm-instance 0.5.0 3fa4dd3a
|
||||||
vm-instance 0.5.1 HEAD
|
vm-instance 0.5.1 de19450f
|
||||||
|
vm-instance 0.6.0 721c12a7
|
||||||
|
vm-instance 0.6.1 HEAD
|
||||||
vpn 0.1.0 263e47be
|
vpn 0.1.0 263e47be
|
||||||
vpn 0.2.0 53f2365e
|
vpn 0.2.0 53f2365e
|
||||||
vpn 0.3.0 6c5cf5bf
|
vpn 0.3.0 6c5cf5bf
|
||||||
|
|||||||
@@ -17,10 +17,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.2
+version: 0.9.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.8.2"
+appVersion: 0.9.0
|
|||||||
@@ -2,6 +2,7 @@ include ../../../scripts/package.mk
 
 generate:
 readme-generator -v values.yaml -s values.schema.json -r README.md
+yq -o json -i '.properties.gpus.items.type = "object" | .properties.gpus.default = []' values.schema.json
 INSTANCE_TYPES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/instancetypes.yaml | yq 'split(" ") | . + [""]' -o json) \
 && yq -i -o json ".properties.instanceType.optional=true | .properties.instanceType.enum = $${INSTANCE_TYPES}" values.schema.json
 PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \

@@ -36,22 +36,23 @@ virtctl ssh <user>@<vm>
 
 ### Common parameters
 
 | Name | Description | Value |
-| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- |
+| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ------------ |
 | `external` | Enable external access from outside the cluster | `false` |
 | `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
 | `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
 | `running` | Determines if the virtual machine should be running | `true` |
 | `instanceType` | Virtual Machine instance type | `u1.medium` |
-| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
+| `instanceProfile` | Virtual Machine preferences profile | `ubuntu` |
 | `systemDisk.image` | The base image for the virtual machine. Allowed values: `ubuntu`, `cirros`, `alpine`, `fedora` and `talos` | `ubuntu` |
 | `systemDisk.storage` | The size of the disk allocated for the virtual machine | `5Gi` |
 | `systemDisk.storageClass` | StorageClass used to store the data | `replicated` |
-| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
-| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
-| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
-| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
-` |
+| `gpus` | List of GPUs to attach | `[]` |
+| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
+| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
+| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
+| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `""` |
+| `cloudInitSeed` | A seed string to generate an SMBIOS UUID for the VM. | `""` |
 
 ## U Series
 

@@ -49,3 +49,23 @@ Selector labels
 app.kubernetes.io/name: {{ include "virtual-machine.name" . }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end }}
+
+{{/*
+Generate a stable UUID for cloud-init re-initialization upon upgrade.
+*/}}
+{{- define "virtual-machine.stableUuid" -}}
+{{- $source := printf "%s-%s-%s" .Release.Namespace (include "virtual-machine.fullname" .) .Values.cloudInitSeed }}
+{{- $hash := sha256sum $source }}
+{{- $uuid := printf "%s-%s-4%s-9%s-%s" (substr 0 8 $hash) (substr 8 12 $hash) (substr 13 16 $hash) (substr 17 20 $hash) (substr 20 32 $hash) }}
+{{- if eq .Values.cloudInitSeed "" }}
+{{- /* Try to save previous uuid to not trigger full cloud-init again if user decided to remove the seed. */}}
+{{- $vmResource := lookup "kubevirt.io/v1" "VirtualMachine" .Release.Namespace (include "virtual-machine.fullname" .) -}}
+{{- if $vmResource }}
+{{- $existingUuid := $vmResource | dig "spec" "template" "spec" "domain" "firmware" "uuid" "" }}
+{{- if $existingUuid }}
+{{- $uuid = $existingUuid }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- $uuid }}
+{{- end }}

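The stableUuid helper added above derives the firmware UUID deterministically from the release namespace, the chart fullname, and the cloudInitSeed value, and falls back to the UUID already present on an existing VirtualMachine when the seed is empty. A rough Python equivalent of the derivation, an illustrative sketch only and not code from the chart:

    import hashlib

    def stable_uuid(namespace: str, fullname: str, seed: str) -> str:
        # sha256 over "<namespace>-<fullname>-<seed>", laid out as 8-4-4-4-12 hex groups;
        # the third and fourth groups are forced to start with "4" and "9", mirroring the
        # substr/printf calls in the Helm template above.
        h = hashlib.sha256(f"{namespace}-{fullname}-{seed}".encode()).hexdigest()
        return f"{h[0:8]}-{h[8:12]}-4{h[13:16]}-9{h[17:20]}-{h[20:32]}"

    print(stable_uuid("tenant-example", "my-vm", "upd1"))

Changing cloudInitSeed therefore yields a new SMBIOS UUID, which, per the values.yaml comments in this same change set, forces a full cloud-init reconfiguration after the VM is restarted.
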
@@ -68,7 +68,16 @@ spec:
 requests:
 memory: {{ .Values.resources.memory | quote }}
 {{- end }}
+firmware:
+uuid: {{ include "virtual-machine.stableUuid" . }}
 devices:
+{{- if .Values.gpus }}
+gpus:
+{{- range $i, $gpu := .Values.gpus }}
+- name: gpu{{ add $i 1 }}
+deviceName: {{ $gpu.name }}
+{{- end }}
+{{- end }}
 disks:
 - disk:
 bus: scsi
@@ -90,6 +99,7 @@ spec:
 secret:
 secretName: {{ include "virtual-machine.fullname" $ }}-ssh-keys
 propagationMethod:
+# keys will be injected into metadata part of cloud-init disk
 noCloud: {}
 {{- end }}
 terminationGracePeriodSeconds: 30
@@ -100,8 +110,14 @@ spec:
 {{- if or .Values.sshKeys .Values.cloudInit }}
 - name: cloudinitdisk
 cloudInitNoCloud:
+{{- if .Values.cloudInit }}
 secretRef:
 name: {{ include "virtual-machine.fullname" . }}-cloud-init
+{{- else }}
+userData: |
+#cloud-config
+final_message: Cloud-init user-data was left blank intentionally.
+{{- end }}
 {{- end }}
 networks:
 - name: default

@@ -88,7 +88,7 @@
 },
 "instanceProfile": {
 "type": "string",
-"description": "Virtual Machine prefferences profile",
+"description": "Virtual Machine preferences profile",
 "default": "ubuntu",
 "optional": true,
 "enum": [
@@ -164,6 +164,14 @@
 }
 }
 },
+"gpus": {
+"type": "array",
+"description": "List of GPUs to attach",
+"default": [],
+"items": {
+"type": "object"
+}
+},
 "resources": {
 "type": "object",
 "properties": {
@@ -190,7 +198,12 @@
 "cloudInit": {
 "type": "string",
 "description": "cloud-init user data config. See cloud-init documentation for more details.",
-"default": "#cloud-config\n"
+"default": ""
+},
+"cloudInitSeed": {
+"type": "string",
+"description": "A seed string to generate an SMBIOS UUID for the VM.",
+"default": ""
 }
 }
 }

@@ -12,7 +12,7 @@ externalPorts:
 running: true
 
 ## @param instanceType Virtual Machine instance type
-## @param instanceProfile Virtual Machine prefferences profile
+## @param instanceProfile Virtual Machine preferences profile
 ##
 instanceType: "u1.medium"
 instanceProfile: ubuntu
@@ -26,6 +26,12 @@ systemDisk:
 storage: 5Gi
 storageClass: replicated
 
+## @param gpus [array] List of GPUs to attach
+## Example:
+## gpus:
+## - name: nvidia.com/GA102GL_A10
+gpus: []
+
 ## @param resources.cpu The number of CPU cores allocated to the virtual machine
 ## @param resources.memory The amount of memory allocated to the virtual machine
 resources:
@@ -49,5 +55,13 @@ sshKeys: []
 ## password: ubuntu
 ## chpasswd: { expire: False }
 ##
-cloudInit: |
-#cloud-config
+cloudInit: ""
+
+## @param cloudInitSeed A seed string to generate an SMBIOS UUID for the VM.
+cloudInitSeed: ""
+## Change it to any new value to force a full cloud-init reconfiguration. Change it when you want to apply
+## to an existing VM settings that are usually written only once, like new SSH keys or new network configuration.
+## An empty value does nothing (and the existing UUID is not reverted). Please note that changing this value
+## does not trigger a VM restart. You must perform the restart separately.
+## Example:
+## cloudInitSeed: "upd1"

@@ -16,10 +16,10 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.1.0
|
version: 0.1.1
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||||
# It is recommended to use it with quotes.
|
# It is recommended to use it with quotes.
|
||||||
appVersion: 0.1.0
|
appVersion: 0.1.1
|
||||||
|
|||||||
@@ -3,7 +3,9 @@ apiVersion: cdi.kubevirt.io/v1beta1
 kind: DataVolume
 metadata:
 annotations:
+{{- if hasKey .Values.source "upload" }}
 cdi.kubevirt.io/storage.bind.immediate.requested: ""
+{{- end }}
 vm-disk.cozystack.io/optical: "{{ .Values.optical }}"
 name: {{ .Release.Name }}
 spec:

@@ -17,10 +17,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.1
+version: 0.6.1
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.5.1"
+appVersion: 0.6.0

@@ -3,6 +3,7 @@ include ../../../scripts/package.mk
 generate:
 readme-generator -v values.yaml -s values.schema.json -r README.md
 yq -o json -i '.properties.disks.items.type = "object" | .properties.disks.default = []' values.schema.json
+yq -o json -i '.properties.gpus.items.type = "object" | .properties.gpus.default = []' values.schema.json
 INSTANCE_TYPES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/instancetypes.yaml | yq 'split(" ") | . + [""]' -o json) \
 && yq -i -o json ".properties.instanceType.optional=true | .properties.instanceType.enum = $${INSTANCE_TYPES}" values.schema.json
 PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \

@@ -36,20 +36,21 @@ virtctl ssh <user>@<vm>
 
 ### Common parameters
 
 | Name | Description | Value |
-| ------------------ | ---------------------------------------------------------------------------------------------------------- | ---------------- |
+| ------------------ | ---------------------------------------------------------------------------------------------------------- | ----------- |
 | `external` | Enable external access from outside the cluster | `false` |
 | `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
 | `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
 | `running` | Determines if the virtual machine should be running | `true` |
 | `instanceType` | Virtual Machine instance type | `u1.medium` |
-| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
+| `instanceProfile` | Virtual Machine preferences profile | `ubuntu` |
 | `disks` | List of disks to attach | `[]` |
-| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
-| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
-| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
-| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
-` |
+| `gpus` | List of GPUs to attach | `[]` |
+| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
+| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
+| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
+| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `""` |
+| `cloudInitSeed` | A seed string to generate an SMBIOS UUID for the VM. | `""` |
 
 ## U Series
 

@@ -49,3 +49,23 @@ Selector labels
 app.kubernetes.io/name: {{ include "virtual-machine.name" . }}
 app.kubernetes.io/instance: {{ .Release.Name }}
 {{- end }}
+
+{{/*
+Generate a stable UUID for cloud-init re-initialization upon upgrade.
+*/}}
+{{- define "virtual-machine.stableUuid" -}}
+{{- $source := printf "%s-%s-%s" .Release.Namespace (include "virtual-machine.fullname" .) .Values.cloudInitSeed }}
+{{- $hash := sha256sum $source }}
+{{- $uuid := printf "%s-%s-4%s-9%s-%s" (substr 0 8 $hash) (substr 8 12 $hash) (substr 13 16 $hash) (substr 17 20 $hash) (substr 20 32 $hash) }}
+{{- if eq .Values.cloudInitSeed "" }}
+{{- /* Try to save previous uuid to not trigger full cloud-init again if user decided to remove the seed. */}}
+{{- $vmResource := lookup "kubevirt.io/v1" "VirtualMachine" .Release.Namespace (include "virtual-machine.fullname" .) -}}
+{{- if $vmResource }}
+{{- $existingUuid := $vmResource | dig "spec" "template" "spec" "domain" "firmware" "uuid" "" }}
+{{- if $existingUuid }}
+{{- $uuid = $existingUuid }}
+{{- end }}
+{{- end }}
+{{- end }}
+{{- $uuid }}
+{{- end }}

@@ -22,5 +22,5 @@ spec:
 kind: virtual-machine
 type: virtual-machine
 selector:
-vm.kubevirt.io/name: {{ $.Release.Name }}
+{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
 version: {{ $.Chart.Version }}

@@ -1,8 +1,8 @@
 {{- if and .Values.instanceType (not (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterInstancetype" "" .Values.instanceType)) }}
-{{- fail (printf "Specified instancetype not exists in cluster: %s" .Values.instanceType) }}
+{{- fail (printf "Specified instanceType does not exist in the cluster: %s" .Values.instanceType) }}
 {{- end }}
 {{- if and .Values.instanceProfile (not (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterPreference" "" .Values.instanceProfile)) }}
-{{- fail (printf "Specified profile not exists in cluster: %s" .Values.instanceProfile) }}
+{{- fail (printf "Specified instanceProfile does not exist in the cluster: %s" .Values.instanceProfile) }}
 {{- end }}
 
 apiVersion: kubevirt.io/v1
@@ -40,11 +40,20 @@ spec:
 requests:
 memory: {{ .Values.resources.memory | quote }}
 {{- end }}
+firmware:
+uuid: {{ include "virtual-machine.stableUuid" . }}
 devices:
+{{- if .Values.gpus }}
+gpus:
+{{- range $i, $gpu := .Values.gpus }}
+- name: gpu{{ add $i 1 }}
+deviceName: {{ $gpu.name }}
+{{- end }}
+{{- end }}
 disks:
 {{- range $i, $disk := .Values.disks }}
-- name: disk-{{ .name }}
-{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" .name) }}
+- name: disk-{{ $disk.name }}
+{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
 {{- if $disk }}
 {{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
 cdrom: {}
@@ -75,6 +84,7 @@ spec:
 secret:
 secretName: {{ include "virtual-machine.fullname" $ }}-ssh-keys
 propagationMethod:
+# keys will be injected into metadata part of cloud-init disk
 noCloud: {}
 {{- end }}
 terminationGracePeriodSeconds: 30
@@ -87,8 +97,14 @@ spec:
 {{- if or .Values.sshKeys .Values.cloudInit }}
 - name: cloudinitdisk
 cloudInitNoCloud:
+{{- if .Values.cloudInit }}
 secretRef:
 name: {{ include "virtual-machine.fullname" . }}-cloud-init
+{{- else }}
+userData: |
+#cloud-config
+final_message: Cloud-init user-data was left blank intentionally.
+{{- end }}
 {{- end }}
 networks:
 - name: default

@@ -88,7 +88,7 @@
 },
 "instanceProfile": {
 "type": "string",
-"description": "Virtual Machine prefferences profile",
+"description": "Virtual Machine preferences profile",
 "default": "ubuntu",
 "optional": true,
 "enum": [
@@ -145,6 +145,14 @@
 "type": "object"
 }
 },
+"gpus": {
+"type": "array",
+"description": "List of GPUs to attach",
+"default": [],
+"items": {
+"type": "object"
+}
+},
 "resources": {
 "type": "object",
 "properties": {
@@ -171,7 +179,12 @@
 "cloudInit": {
 "type": "string",
 "description": "cloud-init user data config. See cloud-init documentation for more details.",
-"default": "#cloud-config\n"
+"default": ""
+},
+"cloudInitSeed": {
+"type": "string",
+"description": "A seed string to generate an SMBIOS UUID for the VM.",
+"default": ""
 }
 }
 }

@@ -12,7 +12,7 @@ externalPorts:
 running: true
 
 ## @param instanceType Virtual Machine instance type
-## @param instanceProfile Virtual Machine prefferences profile
+## @param instanceProfile Virtual Machine preferences profile
 ##
 instanceType: "u1.medium"
 instanceProfile: ubuntu
@@ -24,6 +24,12 @@ instanceProfile: ubuntu
 ## - name: example-data
 disks: []
 
+## @param gpus [array] List of GPUs to attach
+## Example:
+## gpus:
+## - name: nvidia.com/GA102GL_A10
+gpus: []
+
 ## @param resources.cpu The number of CPU cores allocated to the virtual machine
 ## @param resources.memory The amount of memory allocated to the virtual machine
 resources:
@@ -47,5 +53,13 @@ sshKeys: []
 ## password: ubuntu
 ## chpasswd: { expire: False }
 ##
-cloudInit: |
-#cloud-config
+cloudInit: ""
+
+## @param cloudInitSeed A seed string to generate an SMBIOS UUID for the VM.
+cloudInitSeed: ""
+## Change it to any new value to force a full cloud-init reconfiguration. Change it when you want to apply
+## to an existing VM settings that are usually written only once, like new SSH keys or new network configuration.
+## An empty value does nothing (and the existing UUID is not reverted). Please note that changing this value
+## does not trigger a VM restart. You must perform the restart separately.
+## Example:
+## cloudInitSeed: "upd1"

@@ -1,35 +0,0 @@
-NAMESPACE=cozy-builder
-NAME := builder
-
-TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' ../installer/images/talos/profiles/installer.yaml)
-
-include ../../../scripts/common-envs.mk
-
-help: ## Show this help.
-@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
-
-show:
-helm template -n $(NAMESPACE) $(NAME) .
-
-apply: ## Create builder sandbox in existing Kubernetes cluster.
-helm template -n $(NAMESPACE) $(NAME) . | kubectl apply -f -
-docker buildx ls | grep -q '^buildkit-builder*' || docker buildx create \
---bootstrap \
---name=buildkit-$(NAME) \
---driver=kubernetes \
---driver-opt=namespace=$(NAMESPACE),replicas=1 \
---platform=linux/amd64 \
---platform=linux/arm64 \
---use \
---config config.toml
-
-diff:
-helm template -n $(NAMESPACE) $(NAME) . | kubectl diff -f -
-
-delete: ## Remove builder sandbox from existing Kubernetes cluster.
-kubectl delete deploy -n $(NAMESPACE) $(NAME)-talos-imager
-docker buildx rm buildkit-$(NAME)
-
-wait-for-builder:
-kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) $(NAME)-talos-imager
-kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=$(NAME)-talos-imager

@@ -1,11 +0,0 @@
-[worker.oci]
-gc = true
-gckeepstorage = 50000
-
-[[worker.oci.gcpolicy]]
-keepBytes = 10737418240
-keepDuration = 604800
-filters = [ "type==source.local", "type==exec.cachemount", "type==source.git.checkout"]
-[[worker.oci.gcpolicy]]
-all = true
-keepBytes = 53687091200

@@ -1,43 +0,0 @@
----
-apiVersion: v1
-kind: Namespace
-metadata:
-name: {{ .Release.Namespace }}
-labels:
-pod-security.kubernetes.io/enforce: privileged
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-name: {{ .Release.Name }}-talos-imager
-namespace: {{ .Release.Namespace }}
-spec:
-replicas: 1
-selector:
-matchLabels:
-app: {{ .Release.Name }}-talos-imager
-strategy:
-type: Recreate
-template:
-metadata:
-labels:
-app: {{ .Release.Name }}-talos-imager
-spec:
-automountServiceAccountToken: false
-terminationGracePeriodSeconds: 1
-containers:
-- name: imager
-image: "{{ .Values.talos.imager.image }}"
-securityContext:
-privileged: true
-command:
-- sleep
-- infinity
-volumeMounts:
-- mountPath: /dev
-name: dev
-volumes:
-- hostPath:
-path: /dev
-type: Directory
-name: dev

@@ -1,3 +0,0 @@
-talos:
-imager:
-image: ghcr.io/siderolabs/imager:v1.9.3

@@ -19,12 +19,10 @@ diff:
 
 update:
 hack/gen-profiles.sh
-IMAGE=$$(yq '.input.baseInstaller.imageRef | sub("/installer:", "/imager:")' images/talos/profiles/installer.yaml) \
-yq -i '.talos.imager.image = strenv(IMAGE)' ../builder/values.yaml
 
-image: pre-checks image-cozystack image-talos image-matchbox
+image: pre-checks image-matchbox image-cozystack image-talos
 
-image-cozystack: run-builder
+image-cozystack:
 make -C ../../.. repos
 docker buildx build -f images/cozystack/Dockerfile ../../.. \
 --provenance false \
@@ -40,11 +38,11 @@ image-cozystack: run-builder
 yq -i '.cozystack.image = strenv(IMAGE)' values.yaml
 rm -f images/installer.json
 
-image-talos: run-builder
+image-talos:
 test -f ../../../_out/assets/installer-amd64.tar || make talos-installer
 skopeo copy docker-archive:../../../_out/assets/installer-amd64.tar docker://$(REGISTRY)/talos:$(call settag,$(TALOS_VERSION))
 
-image-matchbox: run-builder
+image-matchbox:
 test -f ../../../_out/assets/kernel-amd64 || make talos-kernel
 test -f ../../../_out/assets/initramfs-metal-amd64.xz || make talos-initramfs
 docker buildx build -f images/matchbox/Dockerfile ../../.. \
@@ -61,13 +59,10 @@ image-matchbox: run-builder
 > ../../extra/bootbox/images/matchbox.tag
 rm -f images/matchbox.json
 
-assets: talos-iso talos-nocloud talos-metal
+assets: talos-iso talos-nocloud talos-metal talos-kernel talos-initramfs
 
 talos-initramfs talos-kernel talos-installer talos-iso talos-nocloud talos-metal:
 mkdir -p ../../../_out/assets
 cat images/talos/profiles/$(subst talos-,,$@).yaml | \
-kubectl exec -i -n cozy-builder deploy/builder-talos-imager -- imager --tar-to-stdout - | \
+docker run --rm -i -v /dev:/dev --privileged "ghcr.io/siderolabs/imager:$(TALOS_VERSION)" --tar-to-stdout - | \
 tar -C ../../../_out/assets -xzf-
-
-run-builder:
-make -C ../builder/ apply wait-for-builder

@@ -30,6 +30,8 @@ FROM alpine:3.21
 
 RUN apk add --no-cache make
 RUN apk add helm kubectl --repository=https://dl-cdn.alpinelinux.org/alpine/edge/community
+RUN apk add yq
+RUN apk add coreutils
 
 COPY scripts /cozystack/scripts
 COPY --from=builder /src/packages/core /cozystack/packages/core

@@ -3,24 +3,24 @@
 arch: amd64
 platform: metal
 secureboot: false
-version: v1.9.3
+version: v1.9.5
 input:
 kernel:
 path: /usr/install/amd64/vmlinuz
 initramfs:
 path: /usr/install/amd64/initramfs.xz
 baseInstaller:
-imageRef: ghcr.io/siderolabs/installer:v1.9.3
+imageRef: ghcr.io/siderolabs/installer:v1.9.5
 systemExtensions:
-- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
+- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
 - imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
-- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
+- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
 - imageRef: ghcr.io/siderolabs/i915-ucode:20241110
-- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
-- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
-- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
-- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
-- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
+- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
+- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
+- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
+- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
+- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
 output:
 kind: initramfs
 imageOptions: {}

@@ -3,24 +3,24 @@
 arch: amd64
 platform: metal
 secureboot: false
-version: v1.9.3
+version: v1.9.5
 input:
 kernel:
 path: /usr/install/amd64/vmlinuz
 initramfs:
 path: /usr/install/amd64/initramfs.xz
 baseInstaller:
-imageRef: ghcr.io/siderolabs/installer:v1.9.3
+imageRef: ghcr.io/siderolabs/installer:v1.9.5
 systemExtensions:
-- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
+- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
 - imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
-- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
+- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
 - imageRef: ghcr.io/siderolabs/i915-ucode:20241110
-- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
-- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
-- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
-- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
-- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
+- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
+- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
+- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
+- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
+- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
 output:
 kind: installer
 imageOptions: {}

@@ -3,24 +3,24 @@
 arch: amd64
 platform: metal
 secureboot: false
-version: v1.9.3
+version: v1.9.5
 input:
 kernel:
 path: /usr/install/amd64/vmlinuz
 initramfs:
 path: /usr/install/amd64/initramfs.xz
 baseInstaller:
-imageRef: ghcr.io/siderolabs/installer:v1.9.3
+imageRef: ghcr.io/siderolabs/installer:v1.9.5
 systemExtensions:
-- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
+- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
 - imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
-- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
+- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
 - imageRef: ghcr.io/siderolabs/i915-ucode:20241110
-- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
-- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
-- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
-- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
-- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
+- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
+- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
+- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
+- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
+- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
 output:
 kind: iso
 imageOptions: {}

@@ -3,24 +3,24 @@
 arch: amd64
 platform: metal
 secureboot: false
-version: v1.9.3
+version: v1.9.5
 input:
 kernel:
 path: /usr/install/amd64/vmlinuz
 initramfs:
 path: /usr/install/amd64/initramfs.xz
 baseInstaller:
-imageRef: ghcr.io/siderolabs/installer:v1.9.3
+imageRef: ghcr.io/siderolabs/installer:v1.9.5
 systemExtensions:
-- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
+- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
 - imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
-- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
+- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
 - imageRef: ghcr.io/siderolabs/i915-ucode:20241110
-- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
-- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
-- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
-- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
-- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
+- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
+- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
+- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
+- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
+- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
 output:
 kind: kernel
 imageOptions: {}

@@ -3,24 +3,24 @@
 arch: amd64
 platform: metal
 secureboot: false
-version: v1.9.3
+version: v1.9.5
 input:
 kernel:
 path: /usr/install/amd64/vmlinuz
 initramfs:
 path: /usr/install/amd64/initramfs.xz
 baseInstaller:
-imageRef: ghcr.io/siderolabs/installer:v1.9.3
+imageRef: ghcr.io/siderolabs/installer:v1.9.5
 systemExtensions:
-- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
+- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
 - imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
-- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
+- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
 - imageRef: ghcr.io/siderolabs/i915-ucode:20241110
-- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
-- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
-- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
-- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
-- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
+- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
+- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
+- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
+- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
+- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
 output:
 kind: image
 imageOptions: { diskSize: 1306525696, diskFormat: raw }

@@ -3,24 +3,24 @@
 arch: amd64
 platform: nocloud
 secureboot: false
-version: v1.9.3
+version: v1.9.5
 input:
 kernel:
 path: /usr/install/amd64/vmlinuz
 initramfs:
 path: /usr/install/amd64/initramfs.xz
 baseInstaller:
-imageRef: ghcr.io/siderolabs/installer:v1.9.3
+imageRef: ghcr.io/siderolabs/installer:v1.9.5
 systemExtensions:
-- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
+- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
 - imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
-- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
+- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
 - imageRef: ghcr.io/siderolabs/i915-ucode:20241110
-- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
-- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
-- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
-- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
-- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
+- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
+- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
+- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
+- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
+- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
 output:
 kind: image
 imageOptions: { diskSize: 1306525696, diskFormat: raw }

@@ -1,2 +1,2 @@
 cozystack:
-image: ghcr.io/cozystack/cozystack/installer:v0.28.0@sha256:71ae2037ca44d49bbcf8be56c127ee92f2486089a8ea1cdd6508af49705956ac
+image: ghcr.io/cozystack/cozystack/installer:v0.30.2@sha256:59996588b5d59b5593fb34442b2f2ed8ef466d138b229a8d37beb6f70141a690

@@ -7,7 +7,11 @@ show:
 helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS)
 
 apply:
-helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) | kubectl apply -f-
+helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) \
+| kubectl apply -f-
+kubectl delete helmreleases.helm.toolkit.fluxcd.io -l cozystack.io/marked-for-deletion=true -A
 
+reconcile: apply
+
 namespaces-show:
 helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml

@@ -31,6 +31,13 @@ releases:
 autoDirectNodeRoutes: true
 routingMode: native
 
+- name: cilium-networkpolicy
+releaseName: cilium-networkpolicy
+chart: cozy-cilium-networkpolicy
+namespace: cozy-cilium
+privileged: true
+dependsOn: [cilium]
+
 - name: cozy-proxy
 releaseName: cozystack
 chart: cozy-cozy-proxy
@@ -127,14 +134,14 @@ releases:
 chart: cozy-kafka-operator
 namespace: cozy-kafka-operator
 optional: true
-dependsOn: [cilium]
+dependsOn: [cilium,victoria-metrics-operator]
 
 - name: clickhouse-operator
 releaseName: clickhouse-operator
 chart: cozy-clickhouse-operator
 namespace: cozy-clickhouse-operator
 optional: true
-dependsOn: [cilium]
+dependsOn: [cilium,victoria-metrics-operator]
 
 - name: rabbitmq-operator
 releaseName: rabbitmq-operator
@@ -154,7 +161,7 @@ releases:
 releaseName: piraeus-operator
 chart: cozy-piraeus-operator
 namespace: cozy-linstor
-dependsOn: [cilium,cert-manager]
+dependsOn: [cilium,cert-manager,victoria-metrics-operator]
 
 - name: snapshot-controller
 releaseName: snapshot-controller

@@ -96,14 +96,14 @@ releases:
 chart: cozy-kafka-operator
 namespace: cozy-kafka-operator
 optional: true
-dependsOn: []
+dependsOn: [victoria-metrics-operator]
 
 - name: clickhouse-operator
 releaseName: clickhouse-operator
 chart: cozy-clickhouse-operator
 namespace: cozy-clickhouse-operator
 optional: true
-dependsOn: []
+dependsOn: [victoria-metrics-operator]
 
 - name: rabbitmq-operator
 releaseName: rabbitmq-operator

@@ -34,6 +34,13 @@ releases:
 - values-talos.yaml
 - values-kubeovn.yaml
 
+- name: cilium-networkpolicy
+releaseName: cilium-networkpolicy
+chart: cozy-cilium-networkpolicy
+namespace: cozy-cilium
+privileged: true
+dependsOn: [cilium]
+
 - name: kubeovn
 releaseName: kubeovn
 chart: cozy-kubeovn
@@ -109,7 +116,7 @@ releases:
 chart: cozy-monitoring-agents
 namespace: cozy-monitoring
 privileged: true
-dependsOn: [cilium,kubeovn,victoria-metrics-operator]
+dependsOn: [victoria-metrics-operator, vertical-pod-autoscaler-crds]
 values:
 scrapeRules:
 etcd:
@@ -146,6 +153,17 @@ releases:
 namespace: cozy-kubevirt-cdi
 dependsOn: [cilium,kubeovn,kubevirt-cdi-operator]
 
+- name: gpu-operator
+releaseName: gpu-operator
+chart: cozy-gpu-operator
+namespace: cozy-gpu-operator
+privileged: true
+optional: true
+dependsOn: [cilium,kubeovn]
+valuesFiles:
+- values.yaml
+- values-talos.yaml
+
 - name: metallb
 releaseName: metallb
 chart: cozy-metallb
@@ -181,13 +199,13 @@ releases:
 releaseName: kafka-operator
 chart: cozy-kafka-operator
 namespace: cozy-kafka-operator
-dependsOn: [cilium,kubeovn]
+dependsOn: [cilium,kubeovn,victoria-metrics-operator]
 
 - name: clickhouse-operator
 releaseName: clickhouse-operator
 chart: cozy-clickhouse-operator
 namespace: cozy-clickhouse-operator
-dependsOn: [cilium,kubeovn]
+dependsOn: [cilium,kubeovn,victoria-metrics-operator]
 
 - name: rabbitmq-operator
 releaseName: rabbitmq-operator
@@ -252,7 +270,10 @@ releases:
 {{- end }}
 {{- end }}
 {{- end }}
+frontend:
+resourcesPreset: "none"
 dashboard:
+resourcesPreset: "none"
 {{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
 {{- $branding := dig "data" "branding" "" $cozystackBranding }}
 {{- if $branding }}
@@ -381,6 +402,13 @@ releases:
 privileged: true
 dependsOn: [monitoring-agents]
 
+- name: vertical-pod-autoscaler-crds
+releaseName: vertical-pod-autoscaler-crds
+chart: cozy-vertical-pod-autoscaler-crds
+namespace: cozy-vertical-pod-autoscaler
+privileged: true
+dependsOn: [cilium, kubeovn]
+
 - name: reloader
 releaseName: reloader
 chart: cozy-reloader

|||||||
@@ -69,7 +69,7 @@ releases:
|
|||||||
chart: cozy-monitoring-agents
|
chart: cozy-monitoring-agents
|
||||||
namespace: cozy-monitoring
|
namespace: cozy-monitoring
|
||||||
privileged: true
|
privileged: true
|
||||||
dependsOn: [victoria-metrics-operator]
|
dependsOn: [victoria-metrics-operator, vertical-pod-autoscaler-crds]
|
||||||
values:
|
values:
|
||||||
scrapeRules:
|
scrapeRules:
|
||||||
etcd:
|
etcd:
|
||||||
@@ -103,13 +103,13 @@ releases:
|
|||||||
releaseName: kafka-operator
|
releaseName: kafka-operator
|
||||||
chart: cozy-kafka-operator
|
chart: cozy-kafka-operator
|
||||||
namespace: cozy-kafka-operator
|
namespace: cozy-kafka-operator
|
||||||
dependsOn: []
|
dependsOn: [victoria-metrics-operator]
|
||||||
|
|
||||||
- name: clickhouse-operator
|
- name: clickhouse-operator
|
||||||
releaseName: clickhouse-operator
|
releaseName: clickhouse-operator
|
||||||
chart: cozy-clickhouse-operator
|
chart: cozy-clickhouse-operator
|
||||||
namespace: cozy-clickhouse-operator
|
namespace: cozy-clickhouse-operator
|
||||||
dependsOn: []
|
dependsOn: [victoria-metrics-operator]
|
||||||
|
|
||||||
- name: rabbitmq-operator
|
- name: rabbitmq-operator
|
||||||
releaseName: rabbitmq-operator
|
releaseName: rabbitmq-operator
|
||||||
@@ -168,7 +168,10 @@ releases:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
frontend:
|
||||||
|
resourcesPreset: "none"
|
||||||
dashboard:
|
dashboard:
|
||||||
|
resourcesPreset: "none"
|
||||||
{{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
|
{{- $cozystackBranding:= lookup "v1" "ConfigMap" "cozy-system" "cozystack-branding" }}
|
||||||
{{- $branding := dig "data" "branding" "" $cozystackBranding }}
|
{{- $branding := dig "data" "branding" "" $cozystackBranding }}
|
||||||
{{- if $branding }}
|
{{- if $branding }}
|
||||||
@@ -254,3 +257,10 @@ releases:
|
|||||||
namespace: cozy-vertical-pod-autoscaler
|
namespace: cozy-vertical-pod-autoscaler
|
||||||
privileged: true
|
privileged: true
|
||||||
dependsOn: [monitoring-agents]
|
dependsOn: [monitoring-agents]
|
||||||
|
|
||||||
|
- name: vertical-pod-autoscaler-crds
|
||||||
|
releaseName: vertical-pod-autoscaler-crds
|
||||||
|
chart: cozy-vertical-pod-autoscaler-crds
|
||||||
|
namespace: cozy-vertical-pod-autoscaler
|
||||||
|
privileged: true
|
||||||
|
dependsOn: [cilium, kubeovn]
|
||||||
|
|||||||
@@ -7,12 +7,23 @@
 
 {{/* collect dependency namespaces from releases */}}
 {{- range $x := $bundle.releases }}
 {{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
 {{- end }}
 
 {{- range $x := $bundle.releases }}
-{{- if not (has $x.name $disabledComponents) }}
-{{- if or (not $x.optional) (and ($x.optional) (has $x.name $enabledComponents)) }}
+{{- $shouldInstall := true }}
+{{- $shouldDelete := false }}
+{{- if or (has $x.name $disabledComponents) (and ($x.optional) (not (has $x.name $enabledComponents))) }}
+{{- $shouldInstall = false }}
+{{- if $.Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2" }}
+{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" $x.namespace $x.name }}
+{{- $shouldDelete = true }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if or $shouldInstall $shouldDelete }}
 ---
 apiVersion: helm.toolkit.fluxcd.io/v2
 kind: HelmRelease

@@ -22,6 +33,9 @@ metadata:
   labels:
     cozystack.io/repository: system
     cozystack.io/system-app: "true"
+{{- if $shouldDelete }}
+    cozystack.io/marked-for-deletion: "true"
+{{- end }}
 spec:
   interval: 5m
   releaseName: {{ $x.releaseName | default $x.name }}

@@ -47,10 +61,10 @@ spec:
 {{- end }}
 {{- $values := dict }}
 {{- with $x.values }}
 {{- $values = merge . $values }}
 {{- end }}
 {{- with index $cozyConfig.data (printf "values-%s" $x.name) }}
 {{- $values = merge (fromYaml .) $values }}
 {{- end }}
 {{- with $values }}
   values:

@@ -70,13 +84,12 @@ spec:
 
 {{- with $x.dependsOn }}
   dependsOn:
 {{- range $dep := . }}
 {{- if not (has $dep $disabledComponents) }}
   - name: {{ $dep }}
     namespace: {{ index $dependencyNamespaces $dep }}
 {{- end }}
 {{- end }}
 {{- end }}
 {{- end }}
 {{- end }}
-{{- end }}
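A hedged follow-up to the template change above: once rendered and applied, a component that is disabled but still present in the cluster keeps its HelmRelease with the cozystack.io/marked-for-deletion label, so it can be listed with a plain label selector (the `hr` shortname is the same one used elsewhere in this diff; assumes kubectl access to the cluster):

    # List HelmReleases that the installer has marked for deletion
    kubectl get hr -A -l cozystack.io/marked-for-deletion=true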
@@ -2,6 +2,9 @@ NAMESPACE=cozy-e2e-tests
 NAME := sandbox
 CLEAN := 1
 TESTING_APPS := $(shell find ../../apps -maxdepth 1 -mindepth 1 -type d | awk -F/ '{print $$NF}')
+SANDBOX_NAME := cozy-e2e-sandbox-$(shell echo "$$(hostname):$$(pwd)" | sha256sum | cut -c -6)
+
+ROOT_DIR = $(dir $(abspath $(firstword $(MAKEFILE_LIST))/../../..))
 
 include ../../../scripts/common-envs.mk
 

@@ -24,7 +27,6 @@ image-e2e-sandbox:
     --provenance false \
     --tag $(REGISTRY)/e2e-sandbox:$(call settag,$(TAG)) \
     --cache-from type=registry,ref=$(REGISTRY)/e2e-sandbox:latest \
-    --platform linux/amd64,linux/arm64 \
     --cache-to type=inline \
     --metadata-file images/e2e-sandbox.json \
     --push=$(PUSH) \

@@ -34,27 +36,20 @@ image-e2e-sandbox:
     yq -i '.e2e.image = strenv(IMAGE)' values.yaml
     rm -f images/e2e-sandbox.json
 
-copy-hack-dir:
-    tar -C ../../../ -cf- hack | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- tar -xf-
-
-copy-image:
-    cat ../../../_out/assets/nocloud-amd64.raw.xz | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -xec 'xz --decompress > /nocloud-amd64.raw'
-
-test: wait-for-sandbox copy-hack-dir copy-image ## Run the end-to-end tests in existing sandbox.
-    helm template -n cozy-system installer ../installer | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'cat > /cozystack-installer.yaml'
-    kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'export COZYSTACK_INSTALLER_YAML=$$(cat /cozystack-installer.yaml) && /hack/e2e.sh'
-
-test-applications: wait-for-sandbox copy-hack-dir ## Run the end-to-end tests in existing sandbox for applications.
+test: ## Run the end-to-end tests in existing sandbox.
+    docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'
+
+test-applications: ## Run the end-to-end tests in existing sandbox for applications.
     for app in $(TESTING_APPS); do \
-        kubectl exec -ti -n cozy-e2e-tests deploy/cozystack-e2e-sandbox -- bash -c "/hack/e2e.application.sh $${app}"; \
+        docker exec ${SANDBOX_NAME} bash -c "/hack/e2e.application.sh $${app}"; \
     done
-    kubectl exec -ti -n cozy-e2e-tests deploy/cozystack-e2e-sandbox -- bash -c "kubectl get hr -A | grep -v 'True'"
+    docker exec ${SANDBOX_NAME} bash -c "kubectl get hr -A | grep -v 'True'"
 
 delete: ## Remove sandbox from existing Kubernetes cluster.
-    kubectl delete deploy -n $(NAMESPACE) cozystack-e2e-$(NAME)
+    docker rm -f "${SANDBOX_NAME}" || true
 
 exec: ## Opens an interactive shell in the sandbox container.
-    kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- bash
+    docker exec -ti "${SANDBOX_NAME}" -- bash
 
 proxy: sync-hosts ## Enable a SOCKS5 proxy server; mirrord and gost must be installed.
     mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- gost -L=127.0.0.1:10080

@@ -65,6 +60,6 @@ login: ## Downloads the kubeconfig into a temporary directory and runs a shell w
 sync-hosts:
     kubectl exec -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'kubectl get ing -A -o go-template='\''{{ "127.0.0.1 localhost\n"}}{{ range .items }}{{ range .status.loadBalancer.ingress }}{{ .ip }}{{ end }} {{ range .spec.rules }}{{ .host }}{{ end }}{{ "\n" }}{{ end }}'\'' > /etc/hosts'
 
-wait-for-sandbox:
-    kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) cozystack-e2e-$(NAME)
-    kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=cozystack-e2e-$(NAME)
+apply: delete
+    docker run -d --rm --name "${SANDBOX_NAME}" --privileged "$$(yq .e2e.image values.yaml)" sleep infinity
+    docker cp "${ROOT_DIR}" "${SANDBOX_NAME}":/workspace
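Assuming the targets behave as written in the Makefile diff above, a typical local run with the new docker-based sandbox would be (target names come from the diff; the rest is ordinary make and docker usage):

    make apply              # recreate the privileged sandbox container and copy the repo into /workspace
    make test               # run hack/e2e.sh inside the sandbox via docker exec
    make test-applications  # run the per-application e2e scripts
    make delete             # remove the sandbox container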
@@ -1,11 +1,11 @@
 FROM ubuntu:22.04
 
 ARG KUBECTL_VERSION=1.32.0
-ARG TALOSCTL_VERSION=1.8.4
+ARG TALOSCTL_VERSION=1.9.5
 ARG HELM_VERSION=3.16.4
 
 RUN apt-get update
-RUN apt-get -y install genisoimage qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq
+RUN apt-get -y install genisoimage qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq make git
 RUN curl -LO "https://github.com/siderolabs/talos/releases/download/v${TALOSCTL_VERSION}/talosctl-linux-amd64" \
     && chmod +x talosctl-linux-amd64 \
     && mv talosctl-linux-amd64 /usr/local/bin/talosctl

@@ -14,3 +14,4 @@ RUN curl -LO "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kube
     && mv kubectl /usr/local/bin/kubectl
 RUN curl -sSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -s - --version "v${HELM_VERSION}"
 RUN wget https://github.com/mikefarah/yq/releases/download/v4.44.3/yq_linux_amd64 -O /usr/local/bin/yq && chmod +x /usr/local/bin/yq
+RUN curl -s https://fluxcd.io/install.sh | bash
@@ -1,40 +0,0 @@
----
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: {{ .Release.Namespace }}
-  labels:
-    pod-security.kubernetes.io/enforce: privileged
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: cozystack-e2e-{{ .Release.Name }}
-  namespace: cozy-e2e-tests
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: cozystack-e2e-{{ .Release.Name }}
-  strategy:
-    type: Recreate
-  template:
-    metadata:
-      labels:
-        app: cozystack-e2e-{{ .Release.Name }}
-    spec:
-      automountServiceAccountToken: false
-      terminationGracePeriodSeconds: 1
-      containers:
-        - name: sandbox
-          image: "{{ .Values.e2e.image }}"
-          securityContext:
-            privileged: true
-          env:
-            - name: KUBECONFIG
-              value: /kubeconfig
-            - name: TALOSCONFIG
-              value: /talosconfig
-          command:
-            - sleep
-            - infinity
@@ -1,36 +0,0 @@
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMInstance
-metadata:
-  name: srv1
-  namespace: tenant-testing
-spec:
-  instanceProfile: ubuntu
-  instanceType: u1.xlarge
-  running: true
-  disks:
-    - name: srv1-system
-    - name: srv1-data
----
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMDisk
-metadata:
-  name: srv1-system
-  namespace: tenant-testing
-spec:
-  optical: false
-  source:
-    http:
-      url: https://github.com/cozystack/cozystack/releases/download/v0.28.2/nocloud-amd64.raw.xz
-  storage: 10Gi
-  storageClass: local
----
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMDisk
-metadata:
-  name: srv1-data
-  namespace: tenant-testing
-spec:
-  optical: false
-  source: {}
-  storage: 100Gi
-  storageClass: local
@@ -1,36 +0,0 @@
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMInstance
-metadata:
-  name: srv2
-  namespace: tenant-testing
-spec:
-  instanceProfile: ubuntu
-  instanceType: u1.xlarge
-  running: true
-  disks:
-    - name: srv2-system
-    - name: srv2-data
----
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMDisk
-metadata:
-  name: srv2-system
-  namespace: tenant-testing
-spec:
-  optical: false
-  source:
-    http:
-      url: https://github.com/cozystack/cozystack/releases/download/v0.28.2/nocloud-amd64.raw.xz
-  storage: 10Gi
-  storageClass: local
----
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMDisk
-metadata:
-  name: srv2-data
-  namespace: tenant-testing
-spec:
-  optical: false
-  source: {}
-  storage: 100Gi
-  storageClass: local
@@ -1,36 +0,0 @@
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMInstance
-metadata:
-  name: srv3
-  namespace: tenant-testing
-spec:
-  instanceProfile: ubuntu
-  instanceType: u1.xlarge
-  running: true
-  disks:
-    - name: srv3-system
-    - name: srv3-data
----
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMDisk
-metadata:
-  name: srv3-system
-  namespace: tenant-testing
-spec:
-  optical: false
-  source:
-    http:
-      url: https://github.com/cozystack/cozystack/releases/download/v0.28.2/nocloud-amd64.raw.xz
-  storage: 10Gi
-  storageClass: local
----
-apiVersion: apps.cozystack.io/v1alpha1
-kind: VMDisk
-metadata:
-  name: srv3-data
-  namespace: tenant-testing
-spec:
-  optical: false
-  source: {}
-  storage: 100Gi
-  storageClass: local
@@ -1,5 +0,0 @@
-apiVersion: apps.cozystack.io/v1alpha1
-kind: Tenant
-metadata:
-  name: testing
-  namespace: tenant-root
@@ -1,2 +1,2 @@
 e2e:
-  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.28.0@sha256:bb5e8f5d92e2e4305ea1cc7f007b3e98769645ab845f632b4788b9373cd207eb
+  image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.2@sha256:31273d6b42dc88c2be2ff9ba64564d1b12e70ae8a5480953341b0d113ac7d4bd
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/matchbox:v0.28.0@sha256:b2002815727b71e2657a6f5b8ed558cc38fc21e81a39b9699266e558be03561f
+ghcr.io/cozystack/cozystack/matchbox:v0.30.2@sha256:307d382f75f1dcb39820c73b93b2ce576cdb6d58032679bda7d926999c677900
Some files were not shown because too many files have changed in this diff.