Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-29 10:18:54 +00:00)

Comparing 0.30.1...upd-testin (1 commit)
| Author | SHA1 | Date |
|---|---|---|
| | 24c8f4012d | |
.github/workflows/pre-commit.yml (vendored, 9 changed lines)

@@ -1,12 +1,7 @@
name: Pre-Commit Checks
on:
push:
branches:
- main
pull_request:
paths-ignore:
- '**.md'
on: [push, pull_request]
jobs:
pre-commit:
runs-on: ubuntu-22.04

.github/workflows/pull-requests-release.yaml (vendored, 96 changed lines)

@@ -1,96 +0,0 @@
name: Releasing PR
on:
pull_request:
types: [labeled, opened, synchronize, reopened, closed]
jobs:
verify:
name: Test Release
runs-on: [self-hosted]
permissions:
contents: read
packages: write
if: |
contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
contains(github.event.pull_request.labels.*.name, 'release') &&
github.event.action != 'closed'
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: Run tests
run: make test
finalize:
name: Finalize Release
runs-on: [self-hosted]
permissions:
contents: write
if: |
github.event.pull_request.merged == true &&
contains(github.event.pull_request.labels.*.name, 'release')
steps:
- name: Extract tag from branch name
id: get_tag
uses: actions/github-script@v7
with:
script: |
const branch = context.payload.pull_request.head.ref;
const match = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!match) {
core.setFailed(`Branch '${branch}' does not match expected format 'release-X.Y.Z[-suffix]'`);
} else {
const tag = match[1];
core.setOutput('tag', tag);
console.log(`✅ Extracted tag: ${tag}`);
}
- name: Checkout repo
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Create tag on merged commit
run: |
git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
git push origin ${{ steps.get_tag.outputs.tag }}
- name: Publish draft release
uses: actions/github-script@v7
with:
script: |
const tag = '${{ steps.get_tag.outputs.tag }}';
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
const release = releases.data.find(r => r.tag_name === tag && r.draft);
if (!release) {
throw new Error(`Draft release with tag ${tag} not found`);
}
await github.rest.repos.updateRelease({
owner: context.repo.owner,
repo: context.repo.repo,
release_id: release.id,
draft: false
});
console.log(`✅ Published release for ${tag}`);
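The tag-extraction step above accepts branches named `release-X.Y.Z` with an optional suffix. A rough shell sketch of the same matching, outside of GitHub Actions (illustrative only; the branch name below is hypothetical and a sed with `-E` support is assumed):

```bash
#!/bin/sh
# Mimic the workflow's branch-to-tag extraction.
branch="release-0.30.1-rc1"   # hypothetical branch name
tag=$(printf '%s\n' "$branch" | sed -nE 's/^release-([0-9]+\.[0-9]+\.[0-9]+([-.A-Za-z0-9_]+)?)$/\1/p')
if [ -n "$tag" ]; then
  echo "Extracted tag: $tag"   # prints: Extracted tag: 0.30.1-rc1
else
  echo "Branch '$branch' does not match 'release-X.Y.Z[-suffix]'" >&2
  exit 1
fi
```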
.github/workflows/pull-requests.yaml (vendored, 39 changed lines)

@@ -1,39 +0,0 @@
name: Pull Request
on:
pull_request:
types: [labeled, opened, synchronize, reopened]
jobs:
e2e:
name: Build and Test
runs-on: [self-hosted]
permissions:
contents: read
packages: write
if: |
contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
!contains(github.event.pull_request.labels.*.name, 'release')
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: make build
run: |
make build
- name: make test
run: |
make test

.github/workflows/tags.yaml (vendored, 162 changed lines)

@@ -1,162 +0,0 @@
name: Versioned Tag
on:
push:
tags:
- 'v*.*.*'
jobs:
prepare-release:
name: Prepare Release
runs-on: [self-hosted]
permissions:
contents: write
packages: write
pull-requests: write
steps:
- name: Check if release already exists
id: check_release
uses: actions/github-script@v7
with:
script: |
const tag = context.ref.replace('refs/tags/', '');
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
if (existing) {
core.setOutput('skip', 'true');
} else {
core.setOutput('skip', 'false');
}
- name: Skip if release already exists
if: steps.check_release.outputs.skip == 'true'
run: echo "Release already exists, skipping workflow."
- name: Checkout code
if: steps.check_release.outputs.skip == 'false'
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Login to GitHub Container Registry
if: steps.check_release.outputs.skip == 'false'
uses: docker/login-action@v3
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ghcr.io
- name: Build
if: steps.check_release.outputs.skip == 'false'
run: make build
- name: Commit release artifacts
if: steps.check_release.outputs.skip == 'false'
env:
GIT_AUTHOR_NAME: ${{ github.actor }}
GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
run: |
git config user.name "$GIT_AUTHOR_NAME"
git config user.email "$GIT_AUTHOR_EMAIL"
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
- name: Create release branch
if: steps.check_release.outputs.skip == 'false'
run: |
BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
git branch -f "$BRANCH_NAME"
git push origin "$BRANCH_NAME" --force
- name: Create pull request if not exists
if: steps.check_release.outputs.skip == 'false'
uses: actions/github-script@v7
with:
script: |
const version = context.ref.replace('refs/tags/v', '');
const branch = `release-${version}`;
const base = 'main';
const prs = await github.rest.pulls.list({
owner: context.repo.owner,
repo: context.repo.repo,
head: `${context.repo.owner}:${branch}`,
base
});
if (prs.data.length === 0) {
const newPr = await github.rest.pulls.create({
owner: context.repo.owner,
repo: context.repo.repo,
head: branch,
base: base,
title: `Release v${version}`,
body:
`This PR prepares the release \`v${version}\`.\n` +
`(Please merge it before releasing draft)`,
draft: false
});
console.log(`Created pull request #${newPr.data.number} from ${branch} to ${base}`);
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: newPr.data.number,
labels: ['release']
});
} else {
console.log(`Pull request already exists from ${branch} to ${base}`);
}
- name: Create or reuse draft release
if: steps.check_release.outputs.skip == 'false'
id: create_release
uses: actions/github-script@v7
with:
script: |
const tag = context.ref.replace('refs/tags/', '');
const releases = await github.rest.repos.listReleases({
owner: context.repo.owner,
repo: context.repo.repo
});
let release = releases.data.find(r => r.tag_name === tag);
if (!release) {
release = await github.rest.repos.createRelease({
owner: context.repo.owner,
repo: context.repo.repo,
tag_name: tag,
name: `${tag}`,
draft: true,
prerelease: false
});
}
core.setOutput('upload_url', release.upload_url);
- name: Build assets
if: steps.check_release.outputs.skip == 'false'
run: make assets
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload assets
if: steps.check_release.outputs.skip == 'false'
run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Delete pushed tag
if: steps.check_release.outputs.skip == 'false'
run: |
git push --delete origin ${GITHUB_REF#refs/tags/}
- name: Run tests
run: make test

@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas o
* Problems found while setting up the development environment
* Gaps in our documentation
* Bugs in our GitHub actions
* Bugs in our Github actions
First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).
The guidelines below are a starting point. We don't want to limit your
creativity, passion, and initiative. If you think there's a better way, please
feel free to bring it up in a GitHub discussion, or open a pull request. We're
feel free to bring it up in a Github discussion, or open a pull request. We're
certain there are always better ways to do things, we just need to start some
constructive dialogue!

@@ -23,9 +23,9 @@ We welcome many types of contributions including:
* New features
* Builds, CI/CD
* Bug fixes
* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
* [Documentation](https://github.com/cozystack/cozystack-website/tree/main)
* Issue Triage
* Answering questions on Slack or GitHub Discussions
* Answering questions on Slack or Github Discussions
* Web design
* Communications / Social Media / Blog Posts
* Events participation

@@ -34,7 +34,7 @@ We welcome many types of contributions including:
## Ask for Help
The best way to reach us with a question when contributing is to drop a line in
our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.
our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.
## Raising Issues

Makefile (23 changed lines)

@@ -1,13 +1,6 @@
.PHONY: manifests repos assets
build-deps:
@command -V find docker skopeo jq gh helm > /dev/null
@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)
build: build-deps
build:
make -C packages/apps/http-cache image
make -C packages/apps/postgres image
make -C packages/apps/mysql image

@@ -26,6 +19,10 @@ build: build-deps
make -C packages/core/installer image
make manifests
manifests:
mkdir -p _out/assets
(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
repos:
rm -rf _out
make -C packages/apps check-version-map

@@ -36,21 +33,17 @@ repos:
mkdir -p _out/logos
cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/
manifests:
mkdir -p _out/assets
(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
assets:
make -C packages/core/installer/ assets
test:
test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
make -C packages/core/testing apply
make -C packages/core/testing test
#make -C packages/core/testing test-applications
make -C packages/core/testing test-applications
generate:
hack/update-codegen.sh
upload_assets: manifests
upload_assets: assets
hack/upload-assets.sh

README.md (33 changed lines)

@@ -12,21 +12,20 @@
**Cozystack** is a free PaaS platform and framework for building clouds.
With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
Use Cozystack to build your own cloud or provide a cost-effective development environment.
You can use Cozystack to build your own cloud or to provide a cost-effective development environments.
## Use-Cases
* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
You can use Cozystack as a backend for a public cloud
* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
You can use Cozystack as backend for a public cloud
* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach
* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach
* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
You can use Cozystack as a Kubernetes distribution for Bare Metal
* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
You can use Cozystack as Kubernetes distribution for Bare Metal
## Screenshot

@@ -34,11 +33,11 @@ You can use Cozystack as a Kubernetes distribution for Bare Metal
## Documentation
The documentation is located on the [cozystack.io](https://cozystack.io) website.
The documentation is located on official [cozystack.io](https://cozystack.io) website.
Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.
Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.
If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.
## Versioning

@@ -51,15 +50,15 @@ A full list of the available releases is available in the GitHub repository's [R
Contributions are highly appreciated and very welcomed!
In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.
In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
You can express your intention to on the fix on your own.
You can express your intention in working on the fix on your own.
Commits are used to generate the changelog, and their author will be referenced in it.
If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
You are welcome to join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
## License

@@ -178,15 +178,6 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
os.Exit(1)
}
if err = (&controller.WorkloadReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Workload")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {

hack/e2e.sh (16 changed lines)

@@ -84,7 +84,7 @@ done
# Start VMs
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-drive file=srv$i/system.img,if=virtio,format=raw \
-drive file=srv$i/seed.img,if=virtio,format=raw \

@@ -113,11 +113,6 @@ machine:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
files:
- content: |
[plugins]

@@ -318,12 +313,7 @@ kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"s
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
# Wait for HelmReleases be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
"dashboard": true

@@ -338,7 +328,7 @@ kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root
# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
# Wait for grafana

@@ -19,19 +19,21 @@ fi
miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
# search accross all tags sorted by version
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
search_commits=$(git ls-remote --tags origin | grep 'refs/tags/v' | sort -k2,2 -rV | awk '{print $1}')
# add latest main commit to search
search_commits="${search_commits} $(git rev-parse "origin/main")"
resolved_miss_map=$(
echo "$miss_map" | while read -r chart version commit; do
# if version is found in HEAD, it's HEAD
if [ $(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml) = "${version}" ]; then
if grep -q "^version: $version$" ./${chart}/Chart.yaml; then
echo "$chart $version HEAD"
continue
fi
# if commit is not HEAD, check if it's valid
if [ $commit != "HEAD" ]; then
if [ $(git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') != "${version}" ]; then
if ! git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
echo "Commit $commit for $chart $version is not valid" >&2
exit 1
fi

@@ -44,15 +46,15 @@ resolved_miss_map=$(
# if commit is HEAD, but version is not found in HEAD, check all tags
found_tag=""
for tag in $search_commits; do
if [ $(git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') = "${version}" ]; then
if git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
found_tag=$(git rev-parse --short "${tag}")
break
fi
done
if [ -z "$found_tag" ]; then
echo "Can't find $chart $version in any version tag, removing it" >&2
continue
echo "Can't find $chart $version in any version tag or in the latest main commit" >&2
exit 1
fi
echo "$chart $version $found_tag"
@@ -1,11 +1,8 @@
#!/bin/bash
set -xe
version=${VERSION:-$(git describe --tags)}
gh release upload --clobber $version _out/assets/cozystack-installer.yaml
gh release upload --clobber $version _out/assets/metal-amd64.iso
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
gh release upload --clobber $version _out/assets/kernel-amd64
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
version=$(git describe --tags)
gh release upload $version _out/assets/cozystack-installer.yaml
gh release upload $version _out/assets/metal-amd64.iso
gh release upload $version _out/assets/metal-amd64.raw.xz
gh release upload $version _out/assets/nocloud-amd64.raw.xz
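In the variant that reads `VERSION` and passes `--clobber`, the upload becomes repeatable for an existing tag, since assets with the same name are overwritten rather than rejected. A hypothetical invocation (the tag and token are placeholders, and the assets are expected in `_out/assets/`):

```bash
# Re-upload release assets for an existing tag.
VERSION=v0.30.1 GH_TOKEN="<personal-access-token>" ./hack/upload-assets.sh
```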
@@ -1,87 +0,0 @@
package controller
import (
"context"
"strings"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)
// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadReconciler struct {
client.Client
Scheme *runtime.Scheme
}
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
logger := log.FromContext(ctx)
w := &cozyv1alpha1.Workload{}
err := r.Get(ctx, req.NamespacedName, w)
if err != nil {
if apierrors.IsNotFound(err) {
return ctrl.Result{}, nil
}
logger.Error(err, "Unable to fetch Workload")
return ctrl.Result{}, err
}
// it's being deleted, nothing to handle
if w.DeletionTimestamp != nil {
return ctrl.Result{}, nil
}
t := getMonitoredObject(w)
err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
// found object, nothing to do
if err == nil {
return ctrl.Result{}, nil
}
// error getting object but not 404 -- requeue
if !apierrors.IsNotFound(err) {
logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
return ctrl.Result{}, err
}
err = r.Delete(ctx, w)
if err != nil {
logger.Error(err, "failed to delete workload")
}
return ctrl.Result{}, err
}
// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
// Watch WorkloadMonitor objects
For(&cozyv1alpha1.Workload{}).
Complete(r)
}
func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
if strings.HasPrefix(w.Name, "pvc-") {
obj := &corev1.PersistentVolumeClaim{}
obj.Name = strings.TrimPrefix(w.Name, "pvc-")
obj.Namespace = w.Namespace
return obj
}
if strings.HasPrefix(w.Name, "svc-") {
obj := &corev1.Service{}
obj.Name = strings.TrimPrefix(w.Name, "svc-")
obj.Namespace = w.Namespace
return obj
}
obj := &corev1.Pod{}
obj.Name = w.Name
obj.Namespace = w.Namespace
return obj
}

@@ -3,7 +3,6 @@ package controller
import (
"context"
"encoding/json"
"fmt"
"sort"
apierrors "k8s.io/apimachinery/pkg/api/errors"

@@ -34,17 +33,6 @@ type WorkloadMonitorReconciler struct {
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch
// isServiceReady checks if the service has an external IP bound
func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
return len(svc.Status.LoadBalancer.Ingress) > 0
}
// isPVCReady checks if the PVC is bound
func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
return pvc.Status.Phase == corev1.ClaimBound
}
// isPodReady checks if the Pod is in the Ready condition.
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {

@@ -100,96 +88,6 @@ func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
obj.SetOwnerReferences(owners)
}
// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
ctx context.Context,
monitor *cozyv1alpha1.WorkloadMonitor,
svc corev1.Service,
) error {
logger := log.FromContext(ctx)
workload := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("svc-%s", svc.Name),
Namespace: svc.Namespace,
},
}
resources := make(map[string]resource.Quantity)
q := resource.MustParse("0")
for _, ing := range svc.Status.LoadBalancer.Ingress {
if ing.IP != "" {
q.Add(resource.MustParse("1"))
}
}
resources["public-ips"] = q
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), monitor)
workload.Labels = svc.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
workload.Status.Type = monitor.Spec.Type
workload.Status.Resources = resources
workload.Status.Operational = r.isServiceReady(&svc)
return nil
})
if err != nil {
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
return err
}
return nil
}
// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
ctx context.Context,
monitor *cozyv1alpha1.WorkloadMonitor,
pvc corev1.PersistentVolumeClaim,
) error {
logger := log.FromContext(ctx)
workload := &cozyv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pvc-%s", pvc.Name),
Namespace: pvc.Namespace,
},
}
resources := make(map[string]resource.Quantity)
for resourceName, resourceQuantity := range pvc.Status.Capacity {
resources[resourceName.String()] = resourceQuantity
}
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
// Update owner references with the new monitor
updateOwnerReferences(workload.GetObjectMeta(), monitor)
workload.Labels = pvc.Labels
// Fill Workload status fields:
workload.Status.Kind = monitor.Spec.Kind
workload.Status.Type = monitor.Spec.Type
workload.Status.Resources = resources
workload.Status.Operational = r.isPVCReady(&pvc)
return nil
})
if err != nil {
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
return err
}
return nil
}
// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
ctx context.Context,

@@ -307,45 +205,6 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
}
pvcList := &corev1.PersistentVolumeClaimList{}
if err := r.List(
ctx,
pvcList,
client.InNamespace(monitor.Namespace),
client.MatchingLabels(monitor.Spec.Selector),
); err != nil {
logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
return ctrl.Result{}, err
}
for _, pvc := range pvcList.Items {
if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
continue
}
}
svcList := &corev1.ServiceList{}
if err := r.List(
ctx,
svcList,
client.InNamespace(monitor.Namespace),
client.MatchingLabels(monitor.Spec.Selector),
); err != nil {
logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
return ctrl.Result{}, err
}
for _, svc := range svcList.Items {
if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
continue
}
if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
continue
}
}
// Update WorkloadMonitor status based on observed pods
monitor.Status.ObservedReplicas = observedReplicas
monitor.Status.AvailableReplicas = availableReplicas

@@ -374,51 +233,41 @@ func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Also watch Pod objects and map them back to WorkloadMonitor if labels match
Watches(
&corev1.Pod{},
handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
).
// Watch PVCs as well
Watches(
&corev1.PersistentVolumeClaim{},
handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
pod, ok := obj.(*corev1.Pod)
if !ok {
return nil
}
var monitorList cozyv1alpha1.WorkloadMonitorList
// List all WorkloadMonitors in the same namespace
if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
return nil
}
// Match each monitor's selector with the Pod's labels
var requests []reconcile.Request
for _, m := range monitorList.Items {
matches := true
for k, v := range m.Spec.Selector {
if podVal, exists := pod.Labels[k]; !exists || podVal != v {
matches = false
break
}
}
if matches {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: m.Namespace,
Name: m.Name,
},
})
}
}
return requests
}),
).
// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
Owns(&cozyv1alpha1.Workload{}).
Complete(r)
}
func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
return func(ctx context.Context, obj client.Object) []reconcile.Request {
concrete, ok := obj.(T)
if !ok {
return nil
}
var monitorList cozyv1alpha1.WorkloadMonitorList
// List all WorkloadMonitors in the same namespace
if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
return nil
}
labels := concrete.GetLabels()
// Match each monitor's selector with the Pod's labels
var requests []reconcile.Request
for _, m := range monitorList.Items {
matches := true
for k, v := range m.Spec.Selector {
if labelVal, exists := labels[k]; !exists || labelVal != v {
matches = false
break
}
}
if matches {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: m.Namespace,
Name: m.Name,
},
})
}
}
return requests
}
}

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/cozystack/cozystack/clickhouse-backup:0.6.2@sha256:67dd53efa86b704fc5cb876aca055fef294b31ab67899b683a4821ea12582ea7

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:bef7344da098c4dc400a9e20ffad10ac991df67d09a30026207454abbc91f28b
ghcr.io/cozystack/cozystack/nginx-cache:0.3.1@sha256:2b82eae28239ca0f9968602c69bbb752cd2a5818e64934ccd06cb91d95d019c7

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.17.1
version: 0.17.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.17.1@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.15.2@sha256:967e51702102d0dbd97f9847de4159d62681b31eb606322d2c29755393c2236e

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.17.1@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:latest@sha256:47ad85a2bb2b11818df85e80cbc6e07021e97e429d5bb020ce8db002b37a77f1

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.17.1@sha256:d1346d59224e6d2d07f1551af918ed31e57ba84b750122c1aeceaf9b33dd2271
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.15.2@sha256:cb4ab74099662f73e058f7c7495fb403488622c3425c06ad23b687bfa8bc805b

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:bc08ea0ced2cb7dd98b26d72a9462fc0a3863adb908a5effbfcdf7227656ea65

@@ -85,7 +85,7 @@ kamajiControlPlane:
# memory: 512Mi
## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
resourcesPreset: "small"
resourcesPreset: "micro"
controllerManager:
## @param kamajiControlPlane.controllerManager.resources Resources

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/mariadb-backup:0.6.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4
ghcr.io/cozystack/cozystack/mariadb-backup:0.5.3@sha256:8ca1fb01e880d351ee7d984a0b437c1142836963cd079986156ed28750067138

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521

@@ -56,8 +56,7 @@ kubernetes 0.15.0 4e68e65c
kubernetes 0.15.1 160e4e2a
kubernetes 0.15.2 8267072d
kubernetes 0.16.0 077045b0
kubernetes 0.17.0 1fbbfcd0
kubernetes 0.17.1 HEAD
kubernetes 0.17.0 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e

@@ -140,18 +139,15 @@ virtual-machine 0.7.0 e23286a3
virtual-machine 0.7.1 0ab39f20
virtual-machine 0.8.0 3fa4dd3a
virtual-machine 0.8.1 93c46161
virtual-machine 0.8.2 de19450f
virtual-machine 0.9.0 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 HEAD
virtual-machine 0.8.2 HEAD
vm-disk 0.1.0 HEAD
vm-instance 0.1.0 1ec10165
vm-instance 0.2.0 84f3ccc0
vm-instance 0.3.0 4e68e65c
vm-instance 0.4.0 e23286a3
vm-instance 0.4.1 0ab39f20
vm-instance 0.5.0 3fa4dd3a
vm-instance 0.5.1 de19450f
vm-instance 0.6.0 HEAD
vm-instance 0.5.1 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf

@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.9.0
version: 0.8.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.9.0
appVersion: "0.8.2"

@@ -2,7 +2,6 @@ include ../../../scripts/package.mk
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
yq -o json -i '.properties.gpus.items.type = "object" | .properties.gpus.default = []' values.schema.json
INSTANCE_TYPES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/instancetypes.yaml | yq 'split(" ") | . + [""]' -o json) \
&& yq -i -o json ".properties.instanceType.optional=true | .properties.instanceType.enum = $${INSTANCE_TYPES}" values.schema.json
PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \

@@ -36,23 +36,22 @@ virtctl ssh <user>@<vm>
### Common parameters
| Name | Description | Value |
| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ------------ |
| `external` | Enable external access from outside the cluster | `false` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |
| `instanceProfile` | Virtual Machine preferences profile | `ubuntu` |
| `systemDisk.image` | The base image for the virtual machine. Allowed values: `ubuntu`, `cirros`, `alpine`, `fedora` and `talos` | `ubuntu` |
| `systemDisk.storage` | The size of the disk allocated for the virtual machine | `5Gi` |
| `systemDisk.storageClass` | StorageClass used to store the data | `replicated` |
| `gpus` | List of GPUs to attach | `[]` |
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `""` |
| `cloudInitSeed` | A seed string to generate an SMBIOS UUID for the VM. | `""` |
| Name | Description | Value |
| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- |
| `external` | Enable external access from outside the cluster | `false` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
| `systemDisk.image` | The base image for the virtual machine. Allowed values: `ubuntu`, `cirros`, `alpine`, `fedora` and `talos` | `ubuntu` |
| `systemDisk.storage` | The size of the disk allocated for the virtual machine | `5Gi` |
| `systemDisk.storageClass` | StorageClass used to store the data | `replicated` |
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
` |
## U Series

@@ -49,23 +49,3 @@ Selector labels
app.kubernetes.io/name: {{ include "virtual-machine.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Generate a stable UUID for cloud-init re-initialization upon upgrade.
*/}}
{{- define "virtual-machine.stableUuid" -}}
{{- $source := printf "%s-%s-%s" .Release.Namespace (include "virtual-machine.fullname" .) .Values.cloudInitSeed }}
{{- $hash := sha256sum $source }}
{{- $uuid := printf "%s-%s-4%s-9%s-%s" (substr 0 8 $hash) (substr 8 12 $hash) (substr 13 16 $hash) (substr 17 20 $hash) (substr 20 32 $hash) }}
{{- if eq .Values.cloudInitSeed "" }}
{{- /* Try to save previous uuid to not trigger full cloud-init again if user decided to remove the seed. */}}
{{- $vmResource := lookup "kubevirt.io/v1" "VirtualMachine" .Release.Namespace (include "virtual-machine.fullname" .) -}}
{{- if $vmResource }}
{{- $existingUuid := $vmResource | dig "spec" "template" "spec" "domain" "firmware" "uuid" "" }}
{{- if $existingUuid }}
{{- $uuid = $existingUuid }}
{{- end }}
{{- end }}
{{- end }}
{{- $uuid }}
{{- end }}
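The helper above derives a deterministic, UUID-shaped value from the namespace, release name, and `cloudInitSeed`, so the SMBIOS UUID only changes when the seed changes. A rough shell equivalent of the same slicing (illustrative only; the namespace, release name, and seed values below are hypothetical):

```bash
#!/bin/sh
# Derive a stable 8-4-4-4-12 identifier from namespace, release name, and seed,
# injecting the same literal '4' and '9' characters the template uses.
namespace="tenant-root"
name="my-vm"
seed="upd1"
hash=$(printf '%s-%s-%s' "$namespace" "$name" "$seed" | sha256sum | awk '{print $1}')
uuid="$(echo "$hash" | cut -c1-8)-$(echo "$hash" | cut -c9-12)-4$(echo "$hash" | cut -c14-16)-9$(echo "$hash" | cut -c18-20)-$(echo "$hash" | cut -c21-32)"
echo "$uuid"
```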
|
||||
|
||||
@@ -68,15 +68,7 @@ spec:
|
||||
requests:
|
||||
memory: {{ .Values.resources.memory | quote }}
|
||||
{{- end }}
|
||||
firmware:
|
||||
uuid: {{ include "virtual-machine.stableUuid" . }}
|
||||
devices:
|
||||
{{- if .Values.gpus }}
|
||||
gpus:
|
||||
{{- range $i, $gpu := .Values.gpus }}
|
||||
- deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
disks:
|
||||
- disk:
|
||||
bus: scsi
|
||||
@@ -98,7 +90,6 @@ spec:
|
||||
secret:
|
||||
secretName: {{ include "virtual-machine.fullname" $ }}-ssh-keys
|
||||
propagationMethod:
|
||||
# keys will be injected into metadata part of cloud-init disk
|
||||
noCloud: {}
|
||||
{{- end }}
|
||||
terminationGracePeriodSeconds: 30
|
||||
@@ -109,14 +100,8 @@ spec:
|
||||
{{- if or .Values.sshKeys .Values.cloudInit }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
{{- if .Values.cloudInit }}
|
||||
secretRef:
|
||||
name: {{ include "virtual-machine.fullname" . }}-cloud-init
|
||||
{{- else }}
|
||||
userData: |
|
||||
#cloud-config
|
||||
final_message: Cloud-init user-data was left blank intentionally.
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
networks:
|
||||
- name: default
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
},
|
||||
"instanceProfile": {
|
||||
"type": "string",
|
||||
"description": "Virtual Machine preferences profile",
|
||||
"description": "Virtual Machine prefferences profile",
|
||||
"default": "ubuntu",
|
||||
"optional": true,
|
||||
"enum": [
|
||||
@@ -164,14 +164,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"gpus": {
|
||||
"type": "array",
|
||||
"description": "List of GPUs to attach",
|
||||
"default": [],
|
||||
"items": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -198,12 +190,7 @@
|
||||
"cloudInit": {
|
||||
"type": "string",
|
||||
"description": "cloud-init user data config. See cloud-init documentation for more details.",
|
||||
"default": ""
|
||||
},
|
||||
"cloudInitSeed": {
|
||||
"type": "string",
|
||||
"description": "A seed string to generate an SMBIOS UUID for the VM.",
|
||||
"default": ""
|
||||
"default": "#cloud-config\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ externalPorts:
|
||||
running: true
|
||||
|
||||
## @param instanceType Virtual Machine instance type
|
||||
## @param instanceProfile Virtual Machine preferences profile
|
||||
## @param instanceProfile Virtual Machine prefferences profile
|
||||
##
|
||||
instanceType: "u1.medium"
|
||||
instanceProfile: ubuntu
|
||||
@@ -26,12 +26,6 @@ systemDisk:
|
||||
storage: 5Gi
|
||||
storageClass: replicated
|
||||
|
||||
## @param gpus [array] List of GPUs to attach
|
||||
## Example:
|
||||
## gpus:
|
||||
## - name: nvidia.com/GA102GL_A10
|
||||
gpus: []
|
||||
|
||||
## @param resources.cpu The number of CPU cores allocated to the virtual machine
|
||||
## @param resources.memory The amount of memory allocated to the virtual machine
|
||||
resources:
|
||||
@@ -55,13 +49,5 @@ sshKeys: []
|
||||
## password: ubuntu
|
||||
## chpasswd: { expire: False }
|
||||
##
|
||||
cloudInit: ""
|
||||
|
||||
## @param cloudInitSeed A seed string to generate an SMBIOS UUID for the VM.
|
||||
cloudInitSeed: ""
|
||||
## Change it to any new value to force a full cloud-init reconfiguration. Change it when you want to apply
|
||||
## to an existing VM settings that are usually written only once, like new SSH keys or new network configuration.
|
||||
## An empty value does nothing (and the existing UUID is not reverted). Please note that changing this value
|
||||
## does not trigger a VM restart. You must perform the restart separately.
|
||||
## Example:
|
||||
## cloudInitSeed: "upd1"
|
||||
cloudInit: |
|
||||
#cloud-config
|
||||
|
||||
@@ -16,10 +16,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.1
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: 0.1.1
|
||||
appVersion: 0.1.0
|
||||
|
||||
@@ -3,9 +3,7 @@ apiVersion: cdi.kubevirt.io/v1beta1
|
||||
kind: DataVolume
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if hasKey .Values.source "upload" }}
|
||||
cdi.kubevirt.io/storage.bind.immediate.requested: ""
|
||||
{{- end }}
|
||||
vm-disk.cozystack.io/optical: "{{ .Values.optical }}"
|
||||
name: {{ .Release.Name }}
|
||||
spec:
|
||||
|
||||
@@ -17,10 +17,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.6.0
|
||||
version: 0.5.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: 0.6.0
|
||||
appVersion: "0.5.1"
|
||||
|
||||
@@ -3,7 +3,6 @@ include ../../../scripts/package.mk
|
||||
generate:
|
||||
readme-generator -v values.yaml -s values.schema.json -r README.md
|
||||
yq -o json -i '.properties.disks.items.type = "object" | .properties.disks.default = []' values.schema.json
|
||||
yq -o json -i '.properties.gpus.items.type = "object" | .properties.gpus.default = []' values.schema.json
|
||||
INSTANCE_TYPES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/instancetypes.yaml | yq 'split(" ") | . + [""]' -o json) \
|
||||
&& yq -i -o json ".properties.instanceType.optional=true | .properties.instanceType.enum = $${INSTANCE_TYPES}" values.schema.json
|
||||
PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \
|
||||
|
||||
@@ -36,21 +36,20 @@ virtctl ssh <user>@<vm>
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
| `instanceProfile` | Virtual Machine preferences profile | `ubuntu` |
|
||||
| `disks` | List of disks to attach | `[]` |
|
||||
| `gpus` | List of GPUs to attach | `[]` |
|
||||
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
|
||||
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
|
||||
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
|
||||
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `""` |
|
||||
| `cloudInitSeed` | A seed string to generate an SMBIOS UUID for the VM. | `""` |
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
|
||||
| `disks` | List of disks to attach | `[]` |
|
||||
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
|
||||
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
|
||||
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
|
||||
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
|
||||
` |
|
||||
|
||||
## U Series
|
||||
|
||||
|
||||
@@ -49,23 +49,3 @@ Selector labels
|
||||
app.kubernetes.io/name: {{ include "virtual-machine.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Generate a stable UUID for cloud-init re-initialization upon upgrade.
|
||||
*/}}
|
||||
{{- define "virtual-machine.stableUuid" -}}
|
||||
{{- $source := printf "%s-%s-%s" .Release.Namespace (include "virtual-machine.fullname" .) .Values.cloudInitSeed }}
|
||||
{{- $hash := sha256sum $source }}
|
||||
{{- $uuid := printf "%s-%s-4%s-9%s-%s" (substr 0 8 $hash) (substr 8 12 $hash) (substr 13 16 $hash) (substr 17 20 $hash) (substr 20 32 $hash) }}
|
||||
{{- if eq .Values.cloudInitSeed "" }}
|
||||
{{- /* Try to save previous uuid to not trigger full cloud-init again if user decided to remove the seed. */}}
|
||||
{{- $vmResource := lookup "kubevirt.io/v1" "VirtualMachine" .Release.Namespace (include "virtual-machine.fullname" .) -}}
|
||||
{{- if $vmResource }}
|
||||
{{- $existingUuid := $vmResource | dig "spec" "template" "spec" "domain" "firmware" "uuid" "" }}
|
||||
{{- if $existingUuid }}
|
||||
{{- $uuid = $existingUuid }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- $uuid }}
|
||||
{{- end }}
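In plain terms, the helper above hashes `<namespace>-<fullname>-<cloudInitSeed>` with SHA-256 and slices the digest into an 8-4-4-4-12 shape with fixed `4`/`9` nibbles. A minimal shell sketch of the same derivation, using made-up input values:

```bash
# Illustrative only — mirrors the slicing done by "virtual-machine.stableUuid"
ns="tenant-example"; name="virtual-machine-demo"; seed=""   # hypothetical inputs
hash=$(printf '%s-%s-%s' "$ns" "$name" "$seed" | sha256sum | cut -d' ' -f1)
echo "${hash:0:8}-${hash:8:4}-4${hash:13:3}-9${hash:17:3}-${hash:20:12}"
```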
|
||||
|
||||
@@ -22,5 +22,5 @@ spec:
|
||||
kind: virtual-machine
|
||||
type: virtual-machine
|
||||
selector:
|
||||
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
|
||||
vm.kubevirt.io/name: {{ $.Release.Name }}
|
||||
version: {{ $.Chart.Version }}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
{{- if and .Values.instanceType (not (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterInstancetype" "" .Values.instanceType)) }}
|
||||
{{- fail (printf "Specified instanceType does not exist in the cluster: %s" .Values.instanceType) }}
|
||||
{{- fail (printf "Specified instancetype not exists in cluster: %s" .Values.instanceType) }}
|
||||
{{- end }}
|
||||
{{- if and .Values.instanceProfile (not (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterPreference" "" .Values.instanceProfile)) }}
|
||||
{{- fail (printf "Specified instanceProfile does not exist in the cluster: %s" .Values.instanceProfile) }}
|
||||
{{- fail (printf "Specified profile not exists in cluster: %s" .Values.instanceProfile) }}
|
||||
{{- end }}
|
||||
|
||||
apiVersion: kubevirt.io/v1
|
||||
@@ -40,19 +40,11 @@ spec:
|
||||
requests:
|
||||
memory: {{ .Values.resources.memory | quote }}
|
||||
{{- end }}
|
||||
firmware:
|
||||
uuid: {{ include "virtual-machine.stableUuid" . }}
|
||||
devices:
|
||||
{{- if .Values.gpus }}
|
||||
gpus:
|
||||
{{- range $i, $gpu := .Values.gpus }}
|
||||
- deviceName: {{ $gpu.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
disks:
|
||||
{{- range $i, $disk := .Values.disks }}
|
||||
- name: disk-{{ $disk.name }}
|
||||
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
|
||||
- name: disk-{{ .name }}
|
||||
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" .name) }}
|
||||
{{- if $disk }}
|
||||
{{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
|
||||
cdrom: {}
|
||||
@@ -83,7 +75,6 @@ spec:
|
||||
secret:
|
||||
secretName: {{ include "virtual-machine.fullname" $ }}-ssh-keys
|
||||
propagationMethod:
|
||||
# keys will be injected into metadata part of cloud-init disk
|
||||
noCloud: {}
|
||||
{{- end }}
|
||||
terminationGracePeriodSeconds: 30
|
||||
@@ -96,14 +87,8 @@ spec:
|
||||
{{- if or .Values.sshKeys .Values.cloudInit }}
|
||||
- name: cloudinitdisk
|
||||
cloudInitNoCloud:
|
||||
{{- if .Values.cloudInit }}
|
||||
secretRef:
|
||||
name: {{ include "virtual-machine.fullname" . }}-cloud-init
|
||||
{{- else }}
|
||||
userData: |
|
||||
#cloud-config
|
||||
final_message: Cloud-init user-data was left blank intentionally.
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
networks:
|
||||
- name: default
|
||||
|
||||
@@ -88,7 +88,7 @@
|
||||
},
|
||||
"instanceProfile": {
|
||||
"type": "string",
|
||||
"description": "Virtual Machine preferences profile",
|
||||
"description": "Virtual Machine prefferences profile",
|
||||
"default": "ubuntu",
|
||||
"optional": true,
|
||||
"enum": [
|
||||
@@ -145,14 +145,6 @@
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"gpus": {
|
||||
"type": "array",
|
||||
"description": "List of GPUs to attach",
|
||||
"default": [],
|
||||
"items": {
|
||||
"type": "object"
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -179,12 +171,7 @@
|
||||
"cloudInit": {
|
||||
"type": "string",
|
||||
"description": "cloud-init user data config. See cloud-init documentation for more details.",
|
||||
"default": ""
|
||||
},
|
||||
"cloudInitSeed": {
|
||||
"type": "string",
|
||||
"description": "A seed string to generate an SMBIOS UUID for the VM.",
|
||||
"default": ""
|
||||
"default": "#cloud-config\n"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ externalPorts:
|
||||
running: true
|
||||
|
||||
## @param instanceType Virtual Machine instance type
|
||||
## @param instanceProfile Virtual Machine preferences profile
|
||||
## @param instanceProfile Virtual Machine prefferences profile
|
||||
##
|
||||
instanceType: "u1.medium"
|
||||
instanceProfile: ubuntu
|
||||
@@ -24,12 +24,6 @@ instanceProfile: ubuntu
|
||||
## - name: example-data
|
||||
disks: []
|
||||
|
||||
## @param gpus [array] List of GPUs to attach
|
||||
## Example:
|
||||
## gpus:
|
||||
## - name: nvidia.com/GA102GL_A10
|
||||
gpus: []
|
||||
|
||||
## @param resources.cpu The number of CPU cores allocated to the virtual machine
|
||||
## @param resources.memory The amount of memory allocated to the virtual machine
|
||||
resources:
|
||||
@@ -53,13 +47,5 @@ sshKeys: []
|
||||
## password: ubuntu
|
||||
## chpasswd: { expire: False }
|
||||
##
|
||||
cloudInit: ""
|
||||
|
||||
## @param cloudInitSeed A seed string to generate an SMBIOS UUID for the VM.
|
||||
cloudInitSeed: ""
|
||||
## Change it to any new value to force a full cloud-init reconfiguration. Change it when you want to apply
|
||||
## to an existing VM settings that are usually written only once, like new SSH keys or new network configuration.
|
||||
## An empty value does nothing (and the existing UUID is not reverted). Please note that changing this value
|
||||
## does not trigger a VM restart. You must perform the restart separately.
|
||||
## Example:
|
||||
## cloudInitSeed: "upd1"
|
||||
cloudInit: |
|
||||
#cloud-config
|
||||
|
||||
2
packages/system/gpu-operator/Chart.yaml → packages/core/builder/Chart.yaml
Normal file → Executable file
@@ -1,3 +1,3 @@
|
||||
apiVersion: v2
|
||||
name: cozy-gpu-operator
|
||||
name: builder
|
||||
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
|
||||
35
packages/core/builder/Makefile
Executable file
@@ -0,0 +1,35 @@
|
||||
NAMESPACE=cozy-builder
|
||||
NAME := builder
|
||||
|
||||
TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' ../installer/images/talos/profiles/installer.yaml)
|
||||
|
||||
include ../../../scripts/common-envs.mk
|
||||
|
||||
help: ## Show this help.
|
||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
||||
|
||||
show:
|
||||
helm template -n $(NAMESPACE) $(NAME) .
|
||||
|
||||
apply: ## Create builder sandbox in existing Kubernetes cluster.
|
||||
helm template -n $(NAMESPACE) $(NAME) . | kubectl apply -f -
|
||||
docker buildx ls | grep -q '^buildkit-builder*' || docker buildx create \
|
||||
--bootstrap \
|
||||
--name=buildkit-$(NAME) \
|
||||
--driver=kubernetes \
|
||||
--driver-opt=namespace=$(NAMESPACE),replicas=1 \
|
||||
--platform=linux/amd64 \
|
||||
--platform=linux/arm64 \
|
||||
--use \
|
||||
--config config.toml
|
||||
|
||||
diff:
|
||||
helm template -n $(NAMESPACE) $(NAME) . | kubectl diff -f -
|
||||
|
||||
delete: ## Remove builder sandbox from existing Kubernetes cluster.
|
||||
kubectl delete deploy -n $(NAMESPACE) $(NAME)-talos-imager
|
||||
docker buildx rm buildkit-$(NAME)
|
||||
|
||||
wait-for-builder:
|
||||
kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) $(NAME)-talos-imager
|
||||
kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=$(NAME)-talos-imager
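Taken together, the targets above give the builder sandbox a small lifecycle; a usage sketch, assuming the repository root as the working directory:

```bash
make -C packages/core/builder apply wait-for-builder   # create the sandbox and wait for the imager pod
make -C packages/core/builder show                     # render the manifests without applying
make -C packages/core/builder diff                     # compare against the live cluster state
make -C packages/core/builder delete                   # tear the sandbox down again
```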
|
||||
11
packages/core/builder/config.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
[worker.oci]
|
||||
gc = true
|
||||
gckeepstorage = 50000
|
||||
|
||||
[[worker.oci.gcpolicy]]
|
||||
keepBytes = 10737418240
|
||||
keepDuration = 604800
|
||||
filters = [ "type==source.local", "type==exec.cachemount", "type==source.git.checkout"]
|
||||
[[worker.oci.gcpolicy]]
|
||||
all = true
|
||||
keepBytes = 53687091200
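For orientation, the GC thresholds in this buildkitd config work out roughly as follows (a quick arithmetic check, not part of the file):

```bash
echo $((10737418240 / 1024**3))   # 10 -> GiB kept for source/cache-mount/git-checkout entries
echo $((604800 / 86400))          # 7  -> keepDuration expressed in days
echo $((53687091200 / 1024**3))   # 50 -> GiB cap for the catch-all policy
```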
|
||||
43
packages/core/builder/templates/sandbox.yaml
Executable file
@@ -0,0 +1,43 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: {{ .Release.Namespace }}
|
||||
labels:
|
||||
pod-security.kubernetes.io/enforce: privileged
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-talos-imager
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Release.Name }}-talos-imager
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ .Release.Name }}-talos-imager
|
||||
spec:
|
||||
automountServiceAccountToken: false
|
||||
terminationGracePeriodSeconds: 1
|
||||
containers:
|
||||
- name: imager
|
||||
image: "{{ .Values.talos.imager.image }}"
|
||||
securityContext:
|
||||
privileged: true
|
||||
command:
|
||||
- sleep
|
||||
- infinity
|
||||
volumeMounts:
|
||||
- mountPath: /dev
|
||||
name: dev
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /dev
|
||||
type: Directory
|
||||
name: dev
|
||||
3
packages/core/builder/values.yaml
Executable file
@@ -0,0 +1,3 @@
|
||||
talos:
|
||||
imager:
|
||||
image: ghcr.io/siderolabs/imager:v1.9.3
|
||||
@@ -19,10 +19,12 @@ diff:
|
||||
|
||||
update:
|
||||
hack/gen-profiles.sh
|
||||
IMAGE=$$(yq '.input.baseInstaller.imageRef | sub("/installer:", "/imager:")' images/talos/profiles/installer.yaml) \
|
||||
yq -i '.talos.imager.image = strenv(IMAGE)' ../builder/values.yaml
|
||||
|
||||
image: pre-checks image-matchbox image-cozystack image-talos
|
||||
image: pre-checks image-cozystack image-talos image-matchbox
|
||||
|
||||
image-cozystack:
|
||||
image-cozystack: run-builder
|
||||
make -C ../../.. repos
|
||||
docker buildx build -f images/cozystack/Dockerfile ../../.. \
|
||||
--provenance false \
|
||||
@@ -38,11 +40,11 @@ image-cozystack:
|
||||
yq -i '.cozystack.image = strenv(IMAGE)' values.yaml
|
||||
rm -f images/installer.json
|
||||
|
||||
image-talos:
|
||||
image-talos: run-builder
|
||||
test -f ../../../_out/assets/installer-amd64.tar || make talos-installer
|
||||
skopeo copy docker-archive:../../../_out/assets/installer-amd64.tar docker://$(REGISTRY)/talos:$(call settag,$(TALOS_VERSION))
|
||||
|
||||
image-matchbox:
|
||||
image-matchbox: run-builder
|
||||
test -f ../../../_out/assets/kernel-amd64 || make talos-kernel
|
||||
test -f ../../../_out/assets/initramfs-metal-amd64.xz || make talos-initramfs
|
||||
docker buildx build -f images/matchbox/Dockerfile ../../.. \
|
||||
@@ -59,10 +61,13 @@ image-matchbox:
|
||||
> ../../extra/bootbox/images/matchbox.tag
|
||||
rm -f images/matchbox.json
|
||||
|
||||
assets: talos-iso talos-nocloud talos-metal talos-kernel talos-initramfs
|
||||
assets: talos-iso talos-nocloud talos-metal
|
||||
|
||||
talos-initramfs talos-kernel talos-installer talos-iso talos-nocloud talos-metal:
|
||||
mkdir -p ../../../_out/assets
|
||||
cat images/talos/profiles/$(subst talos-,,$@).yaml | \
|
||||
docker run --rm -i -v /dev:/dev --privileged "ghcr.io/siderolabs/imager:$(TALOS_VERSION)" --tar-to-stdout - | \
|
||||
kubectl exec -i -n cozy-builder deploy/builder-talos-imager -- imager --tar-to-stdout - | \
|
||||
tar -C ../../../_out/assets -xzf-
|
||||
|
||||
run-builder:
|
||||
make -C ../builder/ apply wait-for-builder
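With `run-builder` as a prerequisite, the asset targets stream an imager profile through the in-cluster deployment instead of a local privileged container. A hedged sketch of what, for example, `make talos-iso` reduces to (the profile path is inferred from the `$(subst talos-,,$@)` rule):

```bash
make -C packages/core/builder apply wait-for-builder      # what run-builder does
cat packages/core/installer/images/talos/profiles/iso.yaml \
  | kubectl exec -i -n cozy-builder deploy/builder-talos-imager -- imager --tar-to-stdout - \
  | tar -C _out/assets -xzf-
```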
|
||||
|
||||
@@ -3,24 +3,24 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.5
|
||||
version: v1.9.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.5
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
output:
|
||||
kind: initramfs
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,24 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.5
|
||||
version: v1.9.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.5
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
output:
|
||||
kind: installer
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,24 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.5
|
||||
version: v1.9.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.5
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
output:
|
||||
kind: iso
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,24 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.5
|
||||
version: v1.9.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.5
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
output:
|
||||
kind: kernel
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,24 +3,24 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.5
|
||||
version: v1.9.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.5
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
output:
|
||||
kind: image
|
||||
imageOptions: { diskSize: 1306525696, diskFormat: raw }
|
||||
|
||||
@@ -3,24 +3,24 @@
|
||||
arch: amd64
|
||||
platform: nocloud
|
||||
secureboot: false
|
||||
version: v1.9.5
|
||||
version: v1.9.3
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.5
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
|
||||
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
|
||||
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
output:
|
||||
kind: image
|
||||
imageOptions: { diskSize: 1306525696, diskFormat: raw }
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
cozystack:
|
||||
image: ghcr.io/cozystack/cozystack/installer:v0.30.1@sha256:29b975e1485efa98965d292d772efc11966724fef2f9b70612e398dff0eded5b
|
||||
image: ghcr.io/cozystack/cozystack/installer:v0.28.0@sha256:71ae2037ca44d49bbcf8be56c127ee92f2486089a8ea1cdd6508af49705956ac
|
||||
|
||||
@@ -31,13 +31,6 @@ releases:
|
||||
autoDirectNodeRoutes: true
|
||||
routingMode: native
|
||||
|
||||
- name: cilium-networkpolicy
|
||||
releaseName: cilium-networkpolicy
|
||||
chart: cozy-cilium-networkpolicy
|
||||
namespace: cozy-cilium
|
||||
privileged: true
|
||||
dependsOn: [cilium]
|
||||
|
||||
- name: cozy-proxy
|
||||
releaseName: cozystack
|
||||
chart: cozy-cozy-proxy
|
||||
@@ -134,14 +127,14 @@ releases:
|
||||
chart: cozy-kafka-operator
|
||||
namespace: cozy-kafka-operator
|
||||
optional: true
|
||||
dependsOn: [cilium,victoria-metrics-operator]
|
||||
dependsOn: [cilium]
|
||||
|
||||
- name: clickhouse-operator
|
||||
releaseName: clickhouse-operator
|
||||
chart: cozy-clickhouse-operator
|
||||
namespace: cozy-clickhouse-operator
|
||||
optional: true
|
||||
dependsOn: [cilium,victoria-metrics-operator]
|
||||
dependsOn: [cilium]
|
||||
|
||||
- name: rabbitmq-operator
|
||||
releaseName: rabbitmq-operator
|
||||
@@ -161,7 +154,7 @@ releases:
|
||||
releaseName: piraeus-operator
|
||||
chart: cozy-piraeus-operator
|
||||
namespace: cozy-linstor
|
||||
dependsOn: [cilium,cert-manager,victoria-metrics-operator]
|
||||
dependsOn: [cilium,cert-manager]
|
||||
|
||||
- name: snapshot-controller
|
||||
releaseName: snapshot-controller
|
||||
|
||||
@@ -96,14 +96,14 @@ releases:
|
||||
chart: cozy-kafka-operator
|
||||
namespace: cozy-kafka-operator
|
||||
optional: true
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
dependsOn: []
|
||||
|
||||
- name: clickhouse-operator
|
||||
releaseName: clickhouse-operator
|
||||
chart: cozy-clickhouse-operator
|
||||
namespace: cozy-clickhouse-operator
|
||||
optional: true
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
dependsOn: []
|
||||
|
||||
- name: rabbitmq-operator
|
||||
releaseName: rabbitmq-operator
|
||||
|
||||
@@ -34,13 +34,6 @@ releases:
|
||||
- values-talos.yaml
|
||||
- values-kubeovn.yaml
|
||||
|
||||
- name: cilium-networkpolicy
|
||||
releaseName: cilium-networkpolicy
|
||||
chart: cozy-cilium-networkpolicy
|
||||
namespace: cozy-cilium
|
||||
privileged: true
|
||||
dependsOn: [cilium]
|
||||
|
||||
- name: kubeovn
|
||||
releaseName: kubeovn
|
||||
chart: cozy-kubeovn
|
||||
@@ -116,7 +109,7 @@ releases:
|
||||
chart: cozy-monitoring-agents
|
||||
namespace: cozy-monitoring
|
||||
privileged: true
|
||||
dependsOn: [victoria-metrics-operator, vertical-pod-autoscaler-crds]
|
||||
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
|
||||
values:
|
||||
scrapeRules:
|
||||
etcd:
|
||||
@@ -153,17 +146,6 @@ releases:
|
||||
namespace: cozy-kubevirt-cdi
|
||||
dependsOn: [cilium,kubeovn,kubevirt-cdi-operator]
|
||||
|
||||
- name: gpu-operator
|
||||
releaseName: gpu-operator
|
||||
chart: cozy-gpu-operator
|
||||
namespace: cozy-gpu-operator
|
||||
privileged: true
|
||||
optional: true
|
||||
dependsOn: [cilium,kubeovn]
|
||||
valuesFiles:
|
||||
- values.yaml
|
||||
- values-talos.yaml
|
||||
|
||||
- name: metallb
|
||||
releaseName: metallb
|
||||
chart: cozy-metallb
|
||||
@@ -199,13 +181,13 @@ releases:
|
||||
releaseName: kafka-operator
|
||||
chart: cozy-kafka-operator
|
||||
namespace: cozy-kafka-operator
|
||||
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
|
||||
dependsOn: [cilium,kubeovn]
|
||||
|
||||
- name: clickhouse-operator
|
||||
releaseName: clickhouse-operator
|
||||
chart: cozy-clickhouse-operator
|
||||
namespace: cozy-clickhouse-operator
|
||||
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
|
||||
dependsOn: [cilium,kubeovn]
|
||||
|
||||
- name: rabbitmq-operator
|
||||
releaseName: rabbitmq-operator
|
||||
@@ -399,13 +381,6 @@ releases:
|
||||
privileged: true
|
||||
dependsOn: [monitoring-agents]
|
||||
|
||||
- name: vertical-pod-autoscaler-crds
|
||||
releaseName: vertical-pod-autoscaler-crds
|
||||
chart: cozy-vertical-pod-autoscaler-crds
|
||||
namespace: cozy-vertical-pod-autoscaler
|
||||
privileged: true
|
||||
dependsOn: [cilium, kubeovn]
|
||||
|
||||
- name: reloader
|
||||
releaseName: reloader
|
||||
chart: cozy-reloader
|
||||
|
||||
@@ -69,7 +69,7 @@ releases:
|
||||
chart: cozy-monitoring-agents
|
||||
namespace: cozy-monitoring
|
||||
privileged: true
|
||||
dependsOn: [victoria-metrics-operator, vertical-pod-autoscaler-crds]
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
values:
|
||||
scrapeRules:
|
||||
etcd:
|
||||
@@ -103,13 +103,13 @@ releases:
|
||||
releaseName: kafka-operator
|
||||
chart: cozy-kafka-operator
|
||||
namespace: cozy-kafka-operator
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
dependsOn: []
|
||||
|
||||
- name: clickhouse-operator
|
||||
releaseName: clickhouse-operator
|
||||
chart: cozy-clickhouse-operator
|
||||
namespace: cozy-clickhouse-operator
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
dependsOn: []
|
||||
|
||||
- name: rabbitmq-operator
|
||||
releaseName: rabbitmq-operator
|
||||
@@ -254,10 +254,3 @@ releases:
|
||||
namespace: cozy-vertical-pod-autoscaler
|
||||
privileged: true
|
||||
dependsOn: [monitoring-agents]
|
||||
|
||||
- name: vertical-pod-autoscaler-crds
|
||||
releaseName: vertical-pod-autoscaler-crds
|
||||
chart: cozy-vertical-pod-autoscaler-crds
|
||||
namespace: cozy-vertical-pod-autoscaler
|
||||
privileged: true
|
||||
dependsOn: [cilium, kubeovn]
|
||||
|
||||
@@ -2,9 +2,6 @@ NAMESPACE=cozy-e2e-tests
|
||||
NAME := sandbox
|
||||
CLEAN := 1
|
||||
TESTING_APPS := $(shell find ../../apps -maxdepth 1 -mindepth 1 -type d | awk -F/ '{print $$NF}')
|
||||
SANDBOX_NAME := cozy-e2e-sandbox-$(shell echo "$$(hostname):$$(pwd)" | sha256sum | cut -c -6)
|
||||
|
||||
ROOT_DIR = $(dir $(abspath $(firstword $(MAKEFILE_LIST))/../../..))
|
||||
|
||||
include ../../../scripts/common-envs.mk
|
||||
|
||||
@@ -27,6 +24,7 @@ image-e2e-sandbox:
|
||||
--provenance false \
|
||||
--tag $(REGISTRY)/e2e-sandbox:$(call settag,$(TAG)) \
|
||||
--cache-from type=registry,ref=$(REGISTRY)/e2e-sandbox:latest \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--cache-to type=inline \
|
||||
--metadata-file images/e2e-sandbox.json \
|
||||
--push=$(PUSH) \
|
||||
@@ -36,20 +34,27 @@ image-e2e-sandbox:
|
||||
yq -i '.e2e.image = strenv(IMAGE)' values.yaml
|
||||
rm -f images/e2e-sandbox.json
|
||||
|
||||
test: ## Run the end-to-end tests in existing sandbox.
|
||||
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'
|
||||
copy-hack-dir:
|
||||
tar -C ../../../ -cf- hack | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- tar -xf-
|
||||
|
||||
test-applications: ## Run the end-to-end tests in existing sandbox for applications.
|
||||
copy-image:
|
||||
cat ../../../_out/assets/nocloud-amd64.raw.xz | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -xec 'xz --decompress > /nocloud-amd64.raw'
|
||||
|
||||
test: wait-for-sandbox copy-hack-dir copy-image ## Run the end-to-end tests in existing sandbox.
|
||||
helm template -n cozy-system installer ../installer | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'cat > /cozystack-installer.yaml'
|
||||
kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'export COZYSTACK_INSTALLER_YAML=$$(cat /cozystack-installer.yaml) && /hack/e2e.sh'
|
||||
|
||||
test-applications: wait-for-sandbox copy-hack-dir ## Run the end-to-end tests in existing sandbox for applications.
|
||||
for app in $(TESTING_APPS); do \
|
||||
docker exec ${SANDBOX_NAME} bash -c "/hack/e2e.application.sh $${app}"; \
|
||||
kubectl exec -ti -n cozy-e2e-tests deploy/cozystack-e2e-sandbox -- bash -c "/hack/e2e.application.sh $${app}"; \
|
||||
done
|
||||
docker exec ${SANDBOX_NAME} bash -c "kubectl get hr -A | grep -v 'True'"
|
||||
kubectl exec -ti -n cozy-e2e-tests deploy/cozystack-e2e-sandbox -- bash -c "kubectl get hr -A | grep -v 'True'"
|
||||
|
||||
delete: ## Remove sandbox from existing Kubernetes cluster.
|
||||
docker rm -f "${SANDBOX_NAME}" || true
|
||||
kubectl delete deploy -n $(NAMESPACE) cozystack-e2e-$(NAME)
|
||||
|
||||
exec: ## Opens an interactive shell in the sandbox container.
|
||||
docker exec -ti "${SANDBOX_NAME}" -- bash
|
||||
kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- bash
|
||||
|
||||
proxy: sync-hosts ## Enable a SOCKS5 proxy server; mirrord and gost must be installed.
|
||||
mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- gost -L=127.0.0.1:10080
|
||||
@@ -60,6 +65,6 @@ login: ## Downloads the kubeconfig into a temporary directory and runs a shell w
|
||||
sync-hosts:
|
||||
kubectl exec -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'kubectl get ing -A -o go-template='\''{{ "127.0.0.1 localhost\n"}}{{ range .items }}{{ range .status.loadBalancer.ingress }}{{ .ip }}{{ end }} {{ range .spec.rules }}{{ .host }}{{ end }}{{ "\n" }}{{ end }}'\'' > /etc/hosts'
|
||||
|
||||
apply: delete
|
||||
docker run -d --rm --name "${SANDBOX_NAME}" --privileged "$$(yq .e2e.image values.yaml)" sleep infinity
|
||||
docker cp "${ROOT_DIR}" "${SANDBOX_NAME}":/workspace
|
||||
wait-for-sandbox:
|
||||
kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) cozystack-e2e-$(NAME)
|
||||
kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=cozystack-e2e-$(NAME)
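The net effect of these Makefile changes is that the e2e flow now runs inside a Kubernetes deployment rather than a local Docker container; a hedged end-to-end sketch from the repository root (the `apply` target is assumed to deploy the sandbox chart, mirroring the builder package):

```bash
make -C packages/core/testing apply               # deploy the e2e sandbox (assumed target)
make -C packages/core/testing test                # waits for the sandbox, copies hack/ and the nocloud image, runs hack/e2e.sh
make -C packages/core/testing test-applications   # per-application suites via hack/e2e.application.sh
make -C packages/core/testing exec                # interactive shell inside the sandbox pod
make -C packages/core/testing delete              # remove the sandbox deployment
```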
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ARG KUBECTL_VERSION=1.32.0
|
||||
ARG TALOSCTL_VERSION=1.9.5
|
||||
ARG TALOSCTL_VERSION=1.8.4
|
||||
ARG HELM_VERSION=3.16.4
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get -y install genisoimage qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq make git
|
||||
RUN apt-get -y install genisoimage qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq
|
||||
RUN curl -LO "https://github.com/siderolabs/talos/releases/download/v${TALOSCTL_VERSION}/talosctl-linux-amd64" \
|
||||
&& chmod +x talosctl-linux-amd64 \
|
||||
&& mv talosctl-linux-amd64 /usr/local/bin/talosctl
|
||||
@@ -14,4 +14,3 @@ RUN curl -LO "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kube
|
||||
&& mv kubectl /usr/local/bin/kubectl
|
||||
RUN curl -sSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -s - --version "v${HELM_VERSION}"
|
||||
RUN wget https://github.com/mikefarah/yq/releases/download/v4.44.3/yq_linux_amd64 -O /usr/local/bin/yq && chmod +x /usr/local/bin/yq
|
||||
RUN curl -s https://fluxcd.io/install.sh | bash
|
||||
|
||||
40
packages/core/testing/templates/sandbox.yaml
Executable file
@@ -0,0 +1,40 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: {{ .Release.Namespace }}
|
||||
labels:
|
||||
pod-security.kubernetes.io/enforce: privileged
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: cozystack-e2e-{{ .Release.Name }}
|
||||
namespace: cozy-e2e-tests
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: cozystack-e2e-{{ .Release.Name }}
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: cozystack-e2e-{{ .Release.Name }}
|
||||
spec:
|
||||
automountServiceAccountToken: false
|
||||
terminationGracePeriodSeconds: 1
|
||||
containers:
|
||||
- name: sandbox
|
||||
image: "{{ .Values.e2e.image }}"
|
||||
securityContext:
|
||||
privileged: true
|
||||
env:
|
||||
- name: KUBECONFIG
|
||||
value: /kubeconfig
|
||||
- name: TALOSCONFIG
|
||||
value: /talosconfig
|
||||
command:
|
||||
- sleep
|
||||
- infinity
|
||||
36
packages/core/testing/templates/srv1.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMInstance
|
||||
metadata:
|
||||
name: srv1
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
instanceProfile: ubuntu
|
||||
instanceType: u1.xlarge
|
||||
running: true
|
||||
disks:
|
||||
- name: srv1-system
|
||||
- name: srv1-data
|
||||
---
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: srv1-system
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
optical: false
|
||||
source:
|
||||
http:
|
||||
url: https://github.com/cozystack/cozystack/releases/download/v0.28.2/nocloud-amd64.raw.xz
|
||||
storage: 10Gi
|
||||
storageClass: local
|
||||
---
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: srv1-data
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
optical: false
|
||||
source: {}
|
||||
storage: 100Gi
|
||||
storageClass: local
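srv1 above and srv2/srv3 below define three identical virtual servers for the nested test cluster. Once the testing chart is installed, they could be inspected with something like the following (the plural resource names are an assumption based on the usual CRD naming convention):

```bash
kubectl -n tenant-testing get vminstances.apps.cozystack.io
kubectl -n tenant-testing get vmdisks.apps.cozystack.io
```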
|
||||
36
packages/core/testing/templates/srv2.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMInstance
|
||||
metadata:
|
||||
name: srv2
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
instanceProfile: ubuntu
|
||||
instanceType: u1.xlarge
|
||||
running: true
|
||||
disks:
|
||||
- name: srv2-system
|
||||
- name: srv2-data
|
||||
---
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: srv2-system
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
optical: false
|
||||
source:
|
||||
http:
|
||||
url: https://github.com/cozystack/cozystack/releases/download/v0.28.2/nocloud-amd64.raw.xz
|
||||
storage: 10Gi
|
||||
storageClass: local
|
||||
---
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: srv2-data
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
optical: false
|
||||
source: {}
|
||||
storage: 100Gi
|
||||
storageClass: local
|
||||
36
packages/core/testing/templates/srv3.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMInstance
|
||||
metadata:
|
||||
name: srv3
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
instanceProfile: ubuntu
|
||||
instanceType: u1.xlarge
|
||||
running: true
|
||||
disks:
|
||||
- name: srv3-system
|
||||
- name: srv3-data
|
||||
---
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: srv3-system
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
optical: false
|
||||
source:
|
||||
http:
|
||||
url: https://github.com/cozystack/cozystack/releases/download/v0.28.2/nocloud-amd64.raw.xz
|
||||
storage: 10Gi
|
||||
storageClass: local
|
||||
---
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: VMDisk
|
||||
metadata:
|
||||
name: srv3-data
|
||||
namespace: tenant-testing
|
||||
spec:
|
||||
optical: false
|
||||
source: {}
|
||||
storage: 100Gi
|
||||
storageClass: local
|
||||
5
packages/core/testing/templates/tenant.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
apiVersion: apps.cozystack.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: testing
|
||||
namespace: tenant-root
|
||||
@@ -1,2 +1,2 @@
|
||||
e2e:
|
||||
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.1@sha256:04dcc6161e9bdb4d30538e3706bb29c93c1d615c6f3d940f9af64d3dda2f491e
|
||||
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.28.0@sha256:bb5e8f5d92e2e4305ea1cc7f007b3e98769645ab845f632b4788b9373cd207eb
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/matchbox:v0.30.1@sha256:a30e58f07c702e693f9bc052c3ef6eab443e1db8bcc86689199b2db6af14ebb7
|
||||
ghcr.io/cozystack/cozystack/matchbox:v0.28.0@sha256:b2002815727b71e2657a6f5b8ed558cc38fc21e81a39b9699266e558be03561f
|
||||
|
||||
@@ -3,4 +3,4 @@ name: monitoring
|
||||
description: Monitoring and observability stack
|
||||
icon: /logos/monitoring.svg
|
||||
type: application
|
||||
version: 1.9.2
|
||||
version: 1.9.1
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:fb48d37f1a9386e0023df9ac067ec2e03953b7b8c9d6abf2d12716e084f846a4
|
||||
ghcr.io/cozystack/cozystack/grafana:1.9.0@sha256:a492931b49af55ad184b485bcd7ea06f1334722d2184702d9f6f2e4123032357
|
||||
|
||||
@@ -4,8 +4,6 @@ kind: VLogs
|
||||
metadata:
|
||||
name: {{ .name }}
|
||||
spec:
|
||||
image:
|
||||
tag: v1.17.0-victorialogs
|
||||
storage:
|
||||
resources:
|
||||
requests:
|
||||
|
||||
@@ -34,8 +34,7 @@ monitoring 1.7.0 2a976afe
|
||||
monitoring 1.8.0 8c460528
|
||||
monitoring 1.8.1 8267072d
|
||||
monitoring 1.9.0 45a7416c
|
||||
monitoring 1.9.1 fd240701
|
||||
monitoring 1.9.2 HEAD
|
||||
monitoring 1.9.1 HEAD
|
||||
seaweedfs 0.1.0 71514249
|
||||
seaweedfs 0.2.0 5fb9cfe3
|
||||
seaweedfs 0.2.1 fde4bcfa
|
||||
|
||||
@@ -1 +1 @@
|
||||
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:3537d3baaf96d576148e6df17552f2ead2b7a55ba122ef542a2e99bde896d218
|
||||
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:218d0c017ae556e5afd074366d9a3124f954c5aefc6474844942420cca8b7640
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
apiVersion: v2
|
||||
appVersion: 0.18.1
|
||||
appVersion: 0.17.0
|
||||
description: Cluster API Operator
|
||||
name: cluster-api-operator
|
||||
type: application
|
||||
version: 0.18.1
|
||||
version: 0.17.0
|
||||
|
||||
@@ -26,10 +26,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: {{ $addonNamespace }}
|
||||
---
|
||||
@@ -39,10 +37,8 @@ metadata:
|
||||
name: {{ $addonName }}
|
||||
namespace: {{ $addonNamespace }}
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- if or $addonVersion $.Values.secretName }}
|
||||
spec:
|
||||
|
||||
@@ -26,11 +26,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: {{ $bootstrapNamespace }}
|
||||
---
|
||||
apiVersion: operator.cluster.x-k8s.io/v1alpha2
|
||||
@@ -39,11 +36,8 @@ metadata:
|
||||
name: {{ $bootstrapName }}
|
||||
namespace: {{ $bootstrapNamespace }}
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- if or $bootstrapVersion $.Values.configSecret.name }}
|
||||
spec:
|
||||
{{- end}}
|
||||
|
||||
@@ -26,11 +26,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: {{ $controlPlaneNamespace }}
|
||||
---
|
||||
apiVersion: operator.cluster.x-k8s.io/v1alpha2
|
||||
@@ -39,11 +36,8 @@ metadata:
|
||||
name: {{ $controlPlaneName }}
|
||||
namespace: {{ $controlPlaneNamespace }}
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- if or $controlPlaneVersion $.Values.configSecret.name $.Values.manager }}
|
||||
spec:
|
||||
{{- end}}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- if or .Values.addon .Values.bootstrap .Values.controlPlane .Values.infrastructure .Values.ipam }}
|
||||
{{- if or .Values.addon .Values.bootstrap .Values.controlPlane .Values.infrastructure }}
|
||||
# Deploy core components if not specified
|
||||
{{- if not .Values.core }}
|
||||
---
|
||||
@@ -6,11 +6,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: capi-system
|
||||
---
|
||||
apiVersion: operator.cluster.x-k8s.io/v1alpha2
|
||||
@@ -19,11 +16,8 @@ metadata:
|
||||
name: cluster-api
|
||||
namespace: capi-system
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- with .Values.configSecret }}
|
||||
spec:
|
||||
configSecret:
|
||||
@@ -34,3 +28,4 @@ spec:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
|
||||
@@ -25,11 +25,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: {{ $coreNamespace }}
|
||||
---
|
||||
apiVersion: operator.cluster.x-k8s.io/v1alpha2
|
||||
@@ -38,10 +35,8 @@ metadata:
|
||||
name: {{ $coreName }}
|
||||
namespace: {{ $coreNamespace }}
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- if or $coreVersion $.Values.configSecret.name $.Values.manager }}
|
||||
spec:
|
||||
@@ -50,8 +45,8 @@ spec:
|
||||
version: {{ $coreVersion }}
|
||||
{{- end }}
|
||||
{{- if $.Values.manager }}
|
||||
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.core }}
|
||||
manager:
|
||||
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.core }}
|
||||
featureGates:
|
||||
{{- range $key, $value := $.Values.manager.featureGates.core }}
|
||||
{{ $key }}: {{ $value }}
|
||||
|
||||
@@ -7,10 +7,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: capi-kubeadm-bootstrap-system
|
||||
---
|
||||
@@ -20,10 +18,8 @@ metadata:
|
||||
name: kubeadm
|
||||
namespace: capi-kubeadm-bootstrap-system
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- with .Values.configSecret }}
|
||||
spec:
|
||||
@@ -41,10 +37,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: capi-kubeadm-control-plane-system
|
||||
---
|
||||
@@ -54,16 +48,14 @@ metadata:
|
||||
name: kubeadm
|
||||
namespace: capi-kubeadm-control-plane-system
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- with .Values.configSecret }}
|
||||
spec:
|
||||
{{- if $.Values.manager }}
|
||||
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.kubeadm }}
|
||||
manager:
|
||||
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.kubeadm }}
|
||||
featureGates:
|
||||
{{- range $key, $value := $.Values.manager.featureGates.kubeadm }}
|
||||
{{ $key }}: {{ $value }}
|
||||
|
||||
@@ -26,10 +26,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: {{ $infrastructureNamespace }}
|
||||
---
|
||||
@@ -39,10 +37,8 @@ metadata:
|
||||
name: {{ $infrastructureName }}
|
||||
namespace: {{ $infrastructureNamespace }}
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- if or $infrastructureVersion $.Values.configSecret.name $.Values.manager $.Values.additionalDeployments }}
|
||||
spec:
|
||||
@@ -51,8 +47,8 @@ spec:
|
||||
version: {{ $infrastructureVersion }}
|
||||
{{- end }}
|
||||
{{- if $.Values.manager }}
|
||||
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $infrastructureName) }}
|
||||
manager:
|
||||
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $infrastructureName) }}
|
||||
{{- range $key, $value := $.Values.manager.featureGates }}
|
||||
{{- if eq $key $infrastructureName }}
|
||||
featureGates:
|
||||
|
||||
@@ -26,10 +26,8 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "1"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "1"
|
||||
name: {{ $ipamNamespace }}
|
||||
---
|
||||
@@ -39,10 +37,8 @@ metadata:
|
||||
name: {{ $ipamName }}
|
||||
namespace: {{ $ipamNamespace }}
|
||||
annotations:
|
||||
{{- if $.Values.enableHelmHook }}
|
||||
"helm.sh/hook": "post-install,post-upgrade"
|
||||
"helm.sh/hook-weight": "2"
|
||||
{{- end }}
|
||||
"argocd.argoproj.io/sync-wave": "2"
|
||||
{{- if or $ipamVersion $.Values.configSecret.name $.Values.manager $.Values.additionalDeployments }}
|
||||
spec:
|
||||
@@ -51,8 +47,8 @@ spec:
|
||||
version: {{ $ipamVersion }}
|
||||
{{- end }}
|
||||
{{- if $.Values.manager }}
|
||||
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $ipamName) }}
|
||||
manager:
|
||||
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $ipamName) }}
|
||||
{{- range $key, $value := $.Values.manager.featureGates }}
|
||||
{{- if eq $key $ipamName }}
|
||||
featureGates:
|
||||
|
||||
@@ -21,7 +21,7 @@ leaderElection:
|
||||
image:
|
||||
manager:
|
||||
repository: registry.k8s.io/capi-operator/cluster-api-operator
|
||||
tag: v0.18.1
|
||||
tag: v0.17.0
|
||||
pullPolicy: IfNotPresent
|
||||
env:
|
||||
manager: []
|
||||
@@ -69,4 +69,3 @@ volumeMounts:
|
||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||
name: cert
|
||||
readOnly: true
|
||||
enableHelmHook: true
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: cozy-cilium-networkpolicy
|
||||
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
|
||||
@@ -1,5 +0,0 @@
|
||||
export NAME=cilium-networkpolicy
|
||||
export NAMESPACE=cozy-$(NAME)
|
||||
|
||||
include ../../../scripts/common-envs.mk
|
||||
include ../../../scripts/package.mk
|
||||
@@ -79,7 +79,7 @@ annotations:
|
||||
Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can
|
||||
be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n"
|
||||
apiVersion: v2
|
||||
appVersion: 1.17.2
|
||||
appVersion: 1.17.1
|
||||
description: eBPF-based Networking, Security, and Observability
|
||||
home: https://cilium.io/
|
||||
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
|
||||
@@ -95,4 +95,4 @@ kubeVersion: '>= 1.21.0-0'
|
||||
name: cilium
|
||||
sources:
|
||||
- https://github.com/cilium/cilium
|
||||
version: 1.17.2
|
||||
version: 1.17.1
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# cilium
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
Cilium is open source software for providing and transparently securing
|
||||
network connectivity and loadbalancing between application workloads such as
|
||||
@@ -85,7 +85,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
|
||||
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
|
||||
| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
|
||||
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
|
||||
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:a5d0ce49aa801d475da48f8cb163c354ab95cab073cd3c138bd458fc8257fbf1","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
|
||||
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
|
||||
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
|
||||
| authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations |
|
||||
@@ -131,8 +131,6 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| bpf.ctTcpMax | int | `524288` | Configure the maximum number of entries in the TCP connection tracking table. |
|
||||
| bpf.datapathMode | string | `veth` | Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) |
|
||||
| bpf.disableExternalIPMitigation | bool | `false` | Disable ExternalIP mitigation (CVE-2020-8554) |
|
||||
| bpf.distributedLRU | object | `{"enabled":false}` | Control to use a distributed per-CPU backend memory for the core BPF LRU maps which Cilium uses. This improves performance significantly, but it is also recommended to increase BPF map sizing along with that. |
|
||||
| bpf.distributedLRU.enabled | bool | `false` | Enable distributed LRU backend memory. For compatibility with existing installations it is off by default. |
|
||||
| bpf.enableTCX | bool | `true` | Attach endpoint programs using tcx instead of legacy tc hooks on supported kernels. |
|
||||
| bpf.events | object | `{"default":{"burstLimit":null,"rateLimit":null},"drop":{"enabled":true},"policyVerdict":{"enabled":true},"trace":{"enabled":true}}` | Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. Helm configuration for BPF events map rate limiting is experimental and might change in upcoming releases. |
|
||||
| bpf.events.default | object | `{"burstLimit":null,"rateLimit":null}` | Default settings for all types of events except dbg and pcap. |
|
||||
@@ -197,7 +195,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
|
||||
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
|
||||
| clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. |
|
||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:981250ebdc6e66e190992eaf75cfca169113a8f08d5c3793fe15822176980398","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.2","useDigest":true}` | Clustermesh API server image. |
|
||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:1de22f46bfdd638de72c2224d5223ddc3bbeacda1803cb75799beca3d4bf7a4c","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.1","useDigest":true}` | Clustermesh API server image. |
|
||||
| clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
|
||||
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
|
||||
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
|
||||
@@ -377,7 +375,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| envoy.healthPort | int | `9878` | TCP port for the health API. |
|
||||
| envoy.httpRetryCount | int | `3` | Maximum number of retries for each HTTP request |
|
||||
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
|
||||
| envoy.image | object | `{"digest":"sha256:377c78c13d2731f3720f931721ee309159e782d882251709cb0fac3b42c03f4b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.31.5-1741765102-efed3defcc70ab5b263a0fc44c93d316b846a211","useDigest":true}` | Envoy container image. |
|
||||
| envoy.image | object | `{"digest":"sha256:fc708bd36973d306412b2e50c924cd8333de67e0167802c9b48506f9d772f521","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae","useDigest":true}` | Envoy container image. |
|
||||
| envoy.initialFetchTimeoutSeconds | int | `30` | Time in seconds after which the initial fetch on an xDS stream is considered timed out |
|
||||
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
|
||||
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
|
||||
@@ -394,7 +392,6 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
|
||||
| envoy.podSecurityContext | object | `{"appArmorProfile":{"type":"Unconfined"}}` | Security Context for cilium-envoy pods. |
|
||||
| envoy.podSecurityContext.appArmorProfile | object | `{"type":"Unconfined"}` | AppArmorProfile options for the `cilium-agent` and init containers |
|
||||
| envoy.policyRestoreTimeoutDuration | string | `nil` | Max duration to wait for endpoint policies to be restored on restart. Default "3m". |
|
||||
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
|
||||
| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. |
|
||||
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
|
||||
@@ -518,7 +515,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
|
||||
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
|
||||
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
|
||||
| hubble.relay.image | object | `{"digest":"sha256:42a8db5c256c516cacb5b8937c321b2373ad7a6b0a1e5a5120d5028433d586cc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.2","useDigest":true}` | Hubble-relay container image. |
|
||||
| hubble.relay.image | object | `{"digest":"sha256:397e8fbb188157f744390a7b272a1dec31234e605bcbe22d8919a166d202a3dc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.1","useDigest":true}` | Hubble-relay container image. |
|
||||
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
|
||||
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
|
||||
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
@@ -585,7 +582,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
|
||||
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
|
||||
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
|
||||
| hubble.ui.backend.image | object | `{"digest":"sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.2","useDigest":true}` | Hubble-ui backend image. |
|
||||
| hubble.ui.backend.image | object | `{"digest":"sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.1","useDigest":true}` | Hubble-ui backend image. |
|
||||
| hubble.ui.backend.livenessProbe.enabled | bool | `false` | Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
|
||||
| hubble.ui.backend.readinessProbe.enabled | bool | `false` | Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
|
||||
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
|
||||
@@ -595,7 +592,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
|
||||
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
|
||||
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
|
||||
| hubble.ui.frontend.image | object | `{"digest":"sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.2","useDigest":true}` | Hubble-ui frontend image. |
|
||||
| hubble.ui.frontend.image | object | `{"digest":"sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.1","useDigest":true}` | Hubble-ui frontend image. |
|
||||
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
|
||||
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
|
||||
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
|
||||
@@ -625,7 +622,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
|
||||
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). |
|
||||
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
|
||||
| image | object | `{"digest":"sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.2","useDigest":true}` | Agent container image. |
|
||||
| image | object | `{"digest":"sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.1","useDigest":true}` | Agent container image. |
|
||||
| imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images |
|
||||
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
|
||||
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
|
||||
@@ -762,7 +759,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| operator.hostNetwork | bool | `true` | HostNetwork setting |
|
||||
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
|
||||
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:7cb8c23417f65348bb810fe92fb05b41d926f019d77442f3fa1058d17fea7ffe","awsDigest":"sha256:955096183e22a203bbb198ca66e3266ce4dbc2b63f1a2fbd03f9373dcd97893c","azureDigest":"sha256:455fb88b558b1b8ba09d63302ccce76b4930581be89def027184ab04335c20e0","genericDigest":"sha256:81f2d7198366e8dec2903a3a8361e4c68d47d19c68a0d42f0b7b6e3f0523f249","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.2","useDigest":true}` | cilium-operator image. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:034b479fba340f9d98510e509c7ce1c36e8889a109d5f1c2240fcb0942bc772c","awsDigest":"sha256:da74748057c836471bfdc0e65bb29ba0edb82916ec4b99f6a4f002b2fcc849d6","azureDigest":"sha256:b9e3e3994f5fcf1832e1f344f3b3b544832851b1990f124b2c2c68e3ffe04a9b","genericDigest":"sha256:628becaeb3e4742a1c36c4897721092375891b58bae2bfcae48bbf4420aaee97","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.1","useDigest":true}` | cilium-operator image. |
|
||||
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
|
||||
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
|
||||
@@ -812,7 +809,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
|
||||
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
|
||||
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
|
||||
| preflight.image | object | `{"digest":"sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.2","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.image | object | `{"digest":"sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.1","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
|
||||
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
|
||||
@@ -886,7 +883,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| tls.caBundle.useSecret | bool | `false` | Use a Secret instead of a ConfigMap. |
|
||||
| tls.readSecretsOnlyFromSecretsNamespace | string | `nil` | Configure if the Cilium Agent will only look in `tls.secretsNamespace` for CiliumNetworkPolicy relevant Secrets. If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access to _all_ secrets in the entire cluster. This is not recommended and is included for backwards compatibility. This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old setting, and `false` == `k8s`. |
|
||||
| tls.secretSync | object | `{"enabled":null}` | Configures settings for synchronization of TLS Interception Secrets |
|
||||
| tls.secretSync.enabled | string | `nil` | Enable synchronization of Secrets for TLS Interception. If disabled and tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent. |
|
||||
| tls.secretSync.enabled | string | `nil` | Enable synchronization of Secrets for TLS Interception. If disabled and tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent. |
|
||||
| tls.secretsBackend | string | `nil` | This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies (namely the secrets referenced by terminatingTLS and originatingTLS). This value is DEPRECATED and will be removed in a future version. Use `tls.readSecretsOnlyFromSecretsNamespace` instead. Possible values: - local - k8s |
|
||||
| tls.secretsNamespace | object | `{"create":true,"name":"cilium-secrets"}` | Configures where secrets used in CiliumNetworkPolicies will be looked for |
|
||||
| tls.secretsNamespace.create | bool | `true` | Create secrets namespace for TLS Interception secrets. |
|
||||
@@ -894,7 +891,6 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for agent scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
|
||||
| tunnelPort | int | Port 8472 for VXLAN, Port 6081 for Geneve | Configure VXLAN and Geneve tunnel port. |
|
||||
| tunnelProtocol | string | `"vxlan"` | Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. Possible values: - "" - vxlan - geneve |
|
||||
| tunnelSourcePortRange | string | 0-0 to let the kernel driver decide the range | Configure VXLAN and Geneve tunnel source port range hint. |
|
||||
| updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":2},"type":"RollingUpdate"}` | Cilium agent update strategy |
|
||||
| upgradeCompatibility | string | `nil` | upgradeCompatibility helps users upgrading to ensure that the configMap for Cilium will not change critical values to ensure continued operation This flag is not required for new installations. For example: '1.7', '1.8', '1.9' |
|
||||
| vtep.cidr | string | `""` | A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" |
|
||||
|
||||
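The table above documents chart defaults such as `tunnelProtocol`, `tunnelPort`, `tunnelSourcePortRange`, and the agent `updateStrategy`. As a rough illustration (not part of this diff), a user-supplied Helm values override built from those same keys might look like the sketch below; the concrete numbers are placeholders taken from the documented defaults, not recommendations.

```yaml
# Hypothetical values override assembled from the parameters documented above.
tunnelProtocol: "vxlan"        # "", vxlan, or geneve
tunnelPort: 8472               # 0 lets the chart fall back to the protocol default
tunnelSourcePortRange: "0-0"   # 0-0 lets the kernel driver pick the range
updateStrategy:
  type: RollingUpdate
  rollingUpdate:
    maxUnavailable: 2
```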
@@ -7,15 +7,8 @@ staticResources:
- name: "envoy-prometheus-metrics-listener"
address:
socketAddress:
address: {{ .Values.ipv4.enabled | ternary "0.0.0.0" "::" | quote }}
address: "0.0.0.0"
portValue: {{ .Values.envoy.prometheus.port }}
{{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
additionalAddresses:
- address:
socketAddress:
address: "::"
portValue: {{ .Values.envoy.prometheus.port }}
{{- end }}
filterChains:
- filters:
- name: "envoy.filters.network.http_connection_manager"
@@ -296,7 +289,7 @@ overloadManager:
applicationLogConfig:
logFormat:
{{- if .Values.envoy.log.format_json }}
jsonFormat: {{ .Values.envoy.log.format_json | toJson }}
jsonFormat: "{{ .Values.envoy.log.format_json | toJson }}"
{{- else }}
textFormat: "{{ .Values.envoy.log.format }}"
{{- end }}
|
||||
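The listener hunk above switches the Prometheus metrics listener from a hard-coded `0.0.0.0` to an address derived from `ipv4.enabled`, and adds an IPv6 entry under `additionalAddresses` when both address families are enabled. A hedged sketch of the rendered bootstrap fragment for a dual-stack install, assuming the default metrics port `9964` listed in the table above, would be roughly:

```yaml
# Approximate render of the template above with ipv4.enabled=true and ipv6.enabled=true.
- name: "envoy-prometheus-metrics-listener"
  address:
    socketAddress:
      address: "0.0.0.0"
      portValue: 9964
  additionalAddresses:
    - address:
        socketAddress:
          address: "::"
          portValue: 9964
```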
@@ -232,7 +232,7 @@ spec:
|
||||
resources:
|
||||
{{- toYaml . | trim | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.prometheus.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) }}
|
||||
{{- if or .Values.prometheus.enabled .Values.hubble.metrics.enabled }}
|
||||
ports:
|
||||
- name: peer-service
|
||||
containerPort: {{ .Values.hubble.peerService.targetPort }}
|
||||
@@ -364,7 +364,7 @@ spec:
|
||||
mountPath: {{ .Values.kubeConfigPath }}
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.tls.enabled }}
|
||||
{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }}
|
||||
- name: hubble-metrics-tls
|
||||
mountPath: /var/lib/cilium/tls/hubble-metrics
|
||||
readOnly: true
|
||||
@@ -999,7 +999,7 @@ spec:
|
||||
path: client-ca.crt
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.tls.enabled }}
|
||||
{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }}
|
||||
- name: hubble-metrics-tls
|
||||
projected:
|
||||
# note: the leading zero means this number is in octal representation: do not remove it
|
||||
|
||||
@@ -39,9 +39,6 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -65,9 +62,6 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -91,9 +85,6 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -113,9 +104,6 @@ metadata:
|
||||
namespace: {{ .Values.bgpControlPlane.secretsNamespace.name | quote }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -135,9 +123,6 @@ metadata:
|
||||
namespace: {{ .Values.tls.secretsNamespace.name | quote }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
|
||||
@@ -46,9 +46,6 @@ metadata:
|
||||
k8s-app: cilium
|
||||
app.kubernetes.io/name: cilium-agent
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
clusterIP: None
|
||||
type: ClusterIP
|
||||
|
||||
@@ -403,7 +403,7 @@ data:
|
||||
|
||||
{{- if .Values.bpf.authMapMax }}
|
||||
# bpf-auth-map-max specifies the maximum number of entries in the auth map
|
||||
bpf-auth-map-max: "{{ .Values.bpf.authMapMax | int }}"
|
||||
bpf-auth-map-max: {{ .Values.bpf.authMapMax | quote }}
|
||||
{{- end }}
|
||||
{{- if or $bpfCtTcpMax $bpfCtAnyMax }}
|
||||
# bpf-ct-global-*-max specifies the maximum number of connections
|
||||
@@ -419,34 +419,34 @@ data:
|
||||
# For users upgrading from Cilium 1.2 or earlier, to minimize disruption
|
||||
# during the upgrade process, set bpf-ct-global-tcp-max to 1000000.
|
||||
{{- if $bpfCtTcpMax }}
|
||||
bpf-ct-global-tcp-max: "{{ $bpfCtTcpMax | int }}"
|
||||
bpf-ct-global-tcp-max: {{ $bpfCtTcpMax | quote }}
|
||||
{{- end }}
|
||||
{{- if $bpfCtAnyMax }}
|
||||
bpf-ct-global-any-max: "{{ $bpfCtAnyMax | int }}"
|
||||
bpf-ct-global-any-max: {{ $bpfCtAnyMax | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.bpf.ctAccounting }}
|
||||
bpf-conntrack-accounting: "{{ .Values.bpf.ctAccounting | int }}"
|
||||
bpf-conntrack-accounting: "{{ .Values.bpf.ctAccounting }}"
|
||||
{{- end }}
|
||||
{{- if .Values.bpf.natMax }}
|
||||
# bpf-nat-global-max specified the maximum number of entries in the
|
||||
# BPF NAT table.
|
||||
bpf-nat-global-max: "{{ .Values.bpf.natMax | int }}"
|
||||
bpf-nat-global-max: "{{ .Values.bpf.natMax }}"
|
||||
{{- end }}
|
||||
{{- if .Values.bpf.neighMax }}
|
||||
# bpf-neigh-global-max specified the maximum number of entries in the
|
||||
# BPF neighbor table.
|
||||
bpf-neigh-global-max: "{{ .Values.bpf.neighMax | int }}"
|
||||
bpf-neigh-global-max: "{{ .Values.bpf.neighMax }}"
|
||||
{{- end }}
|
||||
{{- if hasKey .Values.bpf "policyMapMax" }}
|
||||
# bpf-policy-map-max specifies the maximum number of entries in endpoint
|
||||
# policy map (per endpoint)
|
||||
bpf-policy-map-max: "{{ .Values.bpf.policyMapMax | int }}"
|
||||
bpf-policy-map-max: "{{ .Values.bpf.policyMapMax }}"
|
||||
{{- end }}
|
||||
{{- if hasKey .Values.bpf "lbMapMax" }}
|
||||
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
|
||||
# backend and affinity maps.
|
||||
bpf-lb-map-max: "{{ .Values.bpf.lbMapMax | int }}"
|
||||
bpf-lb-map-max: "{{ .Values.bpf.lbMapMax }}"
|
||||
{{- end }}
|
||||
{{- if hasKey .Values.bpf "lbExternalClusterIP" }}
|
||||
bpf-lb-external-clusterip: {{ .Values.bpf.lbExternalClusterIP | quote }}
|
||||
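The hunk above only changes how these numeric entries are rendered (`| int` versus `| quote`); the ConfigMap keys themselves map one-to-one onto `bpf.*` Helm values. A hedged sketch of a values fragment that would populate them follows; the sizes are placeholders for illustration, not tuning advice.

```yaml
# Hypothetical sizing override; each key feeds the matching bpf-* ConfigMap entry above.
bpf:
  authMapMax: 524288
  ctTcpMax: 524288
  ctAnyMax: 262144
  natMax: 524288
  neighMax: 524288
  policyMapMax: 16384
  lbMapMax: 65536
```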
@@ -461,7 +461,6 @@ data:
|
||||
bpf-lb-mode-annotation: {{ .Values.bpf.lbModeAnnotation | quote }}
|
||||
{{- end }}
|
||||
|
||||
bpf-distributed-lru: {{ .Values.bpf.distributedLRU.enabled | quote }}
|
||||
bpf-events-drop-enabled: {{ .Values.bpf.events.drop.enabled | quote }}
|
||||
bpf-events-policy-verdict-enabled: {{ .Values.bpf.events.policyVerdict.enabled | quote }}
|
||||
bpf-events-trace-enabled: {{ .Values.bpf.events.trace.enabled | quote }}
|
||||
@@ -514,9 +513,6 @@ data:
|
||||
{{- if .Values.tunnelPort }}
|
||||
tunnel-port: {{ .Values.tunnelPort | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tunnelSourcePortRange }}
|
||||
tunnel-source-port-range: {{ .Values.tunnelSourcePortRange | quote }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.serviceNoBackendResponse }}
|
||||
service-no-backend-response: "{{ .Values.serviceNoBackendResponse }}"
|
||||
@@ -931,8 +927,9 @@ data:
|
||||
operator-api-serve-addr: {{ $defaultOperatorApiServeAddr | quote }}
|
||||
{{- end }}
|
||||
|
||||
enable-hubble: {{ .Values.hubble.enabled | quote }}
|
||||
{{- if .Values.hubble.enabled }}
|
||||
# Enable Hubble gRPC service.
|
||||
enable-hubble: {{ .Values.hubble.enabled | quote }}
|
||||
# UNIX domain socket for Hubble server to listen to.
|
||||
hubble-socket-path: {{ .Values.hubble.socketPath | quote }}
|
||||
{{- if hasKey .Values.hubble "eventQueueSize" }}
|
||||
@@ -944,7 +941,7 @@ data:
|
||||
# Capacity of the buffer to store recent events.
|
||||
hubble-event-buffer-capacity: {{ .Values.hubble.eventBufferCapacity | quote }}
|
||||
{{- end }}
|
||||
{{- if or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled}}
|
||||
{{- if .Values.hubble.metrics.enabled }}
|
||||
# Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this
|
||||
# field is not set.
|
||||
hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"
|
||||
@@ -956,20 +953,14 @@ data:
|
||||
hubble-metrics-server-tls-client-ca-files: /var/lib/cilium/tls/hubble-metrics/client-ca.crt
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.hubble.metrics.enabled }}
|
||||
# A space separated list of metrics to enable. See [0] for available metrics.
|
||||
#
|
||||
# https://github.com/cilium/hubble/blob/master/Documentation/metrics.md
|
||||
hubble-metrics: {{- range .Values.hubble.metrics.enabled }}
|
||||
{{.}}
|
||||
{{- end}}
|
||||
{{- if .Values.hubble.metrics.dynamic.enabled }}
|
||||
hubble-dynamic-metrics-config-path: /dynamic-metrics-config/dynamic-metrics.yaml
|
||||
{{- end }}
|
||||
enable-hubble-open-metrics: {{ .Values.hubble.metrics.enableOpenMetrics | quote }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.hubble.redact }}
|
||||
{{- if eq .Values.hubble.redact.enabled true }}
|
||||
# Enables hubble redact capabilities
|
||||
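The surrounding hunks gate the `hubble-metrics-server` and `hubble-metrics` ConfigMap entries on `hubble.metrics.enabled` and the dynamic-metrics flag. As a rough, hedged example (the metric names and port are illustrative of commonly used Hubble settings, not taken from this diff), the corresponding values fragment could look like:

```yaml
# Hypothetical values fragment driving the hubble-metrics entries rendered above.
hubble:
  enabled: true
  metrics:
    enabled:
      - dns
      - drop
      - tcp
      - flow
    enableOpenMetrics: true
    port: 9965
```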
@@ -1013,6 +1004,10 @@ data:
|
||||
hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.hubble.metrics.dynamic.enabled }}
|
||||
hubble-dynamic-metrics-config-path: /dynamic-metrics-config/dynamic-metrics.yaml
|
||||
hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"
|
||||
{{- end }}
|
||||
{{- if hasKey .Values.hubble "listenAddress" }}
|
||||
# An additional address for Hubble server to listen to (e.g. ":4244").
|
||||
hubble-listen-address: {{ .Values.hubble.listenAddress | quote }}
|
||||
@@ -1046,8 +1041,8 @@ data:
|
||||
{{- else }}
|
||||
ipam: {{ $ipam | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.ipam.multiPoolPreAllocation }}
|
||||
ipam-multi-pool-pre-allocation: {{ .Values.ipam.multiPoolPreAllocation | quote }}
|
||||
{{- if hasKey .Values.ipam "multiPoolPreAllocation" }}
|
||||
ipam-multi-pool-pre-allocation: {{ .Values.ipam.multiPoolPreAllocation }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.ipam.ciliumNodeUpdateRate }}
|
||||
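The hunk above changes the `ipam-multi-pool-pre-allocation` entry from an `if`/`quote` form to a `hasKey`-guarded, unquoted form. For context, a hedged sketch of values that would exercise this path follows; the pool names and counts are placeholders.

```yaml
# Hypothetical multi-pool IPAM configuration; only multiPoolPreAllocation is rendered
# by the template hunk above.
ipam:
  mode: multi-pool
  multiPoolPreAllocation: "default=8,mars=16"
```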
@@ -1340,10 +1335,6 @@ data:
|
||||
external-envoy-proxy: {{ include "envoyDaemonSetEnabled" . | quote }}
|
||||
envoy-base-id: {{ .Values.envoy.baseID | quote }}
|
||||
|
||||
{{- if .Values.envoy.policyRestoreTimeoutDuration }}
|
||||
envoy-policy-restore-timeout: {{ .Values.envoy.policyRestoreTimeoutDuration | quote }}
|
||||
{{- end }}
|
||||
|
||||
{{- if .Values.envoy.log.path }}
|
||||
envoy-log: {{ .Values.envoy.log.path | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -41,9 +41,6 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
@@ -69,9 +66,6 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
|
||||
@@ -7,23 +7,24 @@ kind: RoleBinding
|
||||
metadata:
|
||||
name: cilium-operator-ingress-secrets
|
||||
namespace: {{ .Values.ingressController.secretsNamespace.name | quote }}
|
||||
{{- with .Values.commonLabels }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: cilium-operator-ingress-secrets
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceAccounts.operator.name | quote }}
|
||||
namespace: {{ include "cilium.namespace" . }}
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceAccounts.operator.name | quote }}
|
||||
namespace: {{ include "cilium.namespace" . }}
|
||||
{{- end }}
|
||||
|
||||
{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create .Values.gatewayAPI.enabled .Values.gatewayAPI.secretsNamespace.sync .Values.gatewayAPI.secretsNamespace.name }}
|
||||
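These RoleBinding hunks move the `labels:` block so that `app.kubernetes.io/part-of: cilium` is always emitted with any `commonLabels` appended inside it, while `operator.annotations` stay optional. A hedged example of the input values and the metadata they would roughly produce (label and annotation values here are invented for illustration):

```yaml
# Hypothetical input values.
commonLabels:
  team: platform
operator:
  annotations:
    example.com/owner: netops

# Approximate metadata rendered by the RoleBinding template above.
metadata:
  name: cilium-operator-ingress-secrets
  labels:
    app.kubernetes.io/part-of: cilium
    team: platform
  annotations:
    example.com/owner: netops
```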
@@ -33,15 +34,12 @@ kind: RoleBinding
|
||||
metadata:
|
||||
name: cilium-operator-gateway-secrets
|
||||
namespace: {{ .Values.gatewayAPI.secretsNamespace.name | quote }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
@@ -59,15 +57,12 @@ kind: RoleBinding
|
||||
metadata:
|
||||
name: cilium-operator-tlsinterception-secrets
|
||||
namespace: {{ .Values.tls.secretsNamespace.name | quote }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.operator.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
app.kubernetes.io/part-of: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
|
||||
@@ -1,4 +1,4 @@
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.serviceMonitor.enabled }}
{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
|
||||
@@ -4,13 +4,10 @@ kind: Service
|
||||
metadata:
|
||||
name: spire-server
|
||||
namespace: {{ .Values.authentication.mutual.spire.install.namespace }}
|
||||
{{- with .Values.commonLabels }}
|
||||
labels:
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.service.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if or .Values.authentication.mutual.spire.install.server.service.annotations .Values.authentication.mutual.spire.annotations }}
|
||||
annotations:
|
||||
{{- with .Values.authentication.mutual.spire.annotations }}
|
||||
@@ -20,6 +17,10 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.service.labels }}
|
||||
labels:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.authentication.mutual.spire.install.server.service.type }}
|
||||
ports:
|
||||
|
||||
@@ -4,6 +4,10 @@ kind: StatefulSet
|
||||
metadata:
|
||||
name: spire-server
|
||||
namespace: {{ .Values.authentication.mutual.spire.install.namespace }}
|
||||
{{- with .Values.commonLabels }}
|
||||
labels:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if or .Values.authentication.mutual.spire.install.server.annotations .Values.authentication.mutual.spire.annotations }}
|
||||
annotations:
|
||||
{{- with .Values.authentication.mutual.spire.annotations }}
|
||||
@@ -15,12 +19,9 @@ metadata:
|
||||
{{- end }}
|
||||
labels:
|
||||
app: spire-server
|
||||
{{- with .Values.commonLabels }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.authentication.mutual.spire.install.server.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
|
||||
@@ -519,14 +519,6 @@
|
||||
"disableExternalIPMitigation": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"distributedLRU": {
|
||||
"properties": {
|
||||
"enabled": {
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"enableTCX": {
|
||||
"type": "boolean"
|
||||
},
|
||||
@@ -2118,12 +2110,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"policyRestoreTimeoutDuration": {
|
||||
"type": [
|
||||
"null",
|
||||
"string"
|
||||
]
|
||||
},
|
||||
"priorityClassName": {
|
||||
"type": [
|
||||
"null",
|
||||
@@ -5476,9 +5462,6 @@
|
||||
"tunnelProtocol": {
|
||||
"type": "string"
|
||||
},
|
||||
"tunnelSourcePortRange": {
|
||||
"type": "string"
|
||||
},
|
||||
"updateStrategy": {
|
||||
"properties": {
|
||||
"rollingUpdate": {
|
||||
|
||||
@@ -191,10 +191,10 @@ image:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium"
|
||||
tag: "v1.17.2"
|
||||
tag: "v1.17.1"
|
||||
pullPolicy: "IfNotPresent"
|
||||
# cilium-digest
|
||||
digest: "sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
|
||||
digest: "sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866"
|
||||
useDigest: true
|
||||
# -- Scheduling configurations for cilium pods
|
||||
scheduling:
|
||||
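The values.yaml hunk above bumps the agent image tag and digest. For reference, a hedged sketch of pinning a specific agent image through the same fields is shown below; the tag and digest are the ones from the hunk, and as I read the chart's image helper, a non-empty `image.override` (commented out here) would take precedence over the individual fields.

```yaml
# Pin the agent image via tag + digest (useDigest makes the digest authoritative).
image:
  repository: "quay.io/cilium/cilium"
  tag: "v1.17.2"
  digest: "sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
  useDigest: true
  pullPolicy: "IfNotPresent"
  # override: "registry.example.com/cilium/cilium:custom"  # hypothetical full reference
```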
@@ -495,13 +495,6 @@ bpf:
|
||||
# tracking table.
|
||||
# @default -- `262144`
|
||||
ctAnyMax: ~
|
||||
# -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps
|
||||
# which Cilium uses. This improves performance significantly, but it is also
|
||||
# recommended to increase BPF map sizing along with that.
|
||||
distributedLRU:
|
||||
# -- Enable distributed LRU backend memory. For compatibility with existing
|
||||
# installations it is off by default.
|
||||
enabled: false
|
||||
# -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
|
||||
# Helm configuration for BPF events map rate limiting is experimental and might change
|
||||
# in upcoming releases.
|
||||
@@ -1440,9 +1433,9 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-relay"
|
||||
tag: "v1.17.2"
|
||||
tag: "v1.17.1"
|
||||
# hubble-relay-digest
|
||||
digest: "sha256:42a8db5c256c516cacb5b8937c321b2373ad7a6b0a1e5a5120d5028433d586cc"
|
||||
digest: "sha256:397e8fbb188157f744390a7b272a1dec31234e605bcbe22d8919a166d202a3dc"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Specifies the resources for the hubble-relay pods
|
||||
@@ -1691,8 +1684,8 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-ui-backend"
|
||||
tag: "v0.13.2"
|
||||
digest: "sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15"
|
||||
tag: "v0.13.1"
|
||||
digest: "sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Hubble-ui backend security context.
|
||||
@@ -1725,8 +1718,8 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-ui"
|
||||
tag: "v0.13.2"
|
||||
digest: "sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392"
|
||||
tag: "v0.13.1"
|
||||
digest: "sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Hubble-ui frontend security context.
|
||||
@@ -2339,11 +2332,6 @@ envoy:
|
||||
xffNumTrustedHopsL7PolicyIngress: 0
|
||||
# -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners.
|
||||
xffNumTrustedHopsL7PolicyEgress: 0
|
||||
# @schema
|
||||
# type: [null, string]
|
||||
# @schema
|
||||
# -- Max duration to wait for endpoint policies to be restored on restart. Default "3m".
|
||||
policyRestoreTimeoutDuration: null
|
||||
# -- Envoy container image.
|
||||
image:
|
||||
# @schema
|
||||
@@ -2351,9 +2339,9 @@ envoy:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium-envoy"
|
||||
tag: "v1.31.5-1741765102-efed3defcc70ab5b263a0fc44c93d316b846a211"
|
||||
tag: "v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae"
|
||||
pullPolicy: "IfNotPresent"
|
||||
digest: "sha256:377c78c13d2731f3720f931721ee309159e782d882251709cb0fac3b42c03f4b"
|
||||
digest: "sha256:fc708bd36973d306412b2e50c924cd8333de67e0167802c9b48506f9d772f521"
|
||||
useDigest: true
|
||||
# -- Additional containers added to the cilium Envoy DaemonSet.
|
||||
extraContainers: []
|
||||
@@ -2617,7 +2605,7 @@ tls:
|
||||
# type: [null, boolean]
|
||||
# @schema
|
||||
# -- Enable synchronization of Secrets for TLS Interception. If disabled and
|
||||
# tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent.
|
||||
# tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent.
|
||||
enabled: ~
|
||||
# -- Base64 encoded PEM values for the CA certificate and private key.
|
||||
# This can be used as common CA to generate certificates used by hubble and clustermesh components.
|
||||
@@ -2670,9 +2658,6 @@ routingMode: ""
|
||||
# -- Configure VXLAN and Geneve tunnel port.
|
||||
# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
|
||||
tunnelPort: 0
|
||||
# -- Configure VXLAN and Geneve tunnel source port range hint.
|
||||
# @default -- 0-0 to let the kernel driver decide the range
|
||||
tunnelSourcePortRange: 0-0
|
||||
# -- Configure what the response should be to traffic for a service without backends.
|
||||
# Possible values:
|
||||
# - reject (default)
|
||||
@@ -2708,15 +2693,15 @@ operator:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/operator"
|
||||
tag: "v1.17.2"
|
||||
tag: "v1.17.1"
|
||||
# operator-generic-digest
|
||||
genericDigest: "sha256:81f2d7198366e8dec2903a3a8361e4c68d47d19c68a0d42f0b7b6e3f0523f249"
|
||||
genericDigest: "sha256:628becaeb3e4742a1c36c4897721092375891b58bae2bfcae48bbf4420aaee97"
|
||||
# operator-azure-digest
|
||||
azureDigest: "sha256:455fb88b558b1b8ba09d63302ccce76b4930581be89def027184ab04335c20e0"
|
||||
azureDigest: "sha256:b9e3e3994f5fcf1832e1f344f3b3b544832851b1990f124b2c2c68e3ffe04a9b"
|
||||
# operator-aws-digest
|
||||
awsDigest: "sha256:955096183e22a203bbb198ca66e3266ce4dbc2b63f1a2fbd03f9373dcd97893c"
|
||||
awsDigest: "sha256:da74748057c836471bfdc0e65bb29ba0edb82916ec4b99f6a4f002b2fcc849d6"
|
||||
# operator-alibabacloud-digest
|
||||
alibabacloudDigest: "sha256:7cb8c23417f65348bb810fe92fb05b41d926f019d77442f3fa1058d17fea7ffe"
|
||||
alibabacloudDigest: "sha256:034b479fba340f9d98510e509c7ce1c36e8889a109d5f1c2240fcb0942bc772c"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
suffix: ""
|
||||
@@ -2991,9 +2976,9 @@ preflight:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium"
|
||||
tag: "v1.17.2"
|
||||
tag: "v1.17.1"
|
||||
# cilium-digest
|
||||
digest: "sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
|
||||
digest: "sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- The priority class to use for the preflight pod.
|
||||
@@ -3140,9 +3125,9 @@ clustermesh:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/clustermesh-apiserver"
|
||||
tag: "v1.17.2"
|
||||
tag: "v1.17.1"
|
||||
# clustermesh-apiserver-digest
|
||||
digest: "sha256:981250ebdc6e66e190992eaf75cfca169113a8f08d5c3793fe15822176980398"
|
||||
digest: "sha256:1de22f46bfdd638de72c2224d5223ddc3bbeacda1803cb75799beca3d4bf7a4c"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- TCP port for the clustermesh-apiserver health API.
|
||||
@@ -3649,7 +3634,7 @@ authentication:
|
||||
override: ~
|
||||
repository: "docker.io/library/busybox"
|
||||
tag: "1.37.0"
|
||||
digest: "sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0"
|
||||
digest: "sha256:a5d0ce49aa801d475da48f8cb163c354ab95cab073cd3c138bd458fc8257fbf1"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# SPIRE agent configuration
|
||||
|
||||
@@ -500,13 +500,6 @@ bpf:
|
||||
# tracking table.
|
||||
# @default -- `262144`
|
||||
ctAnyMax: ~
|
||||
# -- Control to use a distributed per-CPU backend memory for the core BPF LRU maps
|
||||
# which Cilium uses. This improves performance significantly, but it is also
|
||||
# recommended to increase BPF map sizing along with that.
|
||||
distributedLRU:
|
||||
# -- Enable distributed LRU backend memory. For compatibility with existing
|
||||
# installations it is off by default.
|
||||
enabled: false
|
||||
# -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
|
||||
# Helm configuration for BPF events map rate limiting is experimental and might change
|
||||
# in upcoming releases.
|
||||
@@ -2358,11 +2351,6 @@ envoy:
|
||||
xffNumTrustedHopsL7PolicyIngress: 0
|
||||
# -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners.
|
||||
xffNumTrustedHopsL7PolicyEgress: 0
|
||||
# @schema
|
||||
# type: [null, string]
|
||||
# @schema
|
||||
# -- Max duration to wait for endpoint policies to be restored on restart. Default "3m".
|
||||
policyRestoreTimeoutDuration: null
|
||||
# -- Envoy container image.
|
||||
image:
|
||||
# @schema
|
||||
@@ -2638,7 +2626,7 @@ tls:
|
||||
# type: [null, boolean]
|
||||
# @schema
|
||||
# -- Enable synchronization of Secrets for TLS Interception. If disabled and
|
||||
# tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent.
|
||||
# tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent.
|
||||
enabled: ~
|
||||
# -- Base64 encoded PEM values for the CA certificate and private key.
|
||||
# This can be used as common CA to generate certificates used by hubble and clustermesh components.
|
||||
@@ -2691,9 +2679,6 @@ routingMode: ""
|
||||
# -- Configure VXLAN and Geneve tunnel port.
|
||||
# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
|
||||
tunnelPort: 0
|
||||
# -- Configure VXLAN and Geneve tunnel source port range hint.
|
||||
# @default -- 0-0 to let the kernel driver decide the range
|
||||
tunnelSourcePortRange: 0-0
|
||||
# -- Configure what the response should be to traffic for a service without backends.
|
||||
# Possible values:
|
||||
# - reject (default)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.