Sync 0.29.1 to proxmox integration (#778)
9  .github/workflows/pre-commit.yml  (vendored)
@@ -1,7 +1,12 @@
name: Pre-Commit Checks

-on: [push, pull_request]
+on:
+  push:
+    branches:
+    - main
+  pull_request:
+    paths-ignore:
+    - '**.md'

jobs:
  pre-commit:
    runs-on: ubuntu-22.04
96  .github/workflows/pull-requests-release.yaml  (vendored, new file)
@@ -0,0 +1,96 @@
name: Releasing PR

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened, closed]

jobs:
  verify:
    name: Test Release
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      contains(github.event.pull_request.labels.*.name, 'release') &&
      github.event.action != 'closed'

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Run tests
        run: make test

  finalize:
    name: Finalize Release
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const match = branch.match(/^release-(v\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);

            if (!match) {
              core.setFailed(`Branch '${branch}' does not match expected format 'release-vX.Y.Z[-suffix]'`);
            } else {
              const tag = match[1];
              core.setOutput('tag', tag);
              console.log(`✅ Extracted tag: ${tag}`);
            }

      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create tag on merged commit
        run: |
          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
          git push origin ${{ steps.get_tag.outputs.tag }}

      - name: Publish draft release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            const release = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!release) {
              throw new Error(`Draft release with tag ${tag} not found`);
            }

            await github.rest.repos.updateRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: release.id,
              draft: false
            });

            console.log(`✅ Published release for ${tag}`);
39  .github/workflows/pull-requests.yaml  (vendored, new file)
@@ -0,0 +1,39 @@
name: Pull Request

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

jobs:
  e2e:
    name: Build and Test
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    if: |
      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: make build
        run: |
          make build

      - name: make test
        run: |
          make test
162  .github/workflows/tags.yaml  (vendored, new file)
@@ -0,0 +1,162 @@
name: Versioned Tag

on:
  push:
    tags:
      - 'v*.*.*'

jobs:
  prepare-release:
    name: Prepare Release
    runs-on: [self-hosted]
    permissions:
      contents: write
      packages: write
      pull-requests: write

    steps:
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
            if (existing) {
              core.setOutput('skip', 'true');
            } else {
              core.setOutput('skip', 'false');
            }

      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io

      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build

      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        env:
          GIT_AUTHOR_NAME: ${{ github.actor }}
          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
        run: |
          git config user.name "$GIT_AUTHOR_NAME"
          git config user.email "$GIT_AUTHOR_EMAIL"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"

      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH_NAME"
          git push origin "$BRANCH_NAME" --force

      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const branch = `release-${version}`;
            const base = 'main';

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${branch}`,
              base
            });

            if (prs.data.length === 0) {
              const newPr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head: branch,
                base: base,
                title: `Release v${version}`,
                body:
                  `This PR prepares the release \`v${version}\`.\n` +
                  `(Please merge it before releasing draft)`,
                draft: false
              });

              console.log(`Created pull request #${newPr.data.number} from ${branch} to ${base}`);

              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: newPr.data.number,
                labels: ['release']
              });

            } else {
              console.log(`Pull request already exists from ${branch} to ${base}`);
            }

      - name: Create or reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: create_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });

            let release = releases.data.find(r => r.tag_name === tag);
            if (!release) {
              release = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: `${tag}`,
                draft: true,
                prerelease: false
              });
            }
            core.setOutput('upload_url', release.upload_url);

      - name: Build assets
        if: steps.check_release.outputs.skip == 'false'
        run: make assets
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Delete pushed tag
        if: steps.check_release.outputs.skip == 'false'
        run: |
          git push --delete origin ${GITHUB_REF#refs/tags/}

      - name: Run tests
        run: make test
@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas o
* Problems found while setting up the development environment
* Gaps in our documentation
-* Bugs in our Github actions
+* Bugs in our GitHub actions

-First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).
+First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

The guidelines below are a starting point. We don't want to limit your
creativity, passion, and initiative. If you think there's a better way, please
-feel free to bring it up in a Github discussion, or open a pull request. We're
+feel free to bring it up in a GitHub discussion, or open a pull request. We're
certain there are always better ways to do things, we just need to start some
constructive dialogue!
@@ -23,9 +23,9 @@ We welcome many types of contributions including:
* New features
* Builds, CI/CD
* Bug fixes
-* [Documentation](https://github.com/cozystack/cozystack-website/tree/main)
+* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
* Issue Triage
-* Answering questions on Slack or Github Discussions
+* Answering questions on Slack or GitHub Discussions
* Web design
* Communications / Social Media / Blog Posts
* Events participation
@@ -34,7 +34,7 @@ We welcome many types of contributions including:
## Ask for Help

The best way to reach us with a question when contributing is to drop a line in
-our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.
+our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.

## Raising Issues
91  GOVERNANCE.md  (new file)
@@ -0,0 +1,91 @@
# Cozystack Governance

This document defines the governance structure of the Cozystack community, outlining how members collaborate to achieve shared goals.

## Overview

**Cozystack**, a Cloud Native Computing Foundation (CNCF) project, is committed
to building an open, inclusive, productive, and self-governing open source
community focused on building a high-quality open source PaaS and framework for building clouds.

## Code Repositories

The following code repositories are governed by the Cozystack community and
maintained under the `cozystack` namespace:

* **[Cozystack](https://github.com/cozystack/cozystack):** Main Cozystack codebase
* **[website](https://github.com/cozystack/website):** Cozystack website and documentation sources
* **[Talm](https://github.com/cozystack/talm):** Tool for managing Talos Linux the GitOps way
* **[cozy-proxy](https://github.com/cozystack/cozy-proxy):** A simple kube-proxy addon for 1:1 NAT services in Kubernetes with NFT backend
* **[cozystack-telemetry-server](https://github.com/cozystack/cozystack-telemetry-server):** Cozystack telemetry
* **[talos-bootstrap](https://github.com/cozystack/talos-bootstrap):** An interactive Talos Linux installer
* **[talos-meta-tool](https://github.com/cozystack/talos-meta-tool):** Tool for writing network metadata into META partition

## Community Roles

* **Users:** Members that engage with the Cozystack community via any medium, including Slack, Telegram, GitHub, and mailing lists.
* **Contributors:** Members contributing to the projects by contributing and reviewing code, writing documentation,
  responding to issues, participating in proposal discussions, and so on.
* **Directors:** Non-technical project leaders.
* **Maintainers**: Technical project leaders.

## Contributors

Cozystack is for everyone. Anyone can become a Cozystack contributor simply by
contributing to the project, whether through code, documentation, blog posts,
community management, or other means.
As with all Cozystack community members, contributors are expected to follow the
[Cozystack Code of Conduct](https://github.com/cozystack/cozystack/blob/main/CODE_OF_CONDUCT.md).

All contributions to Cozystack code, documentation, or other components in the
Cozystack GitHub organisation must follow the
[contributing guidelines](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md).
Whether these contributions are merged into the project is the prerogative of the maintainers.

## Directors

Directors are responsible for non-technical leadership functions within the project.
This includes representing Cozystack and its maintainers to the community, to the press,
and to the outside world; interfacing with CNCF and other governance entities;
and participating in project decision-making processes when appropriate.

Directors are elected by a majority vote of the maintainers.

## Maintainers

Maintainers have the right to merge code into the project.
Anyone can become a Cozystack maintainer (see "Becoming a maintainer" below).

### Expectations

Cozystack maintainers are expected to:

* Review pull requests, triage issues, and fix bugs in their areas of
  expertise, ensuring that all changes go through the project's code review
  and integration processes.
* Monitor cncf-cozystack-* emails, the Cozystack Slack channels in Kubernetes
  and CNCF Slack workspaces, Telegram groups, and help out when possible.
* Rapidly respond to any time-sensitive security release processes.
* Attend Cozystack community meetings.

If a maintainer is no longer interested in or cannot perform the duties
listed above, they should move themselves to emeritus status.
If necessary, this can also occur through the decision-making process outlined below.

### Becoming a Maintainer

Anyone can become a Cozystack maintainer. Maintainers should be extremely
proficient in cloud native technologies and/or Go; have relevant domain expertise;
have the time and ability to meet the maintainer's expectations above;
and demonstrate the ability to work with the existing maintainers and project processes.

To become a maintainer, start by expressing interest to existing maintainers.
Existing maintainers will then ask you to demonstrate the qualifications above
by contributing PRs, doing code reviews, and other such tasks under their guidance.
After several months of working together, maintainers will decide whether to grant maintainer status.

## Project Decision-making Process

Ideally, all project decisions are resolved by consensus of maintainers and directors.
If this is not possible, a vote will be called.
The voting process is a simple majority in which each maintainer and director receives one vote.
24  Makefile
@@ -1,6 +1,13 @@
.PHONY: manifests repos assets

-build:
+build-deps:
+	@command -V find docker skopeo jq gh helm > /dev/null
+	@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
+	@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
+	@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
+	@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)
+
+build: build-deps
	make -C packages/apps/http-cache image
	make -C packages/apps/postgres image
	make -C packages/apps/mysql image
@@ -19,10 +26,6 @@ build:
	make -C packages/core/installer image
	make manifests

-manifests:
-	(cd packages/core/installer/; helm template -n cozy-installer installer .) > manifests/cozystack-installer.yaml
-	sed -i 's|@sha256:[^"]\+||' manifests/cozystack-installer.yaml
-
repos:
	rm -rf _out
	make -C packages/apps check-version-map
@@ -33,14 +36,21 @@ repos:
	mkdir -p _out/logos
	cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/

+manifests:
+	mkdir -p _out/assets
+	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
+
assets:
	make -C packages/core/installer/ assets

test:
+	test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
	make -C packages/core/testing apply
	make -C packages/core/testing test
-	make -C packages/core/testing test-applications
+	#make -C packages/core/testing test-applications

generate:
	hack/update-codegen.sh

+upload_assets: manifests
+	hack/upload-assets.sh
33  README.md
@@ -12,20 +12,21 @@
**Cozystack** is a free PaaS platform and framework for building clouds.

-With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
+Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.

-You can use Cozystack to build your own cloud or to provide a cost-effective development environments.
+Use Cozystack to build your own cloud or provide a cost-effective development environment.

## Use-Cases

-* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
-  You can use Cozystack as backend for a public cloud
+* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
+  You can use Cozystack as a backend for a public cloud

-* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
-  You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach
+* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
+  You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach

-* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
-  You can use Cozystack as Kubernetes distribution for Bare Metal
+* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
+  You can use Cozystack as a Kubernetes distribution for Bare Metal

## Screenshot
@@ -33,11 +34,11 @@ You can use Cozystack as Kubernetes distribution for Bare Metal
## Documentation

-The documentation is located on official [cozystack.io](https://cozystack.io) website.
+The documentation is located on the [cozystack.io](https://cozystack.io) website.

-Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
+Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.

-If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.
+If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.

## Versioning
@@ -50,15 +51,15 @@ A full list of the available releases is available in the GitHub repository's [R
Contributions are highly appreciated and very welcomed!

-In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
-In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
+In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
+If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.

-You can express your intention in working on the fix on your own.
+You can express your intention to work on the fix on your own.
Commits are used to generate the changelog, and their author will be referenced in it.

-In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
+If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).

-You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
+You are welcome to join our weekly community meetings (just add these events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).

## License
@@ -178,6 +178,15 @@ func main() {
		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
		os.Exit(1)
	}

+	if err = (&controller.WorkloadReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "Workload")
+		os.Exit(1)
+	}
+
	// +kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
16  hack/e2e.sh
@@ -84,7 +84,7 @@ done
# Start VMs
for i in 1 2 3; do
-  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
+  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
@@ -113,6 +113,11 @@ machine:
        - usermode_helper=disabled
    - name: zfs
    - name: spl
+  registries:
+    mirrors:
+      docker.io:
+        endpoints:
+          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
@@ -313,7 +318,12 @@ kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"s
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for HelmReleases be installed
-kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
+kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
+
+if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
+  flux reconcile hr monitoring -n tenant-root --force
+  kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
+fi

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
"dashboard": true
@@ -328,7 +338,7 @@ kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root
# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
-kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
+kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for grafana
@@ -1,12 +1,13 @@
#!/bin/sh
set -e

file=versions_map

charts=$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')

# <chart> <version> <commit>
new_map=$(
  for chart in $charts; do
-    awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' $chart/Chart.yaml
+    awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' "$chart/Chart.yaml"
  done
)
@@ -15,47 +16,46 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
  exit 0
fi

-miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if (!($1 " " $2 in new_map)) print $1, $2, $3}' - $file)
+miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")

# search accross all tags sorted by version
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')

resolved_miss_map=$(
-  echo "$miss_map" | while read chart version commit; do
-    if [ "$commit" = HEAD ]; then
-      line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
-      change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')
-
-      if [ "$change_commit" = "00000000" ]; then
-        # Not committed yet, use previous commit
-        line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
-        commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
-        if [ $(echo $commit | cut -c1) = "^" ]; then
-          # Previous commit not exists
-          commit=$(echo $commit | cut -c2-)
-        fi
-      else
-        # Committed, but version_map wasn't updated
-        line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
-        change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
-        if [ $(echo $change_commit | cut -c1) = "^" ]; then
-          # Previous commit not exists
-          commit=$(echo $change_commit | cut -c2-)
-        else
-          commit=$(git describe --always "$change_commit~1")
-        fi
+  echo "$miss_map" | while read -r chart version commit; do
+    # if version is found in HEAD, it's HEAD
+    if [ $(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml) = "${version}" ]; then
+      echo "$chart $version HEAD"
+      continue
+    fi
+
+    # if commit is not HEAD, check if it's valid
+    if [ $commit != "HEAD" ]; then
+      if [ $(git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') != "${version}" ]; then
+        echo "Commit $commit for $chart $version is not valid" >&2
+        exit 1
+      fi
+
+      # Check if the commit belongs to the main branch
+      if ! git merge-base --is-ancestor "$commit" main; then
+        # Find the closest parent commit that belongs to main
+        commit_in_main=$(git log --pretty=format:"%h" main -- "$chart" | head -n 1)
+        if [ -n "$commit_in_main" ]; then
+          commit="$commit_in_main"
+        else
+          # No valid commit found in main branch for $chart, skipping..."
+          continue
+        fi
+      fi
+      commit=$(git rev-parse --short "$commit")
+      echo "$chart $version $commit"
+      continue
+    fi
-    echo "$chart $version $commit"
+
+    # if commit is HEAD, but version is not found in HEAD, check all tags
+    found_tag=""
+    for tag in $search_commits; do
+      if [ $(git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | awk '$1 == "version:" {print $2}') = "${version}" ]; then
+        found_tag=$(git rev-parse --short "${tag}")
+        break
+      fi
+    done
+
+    if [ -z "$found_tag" ]; then
+      echo "Can't find $chart $version in any version tag, removing it" >&2
+      continue
+    fi
+
+    echo "$chart $version $found_tag"
  done
)
11  hack/upload-assets.sh  (executable file)
@@ -0,0 +1,11 @@
#!/bin/bash
set -xe

version=${VERSION:-$(git describe --tags)}

gh release upload --clobber $version _out/assets/cozystack-installer.yaml
gh release upload --clobber $version _out/assets/metal-amd64.iso
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
gh release upload --clobber $version _out/assets/kernel-amd64
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
87  internal/controller/workload_controller.go  (new file)
@@ -0,0 +1,87 @@
package controller

import (
	"context"
	"strings"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
type WorkloadReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)
	w := &cozyv1alpha1.Workload{}
	err := r.Get(ctx, req.NamespacedName, w)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Unable to fetch Workload")
		return ctrl.Result{}, err
	}

	// it's being deleted, nothing to handle
	if w.DeletionTimestamp != nil {
		return ctrl.Result{}, nil
	}

	t := getMonitoredObject(w)
	err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

	// found object, nothing to do
	if err == nil {
		return ctrl.Result{}, nil
	}

	// error getting object but not 404 -- requeue
	if !apierrors.IsNotFound(err) {
		logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
		return ctrl.Result{}, err
	}

	err = r.Delete(ctx, w)
	if err != nil {
		logger.Error(err, "failed to delete workload")
	}
	return ctrl.Result{}, err
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch WorkloadMonitor objects
		For(&cozyv1alpha1.Workload{}).
		Complete(r)
}

func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
	if strings.HasPrefix(w.Name, "pvc-") {
		obj := &corev1.PersistentVolumeClaim{}
		obj.Name = strings.TrimPrefix(w.Name, "pvc-")
		obj.Namespace = w.Namespace
		return obj
	}
	if strings.HasPrefix(w.Name, "svc-") {
		obj := &corev1.Service{}
		obj.Name = strings.TrimPrefix(w.Name, "svc-")
		obj.Namespace = w.Namespace
		return obj
	}
	obj := &corev1.Pod{}
	obj.Name = w.Name
	obj.Namespace = w.Namespace
	return obj
}
@@ -3,6 +3,7 @@ package controller
import (
	"context"
	"encoding/json"
	"fmt"
	"sort"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +34,17 @@ type WorkloadMonitorReconciler struct {
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
+// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch

+// isServiceReady checks if the service has an external IP bound
+func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
+	return len(svc.Status.LoadBalancer.Ingress) > 0
+}
+
+// isPVCReady checks if the PVC is bound
+func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
+	return pvc.Status.Phase == corev1.ClaimBound
+}
+
// isPodReady checks if the Pod is in the Ready condition.
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
@@ -88,6 +100,96 @@ func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
	obj.SetOwnerReferences(owners)
}

+// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
+func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
+	ctx context.Context,
+	monitor *cozyv1alpha1.WorkloadMonitor,
+	svc corev1.Service,
+) error {
+	logger := log.FromContext(ctx)
+	workload := &cozyv1alpha1.Workload{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("svc-%s", svc.Name),
+			Namespace: svc.Namespace,
+		},
+	}
+
+	resources := make(map[string]resource.Quantity)
+
+	q := resource.MustParse("0")
+
+	for _, ing := range svc.Status.LoadBalancer.Ingress {
+		if ing.IP != "" {
+			q.Add(resource.MustParse("1"))
+		}
+	}
+
+	resources["public-ips"] = q
+
+	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
+		// Update owner references with the new monitor
+		updateOwnerReferences(workload.GetObjectMeta(), monitor)
+
+		workload.Labels = svc.Labels
+
+		// Fill Workload status fields:
+		workload.Status.Kind = monitor.Spec.Kind
+		workload.Status.Type = monitor.Spec.Type
+		workload.Status.Resources = resources
+		workload.Status.Operational = r.isServiceReady(&svc)
+
+		return nil
+	})
+	if err != nil {
+		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
+		return err
+	}
+
+	return nil
+}
+
+// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
+func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
+	ctx context.Context,
+	monitor *cozyv1alpha1.WorkloadMonitor,
+	pvc corev1.PersistentVolumeClaim,
+) error {
+	logger := log.FromContext(ctx)
+	workload := &cozyv1alpha1.Workload{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("pvc-%s", pvc.Name),
+			Namespace: pvc.Namespace,
+		},
+	}
+
+	resources := make(map[string]resource.Quantity)
+
+	for resourceName, resourceQuantity := range pvc.Status.Capacity {
+		resources[resourceName.String()] = resourceQuantity
+	}
+
+	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
+		// Update owner references with the new monitor
+		updateOwnerReferences(workload.GetObjectMeta(), monitor)
+
+		workload.Labels = pvc.Labels
+
+		// Fill Workload status fields:
+		workload.Status.Kind = monitor.Spec.Kind
+		workload.Status.Type = monitor.Spec.Type
+		workload.Status.Resources = resources
+		workload.Status.Operational = r.isPVCReady(&pvc)
+
+		return nil
+	})
+	if err != nil {
+		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
+		return err
+	}
+
+	return nil
+}
+
// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
	ctx context.Context,
@@ -205,6 +307,45 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
		}
	}

+	pvcList := &corev1.PersistentVolumeClaimList{}
+	if err := r.List(
+		ctx,
+		pvcList,
+		client.InNamespace(monitor.Namespace),
+		client.MatchingLabels(monitor.Spec.Selector),
+	); err != nil {
+		logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
+		return ctrl.Result{}, err
+	}
+
+	for _, pvc := range pvcList.Items {
+		if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
+			logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
+			continue
+		}
+	}
+
+	svcList := &corev1.ServiceList{}
+	if err := r.List(
+		ctx,
+		svcList,
+		client.InNamespace(monitor.Namespace),
+		client.MatchingLabels(monitor.Spec.Selector),
+	); err != nil {
+		logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
+		return ctrl.Result{}, err
+	}
+
+	for _, svc := range svcList.Items {
+		if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
+			continue
+		}
+		if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
+			logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
+			continue
+		}
+	}
+
	// Update WorkloadMonitor status based on observed pods
	monitor.Status.ObservedReplicas = observedReplicas
	monitor.Status.AvailableReplicas = availableReplicas
@@ -233,41 +374,51 @@ func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
		// Also watch Pod objects and map them back to WorkloadMonitor if labels match
		Watches(
			&corev1.Pod{},
-			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
-				pod, ok := obj.(*corev1.Pod)
-				if !ok {
-					return nil
-				}
-
-				var monitorList cozyv1alpha1.WorkloadMonitorList
-				// List all WorkloadMonitors in the same namespace
-				if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
-					return nil
-				}
-
-				// Match each monitor's selector with the Pod's labels
-				var requests []reconcile.Request
-				for _, m := range monitorList.Items {
-					matches := true
-					for k, v := range m.Spec.Selector {
-						if podVal, exists := pod.Labels[k]; !exists || podVal != v {
-							matches = false
-							break
-						}
-					}
-					if matches {
-						requests = append(requests, reconcile.Request{
-							NamespacedName: types.NamespacedName{
-								Namespace: m.Namespace,
-								Name:      m.Name,
-							},
-						})
-					}
-				}
-				return requests
-			}),
+			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
		).
+		// Watch PVCs as well
+		Watches(
+			&corev1.PersistentVolumeClaim{},
+			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
+		).
		// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
		Owns(&cozyv1alpha1.Workload{}).
		Complete(r)
}

+func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
+	return func(ctx context.Context, obj client.Object) []reconcile.Request {
+		concrete, ok := obj.(T)
+		if !ok {
+			return nil
+		}
+
+		var monitorList cozyv1alpha1.WorkloadMonitorList
+		// List all WorkloadMonitors in the same namespace
+		if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
+			return nil
+		}
+
+		labels := concrete.GetLabels()
+		// Match each monitor's selector with the Pod's labels
+		var requests []reconcile.Request
+		for _, m := range monitorList.Items {
+			matches := true
+			for k, v := range m.Spec.Selector {
+				if labelVal, exists := labels[k]; !exists || labelVal != v {
+					matches = false
+					break
+				}
+			}
+			if matches {
+				requests = append(requests, reconcile.Request{
+					NamespacedName: types.NamespacedName{
+						Namespace: m.Namespace,
+						Name:      m.Name,
+					},
+				})
+			}
+		}
+		return requests
+	}
+}
@@ -1,105 +0,0 @@
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: cozy-system
  labels:
    cozystack.io/system: "true"
    pod-security.kubernetes.io/enforce: privileged
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cozystack
  namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cozystack
subjects:
- kind: ServiceAccount
  name: cozystack
  namespace: cozy-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: Service
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  ports:
  - name: http
    port: 80
    targetPort: 8123
  selector:
    app: cozystack
  type: ClusterIP
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cozystack
  namespace: cozy-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cozystack
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
  template:
    metadata:
      labels:
        app: cozystack
    spec:
      hostNetwork: true
      serviceAccountName: cozystack
      containers:
      - name: cozystack
        image: "ghcr.io/cozystack/cozystack/installer:v0.28.0"
        env:
        - name: KUBERNETES_SERVICE_HOST
          value: localhost
        - name: KUBERNETES_SERVICE_PORT
          value: "7445"
        - name: K8S_AWAIT_ELECTION_ENABLED
          value: "1"
        - name: K8S_AWAIT_ELECTION_NAME
          value: cozystack
        - name: K8S_AWAIT_ELECTION_LOCK_NAME
          value: cozystack
        - name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
          value: cozy-system
        - name: K8S_AWAIT_ELECTION_IDENTITY
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
      - name: assets
        image: "ghcr.io/cozystack/cozystack/installer:v0.28.0"
        command:
        - /usr/bin/cozystack-assets-server
        - "-dir=/cozystack/assets"
        - "-address=:8123"
        ports:
        - name: http
          containerPort: 8123
      tolerations:
      - key: "node.kubernetes.io/not-ready"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node.cilium.io/agent-not-ready"
        operator: "Exists"
        effect: "NoSchedule"
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/clickhouse-backup:0.6.2@sha256:67dd53efa86b704fc5cb876aca055fef294b31ab67899b683a4821ea12582ea7
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/nginx-cache:0.3.1@sha256:2b82eae28239ca0f9968602c69bbb752cd2a5818e64934ccd06cb91d95d019c7
+ghcr.io/cozystack/cozystack/nginx-cache:0.4.0@sha256:529650c1aa6ee4ceba74af35b526e4e6f4ad44d9a8a75d1f2f2dbb015cbf194c
@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.17.0
+version: 0.17.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/cluster-autoscaler:0.15.2@sha256:967e51702102d0dbd97f9847de4159d62681b31eb606322d2c29755393c2236e
+ghcr.io/cozystack/cozystack/cluster-autoscaler:0.17.1@sha256:85371c6aabf5a7fea2214556deac930c600e362f92673464fe2443784e2869c3
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.15.2@sha256:5e054eae6274963b6e84f87bf3330c94325103c6407b08bfb1189da721333b5c
+ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.17.1@sha256:795d8e1ef4b2b0df2aa1e09d96cd13476ebb545b4bf4b5779b7547a70ef64cf9
@@ -3,12 +3,11 @@ FROM --platform=linux/amd64 golang:1.20.6 AS builder
RUN git clone https://github.com/kubevirt/cloud-provider-kubevirt /go/src/kubevirt.io/cloud-provider-kubevirt \
 && cd /go/src/kubevirt.io/cloud-provider-kubevirt \
- && git checkout da9e0cf
+ && git checkout 443a1fe

WORKDIR /go/src/kubevirt.io/cloud-provider-kubevirt

-# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/335
+# see: https://github.com/kubevirt/cloud-provider-kubevirt/pull/336
ADD patches /patches
RUN git apply /patches/*.diff
RUN go get 'k8s.io/endpointslice/util@v0.28' 'k8s.io/apiserver@v0.28'
@@ -1,20 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..95c31438 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -412,11 +412,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
 	// Create the desired port configuration
 	var desiredPorts []discovery.EndpointPort
 
-	for _, port := range service.Spec.Ports {
+	for i := range service.Spec.Ports {
 		desiredPorts = append(desiredPorts, discovery.EndpointPort{
-			Port:     &port.TargetPort.IntVal,
-			Protocol: &port.Protocol,
-			Name:     &port.Name,
+			Port:     &service.Spec.Ports[i].TargetPort.IntVal,
+			Protocol: &service.Spec.Ports[i].Protocol,
+			Name:     &service.Spec.Ports[i].Name,
 		})
 	}
@@ -1,129 +0,0 @@
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index a3c1aa33..6f6e3d32 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -108,32 +108,24 @@ func newRequest(reqType ReqType, obj interface{}, oldObj interface{}) *Request {
 }
 
 func (c *Controller) Init() error {
-
-	// Act on events from Services on the infra cluster. These are created by the EnsureLoadBalancer function.
-	// We need to watch for these events so that we can update the EndpointSlices in the infra cluster accordingly.
+	// Existing Service event handlers...
 	_, err := c.infraFactory.Core().V1().Services().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
-			// cast obj to Service
 			svc := obj.(*v1.Service)
-			// Only act on Services of type LoadBalancer
 			if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
 				klog.Infof("Service added: %v/%v", svc.Namespace, svc.Name)
 				c.queue.Add(newRequest(AddReq, obj, nil))
 			}
 		},
 		UpdateFunc: func(oldObj, newObj interface{}) {
-			// cast obj to Service
 			newSvc := newObj.(*v1.Service)
-			// Only act on Services of type LoadBalancer
 			if newSvc.Spec.Type == v1.ServiceTypeLoadBalancer {
 				klog.Infof("Service updated: %v/%v", newSvc.Namespace, newSvc.Name)
 				c.queue.Add(newRequest(UpdateReq, newObj, oldObj))
 			}
 		},
 		DeleteFunc: func(obj interface{}) {
-			// cast obj to Service
 			svc := obj.(*v1.Service)
-			// Only act on Services of type LoadBalancer
 			if svc.Spec.Type == v1.ServiceTypeLoadBalancer {
 				klog.Infof("Service deleted: %v/%v", svc.Namespace, svc.Name)
 				c.queue.Add(newRequest(DeleteReq, obj, nil))
@@ -144,7 +136,7 @@ func (c *Controller) Init() error {
 		return err
 	}
 
-	// Monitor endpoint slices that we are interested in based on known services in the infra cluster
+	// Existing EndpointSlice event handlers in tenant cluster...
 	_, err = c.tenantFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: func(obj interface{}) {
 			eps := obj.(*discovery.EndpointSlice)
@@ -194,10 +186,80 @@ func (c *Controller) Init() error {
 		return err
 	}
 
-	//TODO: Add informer for EndpointSlices in the infra cluster to watch for (unwanted) changes
+	// Add an informer for EndpointSlices in the infra cluster
+	_, err = c.infraFactory.Discovery().V1().EndpointSlices().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			eps := obj.(*discovery.EndpointSlice)
+			if c.managedByController(eps) {
+				svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+				if svcErr != nil {
+					klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+					return
+				}
+				if svc != nil {
+					klog.Infof("Infra EndpointSlice added: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+					c.queue.Add(newRequest(AddReq, svc, nil))
+				}
+			}
+		},
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			eps := newObj.(*discovery.EndpointSlice)
+			if c.managedByController(eps) {
+				svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+				if svcErr != nil {
+					klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s: %v", eps.Namespace, eps.Name, svcErr)
+					return
+				}
+				if svc != nil {
+					klog.Infof("Infra EndpointSlice updated: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+					c.queue.Add(newRequest(UpdateReq, svc, nil))
+				}
+			}
+		},
+		DeleteFunc: func(obj interface{}) {
+			eps := obj.(*discovery.EndpointSlice)
+			if c.managedByController(eps) {
+				svc, svcErr := c.getInfraServiceForEPS(context.TODO(), eps)
+				if svcErr != nil {
+					klog.Errorf("Failed to get infra Service for EndpointSlice %s/%s on delete: %v", eps.Namespace, eps.Name, svcErr)
+					return
+				}
+				if svc != nil {
+					klog.Infof("Infra EndpointSlice deleted: %v/%v, requeuing Service: %v/%v", eps.Namespace, eps.Name, svc.Namespace, svc.Name)
+					c.queue.Add(newRequest(DeleteReq, svc, nil))
+				}
+			}
+		},
+	})
+	if err != nil {
+		return err
+	}
+
 	return nil
 }
 
+// getInfraServiceForEPS returns the Service in the infra cluster associated with the given EndpointSlice.
+// It does this by reading the "kubernetes.io/service-name" label from the EndpointSlice, which should correspond
+// to the Service name. If not found or if the Service doesn't exist, it returns nil.
+func (c *Controller) getInfraServiceForEPS(ctx context.Context, eps *discovery.EndpointSlice) (*v1.Service, error) {
+	svcName := eps.Labels[discovery.LabelServiceName]
+	if svcName == "" {
+		// No service name label found, can't determine infra service.
+		return nil, nil
+	}
+
+	svc, err := c.infraClient.CoreV1().Services(c.infraNamespace).Get(ctx, svcName, metav1.GetOptions{})
+	if err != nil {
+		if k8serrors.IsNotFound(err) {
+			// Service doesn't exist
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return svc, nil
+}
+
 // Run starts an asynchronous loop that monitors and updates GKENetworkParamSet in the cluster.
 func (c *Controller) Run(numWorkers int, stopCh <-chan struct{}, controllerManagerMetrics *controllersmetrics.ControllerManagerMetrics) {
 	defer utilruntime.HandleCrash()
@@ -0,0 +1,689 @@
|
||||
diff --git a/.golangci.yml b/.golangci.yml
index cf72a41a2..1c9237e83 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -122,3 +122,9 @@ linters:
# - testpackage
# - revive
# - wsl
+issues:
+  exclude-rules:
+    - path: "kubevirteps_controller_test.go"
+      linters:
+        - govet
+      text: "declaration of \"err\" shadows"
diff --git a/cmd/kubevirt-cloud-controller-manager/kubevirteps.go b/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
index 74166b5d9..4e744f8de 100644
--- a/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
+++ b/cmd/kubevirt-cloud-controller-manager/kubevirteps.go
@@ -101,7 +101,18 @@ func startKubevirtCloudController(

	klog.Infof("Setting up kubevirtEPSController")

-	kubevirtEPSController := kubevirteps.NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, kubevirtCloud.Namespace())
+	clusterName := ccmConfig.ComponentConfig.KubeCloudShared.ClusterName
+	if clusterName == "" {
+		klog.Fatalf("Required flag --cluster-name is missing")
+	}
+
+	kubevirtEPSController := kubevirteps.NewKubevirtEPSController(
+		tenantClient,
+		infraClient,
+		infraDynamic,
+		kubevirtCloud.Namespace(),
+		clusterName,
+	)

	klog.Infof("Initializing kubevirtEPSController")

diff --git a/pkg/controller/kubevirteps/kubevirteps_controller.go b/pkg/controller/kubevirteps/kubevirteps_controller.go
index 6f6e3d322..b56882c12 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller.go
@@ -54,10 +54,10 @@ type Controller struct {
	infraDynamic dynamic.Interface
	infraFactory informers.SharedInformerFactory

-	infraNamespace string
-	queue          workqueue.RateLimitingInterface
-	maxRetries     int
-
+	infraNamespace       string
+	clusterName          string
+	queue                workqueue.RateLimitingInterface
+	maxRetries           int
	maxEndPointsPerSlice int
}

@@ -65,8 +65,9 @@ func NewKubevirtEPSController(
	tenantClient kubernetes.Interface,
	infraClient kubernetes.Interface,
	infraDynamic dynamic.Interface,
-	infraNamespace string) *Controller {
-
+	infraNamespace string,
+	clusterName string,
+) *Controller {
	tenantFactory := informers.NewSharedInformerFactory(tenantClient, 0)
	infraFactory := informers.NewSharedInformerFactoryWithOptions(infraClient, 0, informers.WithNamespace(infraNamespace))
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
@@ -79,6 +80,7 @@ func NewKubevirtEPSController(
		infraDynamic:   infraDynamic,
		infraFactory:   infraFactory,
		infraNamespace: infraNamespace,
+		clusterName:    clusterName,
		queue:          queue,
		maxRetries:     25,
		maxEndPointsPerSlice: 100,
@@ -320,22 +322,30 @@ func (c *Controller) processNextItem(ctx context.Context) bool {

// getInfraServiceFromTenantEPS returns the Service in the infra cluster that is associated with the given tenant endpoint slice.
func (c *Controller) getInfraServiceFromTenantEPS(ctx context.Context, slice *discovery.EndpointSlice) (*v1.Service, error) {
-	infraServices, err := c.infraClient.CoreV1().Services(c.infraNamespace).List(ctx,
-		metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s,%s=%s", kubevirt.TenantServiceNameLabelKey, slice.Labels["kubernetes.io/service-name"],
-			kubevirt.TenantServiceNamespaceLabelKey, slice.Namespace)})
+	tenantServiceName := slice.Labels[discovery.LabelServiceName]
+	tenantServiceNamespace := slice.Namespace
+
+	labelSelector := fmt.Sprintf(
+		"%s=%s,%s=%s,%s=%s",
+		kubevirt.TenantServiceNameLabelKey, tenantServiceName,
+		kubevirt.TenantServiceNamespaceLabelKey, tenantServiceNamespace,
+		kubevirt.TenantClusterNameLabelKey, c.clusterName,
+	)
+
+	svcList, err := c.infraClient.CoreV1().Services(c.infraNamespace).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+	})
	if err != nil {
-		klog.Errorf("Failed to get Service in Infra for EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+		klog.Errorf("Failed to get Service in Infra for EndpointSlice %s in namespace %s: %v", slice.Name, tenantServiceNamespace, err)
		return nil, err
	}
-	if len(infraServices.Items) > 1 {
-		// This should never be possible, only one service should exist for a given tenant endpoint slice
-		klog.Errorf("Multiple services found for tenant endpoint slice %s in namespace %s", slice.Name, slice.Namespace)
+	if len(svcList.Items) > 1 {
+		klog.Errorf("Multiple services found for tenant endpoint slice %s in namespace %s", slice.Name, tenantServiceNamespace)
		return nil, errors.New("multiple services found for tenant endpoint slice")
	}
-	if len(infraServices.Items) == 1 {
-		return &infraServices.Items[0], nil
+	if len(svcList.Items) == 1 {
+		return &svcList.Items[0], nil
	}
-	// No service found, possible if service is deleted.
	return nil, nil
}
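As an aside, the comma-joined selector built above ANDs all three equality requirements server-side. A standalone sketch of the string it produces, using hypothetical stand-ins for the kubevirt.*LabelKey constants:

package main

import "fmt"

func main() {
	// Hypothetical label keys; the real values live in the kubevirt package.
	const (
		tenantServiceNameLabelKey      = "cloud.kubevirt.io/tenant-service-name"
		tenantServiceNamespaceLabelKey = "cloud.kubevirt.io/tenant-service-namespace"
		tenantClusterNameLabelKey      = "cloud.kubevirt.io/tenant-cluster-name"
	)
	labelSelector := fmt.Sprintf(
		"%s=%s,%s=%s,%s=%s",
		tenantServiceNameLabelKey, "tenant-service-name",
		tenantServiceNamespaceLabelKey, "tenant-namespace",
		tenantClusterNameLabelKey, "test-cluster",
	)
	// Comma-separated equality requirements are ANDed by the API server.
	fmt.Println(labelSelector)
}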

@@ -363,16 +373,27 @@ func (c *Controller) getTenantEPSFromInfraService(ctx context.Context, svc *v1.S
// getInfraEPSFromInfraService returns the EndpointSlices in the infra cluster that are associated with the given infra service.
func (c *Controller) getInfraEPSFromInfraService(ctx context.Context, svc *v1.Service) ([]*discovery.EndpointSlice, error) {
	var infraEPSSlices []*discovery.EndpointSlice
-	klog.Infof("Searching for endpoints on infra cluster for service %s in namespace %s.", svc.Name, svc.Namespace)
-	result, err := c.infraClient.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx,
-		metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discovery.LabelServiceName, svc.Name)})
+
+	klog.Infof("Searching for EndpointSlices in infra cluster for service %s/%s", svc.Namespace, svc.Name)
+
+	labelSelector := fmt.Sprintf(
+		"%s=%s,%s=%s",
+		discovery.LabelServiceName, svc.Name,
+		kubevirt.TenantClusterNameLabelKey, c.clusterName,
+	)
+
+	result, err := c.infraClient.DiscoveryV1().EndpointSlices(svc.Namespace).List(ctx, metav1.ListOptions{
+		LabelSelector: labelSelector,
+	})
	if err != nil {
		klog.Errorf("Failed to get EndpointSlices for Service %s in namespace %s: %v", svc.Name, svc.Namespace, err)
		return nil, err
	}
+
	for _, eps := range result.Items {
		infraEPSSlices = append(infraEPSSlices, &eps)
	}
+
	return infraEPSSlices, nil
}

@@ -382,74 +403,117 @@ func (c *Controller) reconcile(ctx context.Context, r *Request) error {
		return errors.New("could not cast object to service")
	}

+	// Skip services not managed by this controller (missing required labels)
	if service.Labels[kubevirt.TenantServiceNameLabelKey] == "" ||
		service.Labels[kubevirt.TenantServiceNamespaceLabelKey] == "" ||
		service.Labels[kubevirt.TenantClusterNameLabelKey] == "" {
-		klog.Infof("This LoadBalancer Service: %s is not managed by the %s. Skipping.", service.Name, ControllerName)
+		klog.Infof("Service %s is not managed by this controller. Skipping.", service.Name)
+		return nil
+	}
+
+	// Skip services for other clusters
+	if service.Labels[kubevirt.TenantClusterNameLabelKey] != c.clusterName {
+		klog.Infof("Skipping Service %s: cluster label %q doesn't match our clusterName %q", service.Name, service.Labels[kubevirt.TenantClusterNameLabelKey], c.clusterName)
		return nil
	}
+
	klog.Infof("Reconciling: %v", service.Name)

+	/*
+		1) Check if Service in the infra cluster is actually present.
+		   If it's not found, mark it as 'deleted' so that we don't create new slices.
+	*/
	serviceDeleted := false
-	svc, err := c.infraFactory.Core().V1().Services().Lister().Services(c.infraNamespace).Get(service.Name)
+	infraSvc, err := c.infraFactory.Core().V1().Services().Lister().Services(c.infraNamespace).Get(service.Name)
	if err != nil {
-		klog.Infof("Service %s in namespace %s is deleted.", service.Name, service.Namespace)
+		// The Service is not present in the infra lister => treat as deleted
+		klog.Infof("Service %s in namespace %s is deleted (or not found).", service.Name, service.Namespace)
		serviceDeleted = true
	} else {
-		service = svc
+		// Use the actual object from the lister, so we have the latest state
+		service = infraSvc
	}

+	/*
+		2) Get all existing EndpointSlices in the infra cluster that belong to this LB Service.
+		   We'll decide which of them should be updated or deleted.
+	*/
	infraExistingEpSlices, err := c.getInfraEPSFromInfraService(ctx, service)
	if err != nil {
		return err
	}

-	// At this point we have the current state of the 3 main objects we are interested in:
-	// 1. The Service in the infra cluster, the one created by the KubevirtCloudController.
-	// 2. The EndpointSlices in the tenant cluster, created for the tenant cluster's Service.
-	// 3. The EndpointSlices in the infra cluster, managed by this controller.
-
	slicesToDelete := []*discovery.EndpointSlice{}
	slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice)

+	// For example, if the service is single-stack IPv4 => only AddressTypeIPv4
+	// or if dual-stack => IPv4 and IPv6, etc.
	serviceSupportedAddressesTypes := getAddressTypesForService(service)
-	// If the services switched to a different address type, we need to delete the old ones, because it's immutable.
-	// If the services switched to a different externalTrafficPolicy, we need to delete the old ones.
+
+	/*
+		3) Determine which slices to delete, and which to pass on to the normal
+		   "reconcileByAddressType" logic.
+
+		   - If 'serviceDeleted' is true OR service.Spec.Selector != nil, we remove them.
+		   - Also, if the slice's address type is unsupported by the Service, we remove it.
+	*/
	for _, eps := range infraExistingEpSlices {
-		if service.Spec.Selector != nil || serviceDeleted {
-			klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has a selector", eps.Name, eps.Namespace)
-			// to be sure we don't delete any slice that is not managed by us
+		// If service is deleted or has a non-nil selector => remove slices
+		if serviceDeleted || service.Spec.Selector != nil {
+			/*
+				Only remove if it is clearly labeled as managed by us:
+				we do not want to accidentally remove slices that are not
+				created by this controller.
+			*/
			if c.managedByController(eps) {
+				klog.Infof("Added for deletion EndpointSlice %s in namespace %s because service is deleted or has a selector",
+					eps.Name, eps.Namespace)
				slicesToDelete = append(slicesToDelete, eps)
			}
			continue
		}
+
+		// If the Service does not support this slice's AddressType => remove
		if !serviceSupportedAddressesTypes.Has(eps.AddressType) {
-			klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has an unsupported address type: %v", eps.Name, eps.Namespace, eps.AddressType)
+			klog.Infof("Added for deletion EndpointSlice %s in namespace %s because it has an unsupported address type: %v",
+				eps.Name, eps.Namespace, eps.AddressType)
			slicesToDelete = append(slicesToDelete, eps)
			continue
		}
+
+		/*
+			Otherwise, this slice is potentially still valid for the given AddressType;
+			we'll send it to reconcileByAddressType for final merging and updates.
+		*/
		slicesByAddressType[eps.AddressType] = append(slicesByAddressType[eps.AddressType], eps)
	}

-	if !serviceDeleted {
-		// Get tenant's endpoint slices for this service
+	/*
+		4) If the Service was NOT deleted and has NO selector (i.e., it's a "no-selector" LB Service),
+		   we proceed to handle creation and updates. That means:
+		   - Gather Tenant's EndpointSlices
+		   - Reconcile them by each AddressType
+	*/
+	if !serviceDeleted && service.Spec.Selector == nil {
		tenantEpSlices, err := c.getTenantEPSFromInfraService(ctx, service)
		if err != nil {
			return err
		}

-		// Reconcile the EndpointSlices for each address type e.g. ipv4, ipv6
+		// For each addressType (ipv4, ipv6, etc.) reconcile the infra slices
		for addressType := range serviceSupportedAddressesTypes {
			existingSlices := slicesByAddressType[addressType]
-			err := c.reconcileByAddressType(service, tenantEpSlices, existingSlices, addressType)
-			if err != nil {
+			if err := c.reconcileByAddressType(service, tenantEpSlices, existingSlices, addressType); err != nil {
				return err
			}
		}
	}

-	// Delete the EndpointSlices that are no longer needed
+	/*
+		5) Perform the actual deletion of all slices we flagged.
+		   In many cases (serviceDeleted or .Spec.Selector != nil),
+		   we end up with only "delete" actions and no new slice creation.
+	*/
	for _, eps := range slicesToDelete {
		err := c.infraClient.DiscoveryV1().EndpointSlices(eps.Namespace).Delete(context.TODO(), eps.Name, metav1.DeleteOptions{})
		if err != nil {
@@ -474,11 +538,11 @@ func (c *Controller) reconcileByAddressType(service *v1.Service, tenantSlices []
	// Create the desired port configuration
	var desiredPorts []discovery.EndpointPort

-	for _, port := range service.Spec.Ports {
+	for i := range service.Spec.Ports {
		desiredPorts = append(desiredPorts, discovery.EndpointPort{
-			Port:     &port.TargetPort.IntVal,
-			Protocol: &port.Protocol,
-			Name:     &port.Name,
+			Port:     &service.Spec.Ports[i].TargetPort.IntVal,
+			Protocol: &service.Spec.Ports[i].Protocol,
+			Name:     &service.Spec.Ports[i].Name,
		})
	}
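The switch to index-based access above matters because the EndpointPort fields store pointers. Before Go 1.22, a "for _, port := range" loop reuses one variable across iterations, so every element would end up pointing at the last port. A standalone sketch of the failure mode:

package main

import "fmt"

func main() {
	ports := []int32{80, 443, 8080}

	var bad []*int32
	for _, p := range ports { // p is a single reused variable (pre-Go 1.22)
		bad = append(bad, &p)
	}

	var good []*int32
	for i := range ports { // indexing takes the address of the slice element itself
		good = append(good, &ports[i])
	}

	fmt.Println(*bad[0], *bad[1], *bad[2])    // 8080 8080 8080 under pre-Go 1.22 semantics
	fmt.Println(*good[0], *good[1], *good[2]) // 80 443 8080
}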

@@ -588,55 +652,114 @@ func ownedBy(endpointSlice *discovery.EndpointSlice, svc *v1.Service) bool {
	return false
}

-func (c *Controller) finalize(service *v1.Service, slicesToCreate []*discovery.EndpointSlice, slicesToUpdate []*discovery.EndpointSlice, slicesToDelete []*discovery.EndpointSlice) error {
-	// If there are slices to delete and slices to create, make them as update
-	for i := 0; i < len(slicesToDelete); {
+func (c *Controller) finalize(
+	service *v1.Service,
+	slicesToCreate []*discovery.EndpointSlice,
+	slicesToUpdate []*discovery.EndpointSlice,
+	slicesToDelete []*discovery.EndpointSlice,
+) error {
+	/*
+		We try to turn a "delete + create" pair into a single "update" operation
+		if the original slice (slicesToDelete[i]) has the same address type as
+		the first slice in slicesToCreate, and is owned by the same Service.
+
+		However, we must re-check the lengths of slicesToDelete and slicesToCreate
+		within the loop to avoid an out-of-bounds index in slicesToCreate.
+	*/
+
+	i := 0
+	for i < len(slicesToDelete) {
+		// If there is nothing to create, break early
		if len(slicesToCreate) == 0 {
			break
		}
-		if slicesToDelete[i].AddressType == slicesToCreate[0].AddressType && ownedBy(slicesToDelete[i], service) {
-			slicesToCreate[0].Name = slicesToDelete[i].Name
+
+		sd := slicesToDelete[i]
+		sc := slicesToCreate[0] // We can safely do this now, because len(slicesToCreate) > 0
+
+		// If the address type matches, and the slice is owned by the same Service,
+		// then instead of deleting sd and creating sc, we'll transform it into an update:
+		// we rename sc with sd's name, remove sd from the delete list, remove sc from the create list,
+		// and add sc to the update list.
+		if sd.AddressType == sc.AddressType && ownedBy(sd, service) {
+			sliceToUpdate := sc
+			sliceToUpdate.Name = sd.Name
+
+			// Remove the first element from slicesToCreate
			slicesToCreate = slicesToCreate[1:]
-			slicesToUpdate = append(slicesToUpdate, slicesToCreate[0])
+
+			// Remove the slice from slicesToDelete
			slicesToDelete = append(slicesToDelete[:i], slicesToDelete[i+1:]...)
+
+			// Now add the renamed slice to the list of slices we want to update
+			slicesToUpdate = append(slicesToUpdate, sliceToUpdate)
+
+			/*
+				Do not increment i here, because we've just removed an element from
+				slicesToDelete. The next slice to examine is now at the same index i.
+			*/
		} else {
+			// If they don't match, move on to the next slice in slicesToDelete.
			i++
		}
	}
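The index handling above is the subtle part: when slicesToDelete[i] is spliced out, the next candidate shifts into position i, so the index must not advance on removal. A standalone sketch of the same pattern:

package main

import "fmt"

func main() {
	xs := []string{"a", "b", "c", "d"}
	i := 0
	for i < len(xs) {
		if xs[i] == "b" || xs[i] == "c" {
			// Remove xs[i]; do NOT advance i, the next element is now at index i.
			xs = append(xs[:i], xs[i+1:]...)
		} else {
			i++
		}
	}
	fmt.Println(xs) // [a d]
}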

-	// Create the new slices if service is not marked for deletion
+	/*
+		If the Service is not being deleted, create all remaining slices in slicesToCreate.
+		(If the Service has a DeletionTimestamp, it means it is going away, so we do not
+		want to create new EndpointSlices.)
+	*/
	if service.DeletionTimestamp == nil {
		for _, slice := range slicesToCreate {
-			createdSlice, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Create(context.TODO(), slice, metav1.CreateOptions{})
+			createdSlice, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Create(
+				context.TODO(),
+				slice,
+				metav1.CreateOptions{},
+			)
			if err != nil {
-				klog.Errorf("Failed to create EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+				klog.Errorf("Failed to create EndpointSlice %s in namespace %s: %v",
+					slice.Name, slice.Namespace, err)
+				// If the namespace is terminating, it's safe to ignore the error.
				if k8serrors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
-					return nil
+					continue
				}
				return err
			}
-			klog.Infof("Created EndpointSlice %s in namespace %s", createdSlice.Name, createdSlice.Namespace)
+			klog.Infof("Created EndpointSlice %s in namespace %s",
+				createdSlice.Name, createdSlice.Namespace)
		}
	}

-	// Update slices
+	// Update slices that are in the slicesToUpdate list.
	for _, slice := range slicesToUpdate {
-		_, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Update(context.TODO(), slice, metav1.UpdateOptions{})
+		_, err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Update(
+			context.TODO(),
+			slice,
+			metav1.UpdateOptions{},
+		)
		if err != nil {
-			klog.Errorf("Failed to update EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+			klog.Errorf("Failed to update EndpointSlice %s in namespace %s: %v",
+				slice.Name, slice.Namespace, err)
			return err
		}
-		klog.Infof("Updated EndpointSlice %s in namespace %s", slice.Name, slice.Namespace)
+		klog.Infof("Updated EndpointSlice %s in namespace %s",
+			slice.Name, slice.Namespace)
	}

-	// Delete slices
+	// Finally, delete slices that are in slicesToDelete and are no longer needed.
	for _, slice := range slicesToDelete {
-		err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Delete(context.TODO(), slice.Name, metav1.DeleteOptions{})
+		err := c.infraClient.DiscoveryV1().EndpointSlices(slice.Namespace).Delete(
+			context.TODO(),
+			slice.Name,
+			metav1.DeleteOptions{},
+		)
		if err != nil {
-			klog.Errorf("Failed to delete EndpointSlice %s in namespace %s: %v", slice.Name, slice.Namespace, err)
+			klog.Errorf("Failed to delete EndpointSlice %s in namespace %s: %v",
+				slice.Name, slice.Namespace, err)
			return err
		}
-		klog.Infof("Deleted EndpointSlice %s in namespace %s", slice.Name, slice.Namespace)
+		klog.Infof("Deleted EndpointSlice %s in namespace %s",
+			slice.Name, slice.Namespace)
	}

	return nil
diff --git a/pkg/controller/kubevirteps/kubevirteps_controller_test.go b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
index 1fb86e25f..14d92d340 100644
--- a/pkg/controller/kubevirteps/kubevirteps_controller_test.go
+++ b/pkg/controller/kubevirteps/kubevirteps_controller_test.go
@@ -13,6 +13,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
	dfake "k8s.io/client-go/dynamic/fake"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/testing"
@@ -189,7 +190,7 @@ func setupTestKubevirtEPSController() *testKubevirtEPSController {
		}: "VirtualMachineInstanceList",
	})

-	controller := NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, "test")
+	controller := NewKubevirtEPSController(tenantClient, infraClient, infraDynamic, "test", "test-cluster")

	err := controller.Init()
	if err != nil {
@@ -686,5 +687,229 @@ var _ = g.Describe("KubevirtEPSController", g.Ordered, func() {
			return false, err
		}).Should(BeTrue(), "EndpointSlice in infra cluster should be recreated by the controller after deletion")
	})
+
+	g.It("Should correctly handle multiple unique ports in EndpointSlice", func() {
+		// Create a VMI in the infra cluster
+		createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+		// Create an EndpointSlice in the tenant cluster
+		createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+			*createPort("http", 80, v1.ProtocolTCP),
+			[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+
+		// Define multiple ports for the Service
+		servicePorts := []v1.ServicePort{
+			{
+				Name:       "client",
+				Protocol:   v1.ProtocolTCP,
+				Port:       10001,
+				TargetPort: intstr.FromInt(30396),
+				NodePort:   30396,
+			},
+			{
+				Name:       "dashboard",
+				Protocol:   v1.ProtocolTCP,
+				Port:       8265,
+				TargetPort: intstr.FromInt(31003),
+				NodePort:   31003,
+			},
+			{
+				Name:       "metrics",
+				Protocol:   v1.ProtocolTCP,
+				Port:       8080,
+				TargetPort: intstr.FromInt(30452),
+				NodePort:   30452,
+			},
+		}
+
+		createAndAssertInfraServiceLB("infra-multiport-service", "tenant-service-name", "test-cluster",
+			servicePorts[0], v1.ServiceExternalTrafficPolicyLocal)
+
+		svc, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(context.TODO(), "infra-multiport-service", metav1.GetOptions{})
+		Expect(err).To(BeNil())
+
+		svc.Spec.Ports = servicePorts
+		_, err = testVals.infraClient.CoreV1().Services(infraNamespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
+		Expect(err).To(BeNil())
+
+		var epsListMultiPort *discoveryv1.EndpointSliceList
+
+		Eventually(func() (bool, error) {
+			epsListMultiPort, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+			if len(epsListMultiPort.Items) != 1 {
+				return false, err
+			}
+
+			createdSlice := epsListMultiPort.Items[0]
+			expectedPortNames := []string{"client", "dashboard", "metrics"}
+			foundPortNames := []string{}
+
+			for _, port := range createdSlice.Ports {
+				if port.Name != nil {
+					foundPortNames = append(foundPortNames, *port.Name)
+				}
+			}
+
+			if len(foundPortNames) != len(expectedPortNames) {
+				return false, err
+			}
+
+			portSet := sets.NewString(foundPortNames...)
+			expectedPortSet := sets.NewString(expectedPortNames...)
+			return portSet.Equal(expectedPortSet), err
+		}).Should(BeTrue(), "EndpointSlice should contain all unique ports from the Service without duplicates")
+	})
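The assertion above compares port names order-insensitively via string sets. A standalone sketch of that comparison:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	found := sets.NewString("metrics", "client", "dashboard")
	expected := sets.NewString("client", "dashboard", "metrics")
	// Equal ignores ordering and duplicates, which is exactly what the test needs.
	fmt.Println(found.Equal(expected)) // true
}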
+
+	g.It("Should not panic when Service changes to have a non-nil selector, causing EndpointSlice deletion with no new slices to create", func() {
+		createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+		createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+			*createPort("http", 80, v1.ProtocolTCP),
+			[]discoveryv1.Endpoint{*createEndpoint("123.45.67.89", "worker-0-test", true, true, false)})
+		createAndAssertInfraServiceLB("infra-service-no-selector", "tenant-service-name", "test-cluster",
+			v1.ServicePort{
+				Name:       "web",
+				Port:       80,
+				NodePort:   31900,
+				Protocol:   v1.ProtocolTCP,
+				TargetPort: intstr.IntOrString{IntVal: 30390},
+			},
+			v1.ServiceExternalTrafficPolicyLocal,
+		)
+
+		// Wait for the controller to create an EndpointSlice in the infra cluster.
+		var epsList *discoveryv1.EndpointSliceList
+		var err error
+		Eventually(func() (bool, error) {
+			epsList, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			// Wait for exactly 1 slice
+			if len(epsList.Items) == 1 {
+				return true, nil
+			}
+			return false, nil
+		}).Should(BeTrue(), "Controller should create an EndpointSlice in infra cluster for the LB service")
+
+		svcWithSelector, err := testVals.infraClient.CoreV1().Services(infraNamespace).
+			Get(context.TODO(), "infra-service-no-selector", metav1.GetOptions{})
+		Expect(err).To(BeNil())
+
+		// Set any selector to trigger the slice deletion logic
+		svcWithSelector.Spec.Selector = map[string]string{"test": "selector-added"}
+		_, err = testVals.infraClient.CoreV1().Services(infraNamespace).
+			Update(context.TODO(), svcWithSelector, metav1.UpdateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func() (bool, error) {
+			epsList, err = testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			// We expect the Service's EndpointSlice count to drop to 0 after the update
+			if len(epsList.Items) == 0 {
+				return true, nil
+			}
+			return false, nil
+		}).Should(BeTrue(), "Existing EndpointSlice should be removed because Service now has a selector")
+	})
+
+	g.It("Should remove EndpointSlices and not recreate them when a previously no-selector Service obtains a selector", func() {
+		testVals.infraClient.Fake.PrependReactor("create", "endpointslices", func(action testing.Action) (bool, runtime.Object, error) {
+			createAction := action.(testing.CreateAction)
+			slice := createAction.GetObject().(*discoveryv1.EndpointSlice)
+			if slice.Name == "" && slice.GenerateName != "" {
+				slice.Name = slice.GenerateName + "-fake001"
+			}
+			return false, slice, nil
+		})
+
+		createAndAssertVMI("worker-0-test", "ip-10-32-5-13", "123.45.67.89")
+
+		createAndAssertTenantSlice("test-epslice", "tenant-service-name", discoveryv1.AddressTypeIPv4,
+			*createPort("http", 80, v1.ProtocolTCP),
+			[]discoveryv1.Endpoint{
+				*createEndpoint("123.45.67.89", "worker-0-test", true, true, false),
+			},
+		)
+
+		noSelectorSvcName := "svc-without-selector"
+		svc := &v1.Service{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      noSelectorSvcName,
+				Namespace: infraNamespace,
+				Labels: map[string]string{
+					kubevirt.TenantServiceNameLabelKey:      "tenant-service-name",
+					kubevirt.TenantServiceNamespaceLabelKey: tenantNamespace,
+					kubevirt.TenantClusterNameLabelKey:      "test-cluster",
+				},
+			},
+			Spec: v1.ServiceSpec{
+				Ports: []v1.ServicePort{
+					{
+						Name:       "web",
+						Port:       80,
+						NodePort:   31900,
+						Protocol:   v1.ProtocolTCP,
+						TargetPort: intstr.IntOrString{IntVal: 30390},
+					},
+				},
+				Type:                  v1.ServiceTypeLoadBalancer,
+				ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyLocal,
+			},
+		}
+
+		_, err := testVals.infraClient.CoreV1().Services(infraNamespace).Create(context.TODO(), svc, metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func() (bool, error) {
+			epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			return len(epsList.Items) == 1, nil
+		}).Should(BeTrue(), "Controller should create an EndpointSlice in infra cluster for the no-selector LB service")
+
+		svcWithSelector, err := testVals.infraClient.CoreV1().Services(infraNamespace).Get(
+			context.TODO(), noSelectorSvcName, metav1.GetOptions{})
+		Expect(err).To(BeNil())
+
+		svcWithSelector.Spec.Selector = map[string]string{"app": "test-value"}
+		_, err = testVals.infraClient.CoreV1().Services(infraNamespace).
+			Update(context.TODO(), svcWithSelector, metav1.UpdateOptions{})
+		Expect(err).To(BeNil())
+
+		Eventually(func() (bool, error) {
+			epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).
+				List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			return len(epsList.Items) == 0, nil
+		}).Should(BeTrue(), "All EndpointSlices should be removed after Service acquires a selector (no new slices created)")
+	})
+
+	g.It("Should ignore Services from a different cluster", func() {
+		// Create a Service with cluster label "other-cluster"
+		svc := createInfraServiceLB("infra-service-conflict", "tenant-service-name", "other-cluster",
+			v1.ServicePort{Name: "web", Port: 80, NodePort: 31900, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{IntVal: 30390}},
+			v1.ServiceExternalTrafficPolicyLocal)
+		_, err := testVals.infraClient.CoreV1().Services(infraNamespace).Create(context.TODO(), svc, metav1.CreateOptions{})
+		Expect(err).To(BeNil())
+
+		// The controller should ignore this Service, so no EndpointSlice should be created.
+		Eventually(func() (bool, error) {
+			epsList, err := testVals.infraClient.DiscoveryV1().EndpointSlices(infraNamespace).List(context.TODO(), metav1.ListOptions{})
+			if err != nil {
+				return false, err
+			}
+			// Expect zero slices since cluster label does not match "test-cluster"
+			return len(epsList.Items) == 0, nil
+		}).Should(BeTrue(), "Services with a different cluster label should be ignored")
+	})
+
	})
})
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.15.2@sha256:cb4ab74099662f73e058f7c7495fb403488622c3425c06ad23b687bfa8bc805b
+ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.17.1@sha256:1a6605d3bff6342e12bcc257e852a4f89e97e8af6d3d259930ec07c7ad5f001d

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:bc08ea0ced2cb7dd98b26d72a9462fc0a3863adb908a5effbfcdf7227656ea65
+ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.30.1@sha256:07392e7a87a3d4ef1c86c1b146e6c5de5c2b524aed5a53bf48870dc8a296f99a

@@ -85,7 +85,7 @@ kamajiControlPlane:
#     memory: 512Mi

## @param kamajiControlPlane.apiServer.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-resourcesPreset: "micro"
+resourcesPreset: "small"

controllerManager:
## @param kamajiControlPlane.controllerManager.resources Resources

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/mariadb-backup:0.5.3@sha256:8ca1fb01e880d351ee7d984a0b437c1142836963cd079986156ed28750067138
+ghcr.io/cozystack/cozystack/mariadb-backup:0.6.0@sha256:cfd1c37d8ad24e10681d82d6e6ce8a641b4602c1b0ffa8516ae15b4958bb12d4

@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.9.0@sha256:2b6ba87f5688a439bd2ac12835a5ab9e601feb15c0c44ed0d9ca48cec7c52521
+ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f

@@ -1,157 +1,159 @@
bucket 0.1.0 HEAD
clickhouse 0.1.0 ca79f72
clickhouse 0.2.0 7cd7de73
clickhouse 0.2.1 5ca8823
clickhouse 0.3.0 b00621e
clickhouse 0.4.0 320fc32
clickhouse 0.5.0 2a4768a5
clickhouse 0.6.0 18bbdb67
clickhouse 0.6.1 b7375f73
clickhouse 0.6.2 425ce77f
clickhouse 0.1.0 f7eaab0a
clickhouse 0.2.0 53f2365e
clickhouse 0.2.1 dfbc210b
clickhouse 0.3.0 6c5cf5bf
clickhouse 0.4.0 b40e1b09
clickhouse 0.5.0 0f312d5c
clickhouse 0.6.0 1ec10165
clickhouse 0.6.1 c62a83a7
clickhouse 0.6.2 8267072d
clickhouse 0.7.0 HEAD
ferretdb 0.1.0 4ffa8615
ferretdb 0.1.1 5ca8823
ferretdb 0.2.0 adaf603
ferretdb 0.3.0 aa2f553
ferretdb 0.4.0 def2eb0f
ferretdb 0.4.1 a9555210
ferretdb 0.4.2 425ce77f
ferretdb 0.1.0 e9716091
ferretdb 0.1.1 91b0499a
ferretdb 0.2.0 6c5cf5bf
ferretdb 0.3.0 b8e33d19
ferretdb 0.4.0 b40e1b09
ferretdb 0.4.1 1ec10165
ferretdb 0.4.2 8267072d
ferretdb 0.5.0 HEAD
http-cache 0.1.0 a956713
http-cache 0.2.0 5ca8823
http-cache 0.3.0 fab5940
http-cache 0.3.1 fab5940b
http-cache 0.1.0 263e47be
http-cache 0.2.0 53f2365e
http-cache 0.3.0 6c5cf5bf
http-cache 0.3.1 0f312d5c
http-cache 0.4.0 HEAD
kafka 0.1.0 760f86d2
kafka 0.2.0 a2cc83d
kafka 0.2.1 3ac17018
kafka 0.2.2 d0758692
kafka 0.2.3 5ca8823
kafka 0.3.0 c07c4bbd
kafka 0.3.1 b7375f73
kafka 0.3.2 b75aaf17
kafka 0.3.3 425ce77f
kafka 0.4.0 0e10f952
kafka 0.1.0 f7eaab0a
kafka 0.2.0 c0685f43
kafka 0.2.1 dfbc210b
kafka 0.2.2 e9716091
kafka 0.2.3 91b0499a
kafka 0.3.0 6c5cf5bf
kafka 0.3.1 c62a83a7
kafka 0.3.2 93c46161
kafka 0.3.3 8267072d
kafka 0.4.0 85ec09b8
kafka 0.5.0 HEAD
kubernetes 0.1.0 f642698
kubernetes 0.2.0 7cd7de73
kubernetes 0.3.0 7caccec1
kubernetes 0.4.0 6cae6ce8
kubernetes 0.5.0 6bd2d455
kubernetes 0.6.0 4cbc8a2c
kubernetes 0.7.0 ceefae03
kubernetes 0.1.0 263e47be
kubernetes 0.2.0 53f2365e
kubernetes 0.3.0 007d414f
kubernetes 0.4.0 d7cfa53c
kubernetes 0.5.0 dfbc210b
kubernetes 0.6.0 5bbc488e
kubernetes 0.7.0 e9716091
kubernetes 0.8.0 ac11056e
kubernetes 0.8.1 e54608d8
kubernetes 0.8.2 5ca8823
kubernetes 0.9.0 9b6dd19
kubernetes 0.10.0 ac5c38b
kubernetes 0.11.0 4eaca42
kubernetes 0.11.1 4f430a90
kubernetes 0.12.0 74649f8
kubernetes 0.12.1 28fca4e
kubernetes 0.13.0 ced8e5b9
kubernetes 0.8.1 366bcafc
kubernetes 0.8.2 f81be075
kubernetes 0.9.0 6c5cf5bf
kubernetes 0.10.0 b8e33d19
kubernetes 0.11.0 4b90bf5a
kubernetes 0.11.1 5fb9cfe3
kubernetes 0.12.0 bb985806
kubernetes 0.12.1 28fca4ef
kubernetes 0.13.0 1ec10165
kubernetes 0.14.0 bfbde07c
kubernetes 0.14.1 fde4bcfa
kubernetes 0.15.0 cb7b8158
kubernetes 0.15.1 43e593c7
kubernetes 0.15.2 43e593c7
kubernetes 0.16.0 3d03b227
kubernetes 0.17.0 HEAD
mysql 0.1.0 f642698
mysql 0.2.0 8b975ff0
mysql 0.3.0 5ca8823
mysql 0.4.0 93018c4
mysql 0.5.0 4b84798
mysql 0.5.1 fab5940b
mysql 0.5.2 d8a92aa3
mysql 0.5.3 425ce77f
kubernetes 0.14.1 898374b5
kubernetes 0.15.0 4e68e65c
kubernetes 0.15.1 160e4e2a
kubernetes 0.15.2 8267072d
kubernetes 0.16.0 077045b0
kubernetes 0.17.0 1fbbfcd0
kubernetes 0.17.1 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e
mysql 0.4.0 6c5cf5bf
mysql 0.5.0 b40e1b09
mysql 0.5.1 0f312d5c
mysql 0.5.2 1ec10165
mysql 0.5.3 8267072d
mysql 0.6.0 HEAD
nats 0.1.0 5ca8823
nats 0.2.0 c07c4bbd
nats 0.1.0 e9716091
nats 0.2.0 6c5cf5bf
nats 0.3.0 78366f19
nats 0.3.1 b7375f73
nats 0.4.0 da1e705a
nats 0.4.1 425ce77f
nats 0.3.1 c62a83a7
nats 0.4.0 898374b5
nats 0.4.1 8267072d
nats 0.5.0 HEAD
postgres 0.1.0 f642698
postgres 0.2.0 7cd7de73
postgres 0.2.1 4a97e297
postgres 0.3.0 995dea6f
postgres 0.4.0 ec283c33
postgres 0.4.1 5ca8823
postgres 0.5.0 c07c4bbd
postgres 0.6.0 2a4768a
postgres 0.6.2 54fd61c
postgres 0.7.0 dc9d8bb
postgres 0.7.1 175a65f
postgres 0.8.0 cb7b8158
postgres 0.9.0 160e4e2a
postgres 0.1.0 263e47be
postgres 0.2.0 53f2365e
postgres 0.2.1 d7cfa53c
postgres 0.3.0 dfbc210b
postgres 0.4.0 e9716091
postgres 0.4.1 91b0499a
postgres 0.5.0 6c5cf5bf
postgres 0.6.0 b40e1b09
postgres 0.6.2 0f312d5c
postgres 0.7.0 4b90bf5a
postgres 0.7.1 1ec10165
postgres 0.8.0 4e68e65c
postgres 0.9.0 8267072d
postgres 0.10.0 HEAD
rabbitmq 0.1.0 f642698
rabbitmq 0.2.0 5ca8823
rabbitmq 0.3.0 9e33dc0
rabbitmq 0.4.0 36d8855
rabbitmq 0.4.1 35536bb
rabbitmq 0.4.2 00b2834e
rabbitmq 0.4.3 d8a92aa3
rabbitmq 0.4.4 425ce77f
rabbitmq 0.1.0 263e47be
rabbitmq 0.2.0 53f2365e
rabbitmq 0.3.0 6c5cf5bf
rabbitmq 0.4.0 b40e1b09
rabbitmq 0.4.1 1128d0cb
rabbitmq 0.4.2 4b90bf5a
rabbitmq 0.4.3 1ec10165
rabbitmq 0.4.4 8267072d
rabbitmq 0.5.0 HEAD
redis 0.1.1 f642698
redis 0.2.0 5ca8823
redis 0.3.0 c07c4bbd
redis 0.3.1 b7375f73
redis 0.4.0 abc8f082
redis 0.5.0 0e728870
redis 0.1.1 263e47be
redis 0.2.0 53f2365e
redis 0.3.0 6c5cf5bf
redis 0.3.1 c62a83a7
redis 0.4.0 84f3ccc0
redis 0.5.0 4e68e65c
redis 0.6.0 HEAD
tcp-balancer 0.1.0 f642698
tcp-balancer 0.2.0 a9567139
tcp-balancer 0.1.0 263e47be
tcp-balancer 0.2.0 53f2365e
tcp-balancer 0.3.0 HEAD
tenant 0.1.3 3d1b86c
tenant 0.1.4 d200480
tenant 0.1.5 e3ab858
tenant 1.0.0 7cd7de7
tenant 1.1.0 4da8ac3b
tenant 1.2.0 15478a88
tenant 1.3.0 ceefae03
tenant 1.3.1 c56e5769
tenant 1.4.0 94c688f7
tenant 1.5.0 48128743
tenant 0.1.4 afc997ef
tenant 0.1.5 e3ab858a
tenant 1.0.0 263e47be
tenant 1.1.0 c0685f43
tenant 1.2.0 dfbc210b
tenant 1.3.0 e9716091
tenant 1.3.1 91b0499a
tenant 1.4.0 71514249
tenant 1.5.0 1ec10165
tenant 1.6.0 df448b99
tenant 1.6.1 edbbb9be
tenant 1.6.2 ccedc5fe
tenant 1.6.1 c62a83a7
tenant 1.6.2 898374b5
tenant 1.6.3 2057bb96
tenant 1.6.4 3c9e50a4
tenant 1.6.5 f1e11451
tenant 1.6.6 d4634797
tenant 1.6.7 06afcf27
tenant 1.6.8 4cc48e6f
tenant 1.7.0 6c73e3f3
tenant 1.8.0 e2369ba
tenant 1.9.0 43e593c7
tenant 1.6.4 84f3ccc0
tenant 1.6.5 fde4bcfa
tenant 1.6.6 4e68e65c
tenant 1.6.7 0ab39f20
tenant 1.6.8 bc95159a
tenant 1.7.0 24fa7222
tenant 1.8.0 160e4e2a
tenant 1.9.0 728743db
tenant 1.9.1 HEAD
virtual-machine 0.1.4 f2015d6
virtual-machine 0.1.5 7cd7de7
virtual-machine 0.2.0 5ca8823
virtual-machine 0.3.0 b908400
virtual-machine 0.4.0 4746d51
virtual-machine 0.5.0 cad9cde
virtual-machine 0.6.0 0e728870
virtual-machine 0.6.1 af58018a
virtual-machine 0.7.0 af58018a
virtual-machine 0.7.1 05857b95
virtual-machine 0.8.0 3fa4dd3
virtual-machine 0.8.1 3fa4dd3a
virtual-machine 0.8.2 HEAD
vm-disk 0.1.0 HEAD
vm-instance 0.1.0 ced8e5b9
vm-instance 0.2.0 4f767ee3
vm-instance 0.3.0 0e728870
vm-instance 0.4.0 af58018a
vm-instance 0.4.1 05857b95
vm-instance 0.5.0 3fa4dd3
vm-instance 0.5.1 HEAD
vpn 0.1.0 f642698
vpn 0.2.0 7151424
vpn 0.3.0 a2bcf100
vpn 0.3.1 f7220f19
virtual-machine 0.1.4 f2015d65
virtual-machine 0.1.5 263e47be
virtual-machine 0.2.0 c0685f43
virtual-machine 0.3.0 6c5cf5bf
virtual-machine 0.4.0 b8e33d19
virtual-machine 0.5.0 1ec10165
virtual-machine 0.6.0 4e68e65c
virtual-machine 0.7.0 e23286a3
virtual-machine 0.7.1 0ab39f20
virtual-machine 0.8.0 3fa4dd3a
virtual-machine 0.8.1 93c46161
virtual-machine 0.8.2 de19450f
virtual-machine 0.9.0 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 HEAD
vm-instance 0.1.0 1ec10165
vm-instance 0.2.0 84f3ccc0
vm-instance 0.3.0 4e68e65c
vm-instance 0.4.0 e23286a3
vm-instance 0.4.1 0ab39f20
vm-instance 0.5.0 3fa4dd3a
vm-instance 0.5.1 de19450f
vm-instance 0.6.0 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf
vpn 0.3.1 1ec10165
vpn 0.4.0 HEAD

@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.8.2
+version: 0.9.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "0.8.2"
+appVersion: 0.9.0

@@ -2,6 +2,7 @@ include ../../../scripts/package.mk

generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md
+	yq -o json -i '.properties.gpus.items.type = "object" | .properties.gpus.default = []' values.schema.json
	INSTANCE_TYPES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/instancetypes.yaml | yq 'split(" ") | . + [""]' -o json) \
	&& yq -i -o json ".properties.instanceType.optional=true | .properties.instanceType.enum = $${INSTANCE_TYPES}" values.schema.json
	PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \

@@ -36,22 +36,23 @@ virtctl ssh <user>@<vm>

### Common parameters

-| Name | Description | Value |
-| ------------------------- | ------------------------------------------------------------------------------------------------------------ | ---------------- |
-| `external` | Enable external access from outside the cluster | `false` |
-| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
-| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
-| `running` | Determines if the virtual machine should be running | `true` |
-| `instanceType` | Virtual Machine instance type | `u1.medium` |
-| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
-| `systemDisk.image` | The base image for the virtual machine. Allowed values: `ubuntu`, `cirros`, `alpine`, `fedora` and `talos` | `ubuntu` |
-| `systemDisk.storage` | The size of the disk allocated for the virtual machine | `5Gi` |
-| `systemDisk.storageClass` | StorageClass used to store the data | `replicated` |
-| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
-| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
-| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
-| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
-` |
+| Name | Description | Value |
+| ------------------------- | ------------------------------------------------------------------------------------------------------------ | ------------ |
+| `external` | Enable external access from outside the cluster | `false` |
+| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
+| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
+| `running` | Determines if the virtual machine should be running | `true` |
+| `instanceType` | Virtual Machine instance type | `u1.medium` |
+| `instanceProfile` | Virtual Machine preferences profile | `ubuntu` |
+| `systemDisk.image` | The base image for the virtual machine. Allowed values: `ubuntu`, `cirros`, `alpine`, `fedora` and `talos` | `ubuntu` |
+| `systemDisk.storage` | The size of the disk allocated for the virtual machine | `5Gi` |
+| `systemDisk.storageClass` | StorageClass used to store the data | `replicated` |
+| `gpus` | List of GPUs to attach | `[]` |
+| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
+| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
+| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
+| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `""` |
+| `cloudInitSeed` | A seed string to generate an SMBIOS UUID for the VM. | `""` |

## U Series

@@ -49,3 +49,23 @@ Selector labels
app.kubernetes.io/name: {{ include "virtual-machine.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Generate a stable UUID for cloud-init re-initialization upon upgrade.
*/}}
{{- define "virtual-machine.stableUuid" -}}
{{- $source := printf "%s-%s-%s" .Release.Namespace (include "virtual-machine.fullname" .) .Values.cloudInitSeed }}
{{- $hash := sha256sum $source }}
{{- $uuid := printf "%s-%s-4%s-9%s-%s" (substr 0 8 $hash) (substr 8 12 $hash) (substr 13 16 $hash) (substr 17 20 $hash) (substr 20 32 $hash) }}
{{- if eq .Values.cloudInitSeed "" }}
{{- /* Try to keep the previous uuid, so we don't trigger a full cloud-init run again if the user removed the seed. */}}
{{- $vmResource := lookup "kubevirt.io/v1" "VirtualMachine" .Release.Namespace (include "virtual-machine.fullname" .) -}}
{{- if $vmResource }}
{{- $existingUuid := $vmResource | dig "spec" "template" "spec" "domain" "firmware" "uuid" "" }}
{{- if $existingUuid }}
{{- $uuid = $existingUuid }}
{{- end }}
{{- end }}
{{- end }}
{{- $uuid }}
{{- end }}
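For reference, a standalone Go sketch (an illustration, not part of the chart) of what this helper computes: the hex sha256 of "<namespace>-<fullname>-<seed>", sliced into a UUID-shaped string with literal 4 and 9 forced into the version and variant positions, so the same inputs always yield the same UUID.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// stableUUID mirrors the template's substr arithmetic: hash[0:8], hash[8:12],
// "4"+hash[13:16], "9"+hash[17:20], hash[20:32].
func stableUUID(namespace, fullname, seed string) string {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%s-%s", namespace, fullname, seed)))
	h := hex.EncodeToString(sum[:])
	return fmt.Sprintf("%s-%s-4%s-9%s-%s", h[0:8], h[8:12], h[13:16], h[17:20], h[20:32])
}

func main() {
	// Deterministic: rerunning with the same inputs prints the same UUID.
	fmt.Println(stableUUID("tenant-ns", "my-vm", "upd1"))
}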

@@ -68,7 +68,15 @@ spec:
        requests:
          memory: {{ .Values.resources.memory | quote }}
      {{- end }}
      firmware:
        uuid: {{ include "virtual-machine.stableUuid" . }}
      devices:
        {{- if .Values.gpus }}
        gpus:
        {{- range $i, $gpu := .Values.gpus }}
        - deviceName: {{ $gpu.name }}
        {{- end }}
        {{- end }}
        disks:
        - disk:
            bus: scsi
@@ -90,6 +98,7 @@ spec:
          secret:
            secretName: {{ include "virtual-machine.fullname" $ }}-ssh-keys
          propagationMethod:
            # keys will be injected into metadata part of cloud-init disk
            noCloud: {}
      {{- end }}
      terminationGracePeriodSeconds: 30
@@ -100,8 +109,14 @@ spec:
      {{- if or .Values.sshKeys .Values.cloudInit }}
      - name: cloudinitdisk
        cloudInitNoCloud:
          {{- if .Values.cloudInit }}
          secretRef:
            name: {{ include "virtual-machine.fullname" . }}-cloud-init
          {{- else }}
          userData: |
            #cloud-config
            final_message: Cloud-init user-data was left blank intentionally.
          {{- end }}
      {{- end }}
      networks:
      - name: default

@@ -88,7 +88,7 @@
    },
    "instanceProfile": {
      "type": "string",
-     "description": "Virtual Machine prefferences profile",
+     "description": "Virtual Machine preferences profile",
      "default": "ubuntu",
      "optional": true,
      "enum": [
@@ -164,6 +164,14 @@
        }
      }
    },
+   "gpus": {
+     "type": "array",
+     "description": "List of GPUs to attach",
+     "default": [],
+     "items": {
+       "type": "object"
+     }
+   },
    "resources": {
      "type": "object",
      "properties": {
@@ -190,7 +198,12 @@
    "cloudInit": {
      "type": "string",
      "description": "cloud-init user data config. See cloud-init documentation for more details.",
-     "default": "#cloud-config\n"
+     "default": ""
    },
+   "cloudInitSeed": {
+     "type": "string",
+     "description": "A seed string to generate an SMBIOS UUID for the VM.",
+     "default": ""
+   }
  }
}

@@ -12,7 +12,7 @@ externalPorts:
running: true

## @param instanceType Virtual Machine instance type
-## @param instanceProfile Virtual Machine prefferences profile
+## @param instanceProfile Virtual Machine preferences profile
##
instanceType: "u1.medium"
instanceProfile: ubuntu
@@ -26,6 +26,12 @@ systemDisk:
storage: 5Gi
storageClass: replicated

+## @param gpus [array] List of GPUs to attach
+## Example:
+## gpus:
+## - name: nvidia.com/GA102GL_A10
+gpus: []
+
## @param resources.cpu The number of CPU cores allocated to the virtual machine
## @param resources.memory The amount of memory allocated to the virtual machine
resources:
@@ -49,5 +55,13 @@ sshKeys: []
## password: ubuntu
## chpasswd: { expire: False }
##
-cloudInit: |
-  #cloud-config
+cloudInit: ""

+## @param cloudInitSeed A seed string to generate an SMBIOS UUID for the VM.
+cloudInitSeed: ""
+## Change it to any new value to force a full cloud-init reconfiguration, i.e. when you want to apply
+## settings that are usually written only once (like new SSH keys or a new network configuration) to an existing VM.
+## An empty value does nothing (and the existing UUID is not reverted). Please note that changing this value
+## does not trigger a VM restart; you must perform the restart separately.
+## Example:
+## cloudInitSeed: "upd1"
@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
+version: 0.1.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: 0.1.0
+appVersion: 0.1.1

@@ -3,7 +3,9 @@ apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  annotations:
    {{- if hasKey .Values.source "upload" }}
    cdi.kubevirt.io/storage.bind.immediate.requested: ""
    {{- end }}
    vm-disk.cozystack.io/optical: "{{ .Values.optical }}"
  name: {{ .Release.Name }}
spec:

@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.1
+version: 0.6.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "0.5.1"
+appVersion: 0.6.0

@@ -3,6 +3,7 @@ include ../../../scripts/package.mk
generate:
	readme-generator -v values.yaml -s values.schema.json -r README.md
	yq -o json -i '.properties.disks.items.type = "object" | .properties.disks.default = []' values.schema.json
+	yq -o json -i '.properties.gpus.items.type = "object" | .properties.gpus.default = []' values.schema.json
	INSTANCE_TYPES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/instancetypes.yaml | yq 'split(" ") | . + [""]' -o json) \
	&& yq -i -o json ".properties.instanceType.optional=true | .properties.instanceType.enum = $${INSTANCE_TYPES}" values.schema.json
	PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \

@@ -36,20 +36,21 @@ virtctl ssh <user>@<vm>
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
|
||||
| `disks` | List of disks to attach | `[]` |
|
||||
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
|
||||
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
|
||||
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
|
||||
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
|
||||
` |
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
| `instanceProfile` | Virtual Machine preferences profile | `ubuntu` |
|
||||
| `disks` | List of disks to attach | `[]` |
|
||||
| `gpus` | List of GPUs to attach | `[]` |
|
||||
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
|
||||
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
|
||||
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
|
||||
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `""` |
|
||||
| `cloudInitSeed` | A seed string to generate an SMBIOS UUID for the VM. | `""` |
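
As a quick illustration of the parameters above, the following install is a minimal sketch; the chart path `packages/apps/virtual-machine` and the release name `my-vm` are assumptions, not fixed by this diff:

```bash
# hypothetical install exercising the common parameters from the table above
helm install my-vm packages/apps/virtual-machine \
  --set external=true \
  --set externalMethod=PortList \
  --set 'externalPorts[0]=22' \
  --set instanceType=u1.medium \
  --set instanceProfile=ubuntu
```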

## U Series

@@ -49,3 +49,23 @@ Selector labels
app.kubernetes.io/name: {{ include "virtual-machine.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Generate a stable UUID for cloud-init re-initialization upon upgrade.
*/}}
{{- define "virtual-machine.stableUuid" -}}
{{- $source := printf "%s-%s-%s" .Release.Namespace (include "virtual-machine.fullname" .) .Values.cloudInitSeed }}
{{- $hash := sha256sum $source }}
{{- $uuid := printf "%s-%s-4%s-9%s-%s" (substr 0 8 $hash) (substr 8 12 $hash) (substr 13 16 $hash) (substr 17 20 $hash) (substr 20 32 $hash) }}
{{- if eq .Values.cloudInitSeed "" }}
{{- /* Try to save previous uuid to not trigger full cloud-init again if user decided to remove the seed. */}}
{{- $vmResource := lookup "kubevirt.io/v1" "VirtualMachine" .Release.Namespace (include "virtual-machine.fullname" .) -}}
{{- if $vmResource }}
{{- $existingUuid := $vmResource | dig "spec" "template" "spec" "domain" "firmware" "uuid" "" }}
{{- if $existingUuid }}
{{- $uuid = $existingUuid }}
{{- end }}
{{- end }}
{{- end }}
{{- $uuid }}
{{- end }}
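
The template above derives a deterministic, UUID-shaped value from the release coordinates plus the seed, so the same inputs always yield the same SMBIOS UUID. A minimal sketch of the same derivation outside Helm, assuming a namespace `tenant-x`, a fullname `my-vm`, and a seed `upd1` (all hypothetical):

```bash
# reproduce virtual-machine.stableUuid by hand;
# sprig's (substr START END) corresponds to bash ${s:START:END-START}
src="tenant-x-my-vm-upd1"
hash=$(printf '%s' "$src" | sha256sum | awk '{print $1}')
printf '%s-%s-4%s-9%s-%s\n' \
  "${hash:0:8}" "${hash:8:4}" "${hash:13:3}" "${hash:17:3}" "${hash:20:12}"
```

The hard-coded `4` and `9` nibbles keep the result shaped like a version-4 UUID while leaving it fully deterministic.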

@@ -22,5 +22,5 @@ spec:
kind: virtual-machine
type: virtual-machine
selector:
vm.kubevirt.io/name: {{ $.Release.Name }}
{{- include "virtual-machine.selectorLabels" . | nindent 4 }}
version: {{ $.Chart.Version }}

@@ -1,8 +1,8 @@
{{- if and .Values.instanceType (not (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterInstancetype" "" .Values.instanceType)) }}
{{- fail (printf "Specified instancetype not exists in cluster: %s" .Values.instanceType) }}
{{- fail (printf "Specified instanceType does not exist in the cluster: %s" .Values.instanceType) }}
{{- end }}
{{- if and .Values.instanceProfile (not (lookup "instancetype.kubevirt.io/v1beta1" "VirtualMachineClusterPreference" "" .Values.instanceProfile)) }}
{{- fail (printf "Specified profile not exists in cluster: %s" .Values.instanceProfile) }}
{{- fail (printf "Specified instanceProfile does not exist in the cluster: %s" .Values.instanceProfile) }}
{{- end }}

apiVersion: kubevirt.io/v1
@@ -40,11 +40,19 @@ spec:
requests:
memory: {{ .Values.resources.memory | quote }}
{{- end }}
firmware:
uuid: {{ include "virtual-machine.stableUuid" . }}
devices:
{{- if .Values.gpus }}
gpus:
{{- range $i, $gpu := .Values.gpus }}
- deviceName: {{ $gpu.name }}
{{- end }}
{{- end }}
disks:
{{- range $i, $disk := .Values.disks }}
- name: disk-{{ .name }}
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" .name) }}
- name: disk-{{ $disk.name }}
{{- $disk := lookup "cdi.kubevirt.io/v1beta1" "DataVolume" $.Release.Namespace (printf "vm-disk-%s" $disk.name) }}
{{- if $disk }}
{{- if and (hasKey $disk.metadata.annotations "vm-disk.cozystack.io/optical") (eq (index $disk.metadata.annotations "vm-disk.cozystack.io/optical") "true") }}
cdrom: {}
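
When `gpus` is set, the block above renders a `devices.gpus` list on the VirtualMachine. One hedged way to eyeball that output (chart path assumed; a server-side dry-run is used because the chart's validation relies on `lookup`, which sees nothing during a plain offline render):

```bash
# hypothetical render check for the gpus block against a live cluster,
# then inspect devices.gpus in the printed VirtualMachine manifest
helm install my-vm packages/apps/virtual-machine --dry-run=server \
  --set 'gpus[0].name=nvidia.com/GA102GL_A10'
```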
@@ -75,6 +83,7 @@ spec:
secret:
secretName: {{ include "virtual-machine.fullname" $ }}-ssh-keys
propagationMethod:
# keys will be injected into metadata part of cloud-init disk
noCloud: {}
{{- end }}
terminationGracePeriodSeconds: 30
@@ -87,8 +96,14 @@ spec:
{{- if or .Values.sshKeys .Values.cloudInit }}
- name: cloudinitdisk
cloudInitNoCloud:
{{- if .Values.cloudInit }}
secretRef:
name: {{ include "virtual-machine.fullname" . }}-cloud-init
{{- else }}
userData: |
#cloud-config
final_message: Cloud-init user-data was left blank intentionally.
{{- end }}
{{- end }}
networks:
- name: default

@@ -88,7 +88,7 @@
},
"instanceProfile": {
"type": "string",
"description": "Virtual Machine prefferences profile",
"description": "Virtual Machine preferences profile",
"default": "ubuntu",
"optional": true,
"enum": [
@@ -145,6 +145,14 @@
"type": "object"
}
},
"gpus": {
"type": "array",
"description": "List of GPUs to attach",
"default": [],
"items": {
"type": "object"
}
},
"resources": {
"type": "object",
"properties": {
@@ -171,7 +179,12 @@
"cloudInit": {
"type": "string",
"description": "cloud-init user data config. See cloud-init documentation for more details.",
"default": "#cloud-config\n"
"default": ""
},
"cloudInitSeed": {
"type": "string",
"description": "A seed string to generate an SMBIOS UUID for the VM.",
"default": ""
}
}
}

@@ -12,7 +12,7 @@ externalPorts:
running: true

## @param instanceType Virtual Machine instance type
## @param instanceProfile Virtual Machine prefferences profile
## @param instanceProfile Virtual Machine preferences profile
##
instanceType: "u1.medium"
instanceProfile: ubuntu
@@ -24,6 +24,12 @@ instanceProfile: ubuntu
## - name: example-data
disks: []

## @param gpus [array] List of GPUs to attach
## Example:
## gpus:
## - name: nvidia.com/GA102GL_A10
gpus: []

## @param resources.cpu The number of CPU cores allocated to the virtual machine
## @param resources.memory The amount of memory allocated to the virtual machine
resources:
@@ -47,5 +53,13 @@ sshKeys: []
## password: ubuntu
## chpasswd: { expire: False }
##
cloudInit: |
#cloud-config
cloudInit: ""

## @param cloudInitSeed A seed string to generate an SMBIOS UUID for the VM.
cloudInitSeed: ""
## Change it to any new value to force a full cloud-init reconfiguration. Change it when you want to apply
## to an existing VM settings that are usually written only once, like new SSH keys or new network configuration.
## An empty value does nothing (and the existing UUID is not reverted). Please note that changing this value
## does not trigger a VM restart. You must perform the restart separately.
## Example:
## cloudInitSeed: "upd1"
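
Per the comments above, bumping the seed rotates the SMBIOS UUID and thereby re-arms cloud-init, while the restart has to be issued separately. A hypothetical sequence (release name and chart path assumed):

```bash
# force a full cloud-init run on next boot, then restart the VM
helm upgrade my-vm packages/apps/virtual-machine --reuse-values --set cloudInitSeed=upd1
virtctl restart my-vm
```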

@@ -1,35 +0,0 @@
NAMESPACE=cozy-builder
NAME := builder

TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' ../installer/images/talos/profiles/installer.yaml)

include ../../../scripts/common-envs.mk

help: ## Show this help.
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

show:
helm template -n $(NAMESPACE) $(NAME) .

apply: ## Create builder sandbox in existing Kubernetes cluster.
helm template -n $(NAMESPACE) $(NAME) . | kubectl apply -f -
docker buildx ls | grep -q '^buildkit-builder*' || docker buildx create \
--bootstrap \
--name=buildkit-$(NAME) \
--driver=kubernetes \
--driver-opt=namespace=$(NAMESPACE),replicas=1 \
--platform=linux/amd64 \
--platform=linux/arm64 \
--use \
--config config.toml

diff:
helm template -n $(NAMESPACE) $(NAME) . | kubectl diff -f -

delete: ## Remove builder sandbox from existing Kubernetes cluster.
kubectl delete deploy -n $(NAMESPACE) $(NAME)-talos-imager
docker buildx rm buildkit-$(NAME)

wait-for-builder:
kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) $(NAME)-talos-imager
kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=$(NAME)-talos-imager
@@ -1,11 +0,0 @@
[worker.oci]
gc = true
gckeepstorage = 50000

[[worker.oci.gcpolicy]]
keepBytes = 10737418240
keepDuration = 604800
filters = [ "type==source.local", "type==exec.cachemount", "type==source.git.checkout"]
[[worker.oci.gcpolicy]]
all = true
keepBytes = 53687091200
@@ -1,43 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Namespace }}
labels:
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-talos-imager
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-talos-imager
strategy:
type: Recreate
template:
metadata:
labels:
app: {{ .Release.Name }}-talos-imager
spec:
automountServiceAccountToken: false
terminationGracePeriodSeconds: 1
containers:
- name: imager
image: "{{ .Values.talos.imager.image }}"
securityContext:
privileged: true
command:
- sleep
- infinity
volumeMounts:
- mountPath: /dev
name: dev
volumes:
- hostPath:
path: /dev
type: Directory
name: dev
@@ -1,3 +0,0 @@
talos:
imager:
image: ghcr.io/siderolabs/imager:v1.9.3
@@ -19,12 +19,10 @@ diff:

update:
hack/gen-profiles.sh
IMAGE=$$(yq '.input.baseInstaller.imageRef | sub("/installer:", "/imager:")' images/talos/profiles/installer.yaml) \
yq -i '.talos.imager.image = strenv(IMAGE)' ../builder/values.yaml

image: pre-checks image-cozystack image-talos image-matchbox
image: pre-checks image-matchbox image-cozystack image-talos

image-cozystack: run-builder
image-cozystack:
make -C ../../.. repos
docker buildx build -f images/cozystack/Dockerfile ../../.. \
--provenance false \
@@ -40,11 +38,11 @@ image-cozystack: run-builder
yq -i '.cozystack.image = strenv(IMAGE)' values.yaml
rm -f images/installer.json

image-talos: run-builder
image-talos:
test -f ../../../_out/assets/installer-amd64.tar || make talos-installer
skopeo copy docker-archive:../../../_out/assets/installer-amd64.tar docker://$(REGISTRY)/talos:$(call settag,$(TALOS_VERSION))

image-matchbox: run-builder
image-matchbox:
test -f ../../../_out/assets/kernel-amd64 || make talos-kernel
test -f ../../../_out/assets/initramfs-metal-amd64.xz || make talos-initramfs
docker buildx build -f images/matchbox/Dockerfile ../../.. \
@@ -61,13 +59,10 @@ image-matchbox: run-builder
> ../../extra/bootbox/images/matchbox.tag
rm -f images/matchbox.json

assets: talos-iso talos-nocloud talos-metal
assets: talos-iso talos-nocloud talos-metal talos-kernel talos-initramfs

talos-initramfs talos-kernel talos-installer talos-iso talos-nocloud talos-metal:
mkdir -p ../../../_out/assets
cat images/talos/profiles/$(subst talos-,,$@).yaml | \
kubectl exec -i -n cozy-builder deploy/builder-talos-imager -- imager --tar-to-stdout - | \
docker run --rm -i -v /dev:/dev --privileged "ghcr.io/siderolabs/imager:$(TALOS_VERSION)" --tar-to-stdout - | \
tar -C ../../../_out/assets -xzf-

run-builder:
make -C ../builder/ apply wait-for-builder

@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.9.3
version: v1.9.5
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.9.3
imageRef: ghcr.io/siderolabs/installer:v1.9.5
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
output:
kind: initramfs
imageOptions: {}

@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.9.3
version: v1.9.5
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.9.3
imageRef: ghcr.io/siderolabs/installer:v1.9.5
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
output:
kind: installer
imageOptions: {}

@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.9.3
version: v1.9.5
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.9.3
imageRef: ghcr.io/siderolabs/installer:v1.9.5
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
output:
kind: iso
imageOptions: {}

@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.9.3
version: v1.9.5
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.9.3
imageRef: ghcr.io/siderolabs/installer:v1.9.5
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
output:
kind: kernel
imageOptions: {}

@@ -3,24 +3,24 @@
arch: amd64
platform: metal
secureboot: false
version: v1.9.3
version: v1.9.5
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.9.3
imageRef: ghcr.io/siderolabs/installer:v1.9.5
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
output:
kind: image
imageOptions: { diskSize: 1306525696, diskFormat: raw }

@@ -3,24 +3,24 @@
arch: amd64
platform: nocloud
secureboot: false
version: v1.9.3
version: v1.9.5
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.9.3
imageRef: ghcr.io/siderolabs/installer:v1.9.5
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
- imageRef: ghcr.io/siderolabs/amd-ucode:20250311
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250109
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20250311
- imageRef: ghcr.io/siderolabs/i915-ucode:20241110
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250311
- imageRef: ghcr.io/siderolabs/intel-ucode:20250211
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250311
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.5
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.5
output:
kind: image
imageOptions: { diskSize: 1306525696, diskFormat: raw }

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.28.0@sha256:71ae2037ca44d49bbcf8be56c127ee92f2486089a8ea1cdd6508af49705956ac
image: ghcr.io/cozystack/cozystack/installer:v0.30.0@sha256:aba19d8524cd9d55db8dd5600be92cf53cd218507df46b4294905336603fc7cc

@@ -31,6 +31,13 @@ releases:
autoDirectNodeRoutes: true
routingMode: native

- name: cilium-networkpolicy
releaseName: cilium-networkpolicy
chart: cozy-cilium-networkpolicy
namespace: cozy-cilium
privileged: true
dependsOn: [cilium]

- name: cozy-proxy
releaseName: cozystack
chart: cozy-cozy-proxy
@@ -127,14 +134,14 @@ releases:
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
optional: true
dependsOn: [cilium]
dependsOn: [cilium,victoria-metrics-operator]

- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
optional: true
dependsOn: [cilium]
dependsOn: [cilium,victoria-metrics-operator]

- name: rabbitmq-operator
releaseName: rabbitmq-operator
@@ -154,7 +161,7 @@ releases:
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cilium,cert-manager]
dependsOn: [cilium,cert-manager,victoria-metrics-operator]

- name: snapshot-controller
releaseName: snapshot-controller
@@ -218,3 +225,8 @@ releases:
privileged: true
optional: true
dependsOn: [cilium]

- name: reloader
releaseName: reloader
chart: cozy-reloader
namespace: cozy-reloader

@@ -96,14 +96,14 @@ releases:
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
optional: true
dependsOn: []
dependsOn: [victoria-metrics-operator]

- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
optional: true
dependsOn: []
dependsOn: [victoria-metrics-operator]

- name: rabbitmq-operator
releaseName: rabbitmq-operator

@@ -34,6 +34,13 @@ releases:
- values-talos.yaml
- values-kubeovn.yaml

- name: cilium-networkpolicy
releaseName: cilium-networkpolicy
chart: cozy-cilium-networkpolicy
namespace: cozy-cilium
privileged: true
dependsOn: [cilium]

- name: kubeovn
releaseName: kubeovn
chart: cozy-kubeovn
@@ -109,7 +116,7 @@ releases:
chart: cozy-monitoring-agents
namespace: cozy-monitoring
privileged: true
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
dependsOn: [victoria-metrics-operator, vertical-pod-autoscaler-crds]
values:
scrapeRules:
etcd:
@@ -146,6 +153,17 @@ releases:
namespace: cozy-kubevirt-cdi
dependsOn: [cilium,kubeovn,kubevirt-cdi-operator]

- name: gpu-operator
releaseName: gpu-operator
chart: cozy-gpu-operator
namespace: cozy-gpu-operator
privileged: true
optional: true
dependsOn: [cilium,kubeovn]
valuesFiles:
- values.yaml
- values-talos.yaml

- name: metallb
releaseName: metallb
chart: cozy-metallb
@@ -181,13 +199,13 @@ releases:
releaseName: kafka-operator
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
dependsOn: [cilium,kubeovn]
dependsOn: [cilium,kubeovn,victoria-metrics-operator]

- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
dependsOn: [cilium,kubeovn]
dependsOn: [cilium,kubeovn,victoria-metrics-operator]

- name: rabbitmq-operator
releaseName: rabbitmq-operator
@@ -380,3 +398,15 @@ releases:
namespace: cozy-vertical-pod-autoscaler
privileged: true
dependsOn: [monitoring-agents]

- name: vertical-pod-autoscaler-crds
releaseName: vertical-pod-autoscaler-crds
chart: cozy-vertical-pod-autoscaler-crds
namespace: cozy-vertical-pod-autoscaler
privileged: true
dependsOn: [cilium, kubeovn]

- name: reloader
releaseName: reloader
chart: cozy-reloader
namespace: cozy-reloader

@@ -69,7 +69,7 @@ releases:
chart: cozy-monitoring-agents
namespace: cozy-monitoring
privileged: true
dependsOn: [victoria-metrics-operator]
dependsOn: [victoria-metrics-operator, vertical-pod-autoscaler-crds]
values:
scrapeRules:
etcd:
@@ -103,13 +103,13 @@ releases:
releaseName: kafka-operator
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
dependsOn: []
dependsOn: [victoria-metrics-operator]

- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
dependsOn: []
dependsOn: [victoria-metrics-operator]

- name: rabbitmq-operator
releaseName: rabbitmq-operator
@@ -254,3 +254,10 @@ releases:
namespace: cozy-vertical-pod-autoscaler
privileged: true
dependsOn: [monitoring-agents]

- name: vertical-pod-autoscaler-crds
releaseName: vertical-pod-autoscaler-crds
chart: cozy-vertical-pod-autoscaler-crds
namespace: cozy-vertical-pod-autoscaler
privileged: true
dependsOn: [cilium, kubeovn]

@@ -2,6 +2,9 @@ NAMESPACE=cozy-e2e-tests
NAME := sandbox
CLEAN := 1
TESTING_APPS := $(shell find ../../apps -maxdepth 1 -mindepth 1 -type d | awk -F/ '{print $$NF}')
SANDBOX_NAME := cozy-e2e-sandbox-$(shell echo "$$(hostname):$$(pwd)" | sha256sum | cut -c -6)

ROOT_DIR = $(dir $(abspath $(firstword $(MAKEFILE_LIST))/../../..))

include ../../../scripts/common-envs.mk

@@ -24,7 +27,6 @@ image-e2e-sandbox:
--provenance false \
--tag $(REGISTRY)/e2e-sandbox:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/e2e-sandbox:latest \
--platform linux/amd64,linux/arm64 \
--cache-to type=inline \
--metadata-file images/e2e-sandbox.json \
--push=$(PUSH) \
@@ -34,27 +36,20 @@ image-e2e-sandbox:
yq -i '.e2e.image = strenv(IMAGE)' values.yaml
rm -f images/e2e-sandbox.json

copy-hack-dir:
tar -C ../../../ -cf- hack | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- tar -xf-
test: ## Run the end-to-end tests in existing sandbox.
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'

copy-image:
cat ../../../_out/assets/nocloud-amd64.raw.xz | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -xec 'xz --decompress > /nocloud-amd64.raw'

test: wait-for-sandbox copy-hack-dir copy-image ## Run the end-to-end tests in existing sandbox.
helm template -n cozy-system installer ../installer | kubectl exec -i -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'cat > /cozystack-installer.yaml'
kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'export COZYSTACK_INSTALLER_YAML=$$(cat /cozystack-installer.yaml) && /hack/e2e.sh'

test-applications: wait-for-sandbox copy-hack-dir ## Run the end-to-end tests in existing sandbox for applications.
test-applications: ## Run the end-to-end tests in existing sandbox for applications.
for app in $(TESTING_APPS); do \
kubectl exec -ti -n cozy-e2e-tests deploy/cozystack-e2e-sandbox -- bash -c "/hack/e2e.application.sh $${app}"; \
docker exec ${SANDBOX_NAME} bash -c "/hack/e2e.application.sh $${app}"; \
done
kubectl exec -ti -n cozy-e2e-tests deploy/cozystack-e2e-sandbox -- bash -c "kubectl get hr -A | grep -v 'True'"
docker exec ${SANDBOX_NAME} bash -c "kubectl get hr -A | grep -v 'True'"

delete: ## Remove sandbox from existing Kubernetes cluster.
kubectl delete deploy -n $(NAMESPACE) cozystack-e2e-$(NAME)
docker rm -f "${SANDBOX_NAME}" || true

exec: ## Opens an interactive shell in the sandbox container.
kubectl exec -ti -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- bash
docker exec -ti "${SANDBOX_NAME}" -- bash

proxy: sync-hosts ## Enable a SOCKS5 proxy server; mirrord and gost must be installed.
mirrord exec --target deploy/cozystack-e2e-sandbox --target-namespace cozy-e2e-tests -- gost -L=127.0.0.1:10080
@@ -65,6 +60,6 @@ login: ## Downloads the kubeconfig into a temporary directory and runs a shell w
sync-hosts:
kubectl exec -n $(NAMESPACE) deploy/cozystack-e2e-$(NAME) -- sh -c 'kubectl get ing -A -o go-template='\''{{ "127.0.0.1 localhost\n"}}{{ range .items }}{{ range .status.loadBalancer.ingress }}{{ .ip }}{{ end }} {{ range .spec.rules }}{{ .host }}{{ end }}{{ "\n" }}{{ end }}'\'' > /etc/hosts'

wait-for-sandbox:
kubectl wait deploy --for=condition=Progressing -n $(NAMESPACE) cozystack-e2e-$(NAME)
kubectl wait pod --for=condition=Ready -n $(NAMESPACE) -l app=cozystack-e2e-$(NAME)
apply: delete
docker run -d --rm --name "${SANDBOX_NAME}" --privileged "$$(yq .e2e.image values.yaml)" sleep infinity
docker cp "${ROOT_DIR}" "${SANDBOX_NAME}":/workspace
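
Taken together, the reworked targets above replace the in-cluster sandbox Deployment with a local privileged container named after the host and working directory. A typical loop, under that assumption:

```bash
# run the e2e suite in the docker-based sandbox
make apply    # start the privileged sandbox container and copy the workspace into it
make test     # template the installer inside /workspace and run hack/e2e.sh
make delete   # tear the sandbox down
```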

@@ -1,11 +1,11 @@
FROM ubuntu:22.04

ARG KUBECTL_VERSION=1.32.0
ARG TALOSCTL_VERSION=1.8.4
ARG TALOSCTL_VERSION=1.9.5
ARG HELM_VERSION=3.16.4

RUN apt-get update
RUN apt-get -y install genisoimage qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq
RUN apt-get -y install genisoimage qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq make git
RUN curl -LO "https://github.com/siderolabs/talos/releases/download/v${TALOSCTL_VERSION}/talosctl-linux-amd64" \
&& chmod +x talosctl-linux-amd64 \
&& mv talosctl-linux-amd64 /usr/local/bin/talosctl
@@ -14,3 +14,4 @@ RUN curl -LO "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kube
&& mv kubectl /usr/local/bin/kubectl
RUN curl -sSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash -s - --version "v${HELM_VERSION}"
RUN wget https://github.com/mikefarah/yq/releases/download/v4.44.3/yq_linux_amd64 -O /usr/local/bin/yq && chmod +x /usr/local/bin/yq
RUN curl -s https://fluxcd.io/install.sh | bash

@@ -1,40 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ .Release.Namespace }}
labels:
pod-security.kubernetes.io/enforce: privileged
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: cozystack-e2e-{{ .Release.Name }}
namespace: cozy-e2e-tests
spec:
replicas: 1
selector:
matchLabels:
app: cozystack-e2e-{{ .Release.Name }}
strategy:
type: Recreate
template:
metadata:
labels:
app: cozystack-e2e-{{ .Release.Name }}
spec:
automountServiceAccountToken: false
terminationGracePeriodSeconds: 1
containers:
- name: sandbox
image: "{{ .Values.e2e.image }}"
securityContext:
privileged: true
env:
- name: KUBECONFIG
value: /kubeconfig
- name: TALOSCONFIG
value: /talosconfig
command:
- sleep
- infinity
@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.28.0@sha256:bb5e8f5d92e2e4305ea1cc7f007b3e98769645ab845f632b4788b9373cd207eb
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.30.0@sha256:c887410f9004805522594680fd05d5454953613fd568c527589952294d9793e9

@@ -3,4 +3,4 @@ name: bootbox
description: PXE hardware provisioning
icon: /logos/bootbox.svg
type: application
version: 0.1.0
version: 0.1.1

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.28.0@sha256:b2002815727b71e2657a6f5b8ed558cc38fc21e81a39b9699266e558be03561f
ghcr.io/cozystack/cozystack/matchbox:v0.30.0@sha256:d67e66faf1da74d60bbfa7502eb4aa0d9ebf961bf641132e4b22a09505ed2445

@@ -17,7 +17,7 @@ spec:
{{- range $mac := $m.mac }}
- dhcp:
hostname: {{ $m.hostname }}
mac: {{ $mac }}
mac: {{ lower $mac }}
{{- with $m.arch }}
arch: {{ . }}
{{- end }}

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/grafana:1.9.0@sha256:a492931b49af55ad184b485bcd7ea06f1334722d2184702d9f6f2e4123032357
ghcr.io/cozystack/cozystack/grafana:1.9.1@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399

@@ -4,6 +4,8 @@ kind: VLogs
metadata:
name: {{ .name }}
spec:
image:
tag: v1.17.0-victorialogs
storage:
resources:
requests:

@@ -1,41 +1,42 @@
bootbox 0.1.0 HEAD
etcd 1.0.0 f7eaab0
etcd 2.0.0 a6d0f7cf
etcd 2.0.1 6fc1cc7d
etcd 2.1.0 2b00fcf8
etcd 2.2.0 5ca8823
etcd 2.3.0 b908400d
etcd 2.4.0 cb7b8158
etcd 2.5.0 861e6c46
etcd 2.6.0 a7425b0
etcd 2.6.1 063439ac
bootbox 0.1.0 45a7416c
bootbox 0.1.1 HEAD
etcd 1.0.0 ca79f725
etcd 2.0.0 c0685f43
etcd 2.0.1 007d414f
etcd 2.1.0 25221fdc
etcd 2.2.0 71514249
etcd 2.3.0 fde4bcfa
etcd 2.4.0 af48519d
etcd 2.5.0 24fa7222
etcd 2.6.0 8c460528
etcd 2.6.1 45a7416c
etcd 2.7.0 HEAD
info 1.0.0 HEAD
ingress 1.0.0 f642698
ingress 1.1.0 838bee5d
ingress 1.2.0 ced8e5b
ingress 1.3.0 edbbb9be
ingress 1.0.0 d7cfa53c
ingress 1.1.0 5bbc488e
ingress 1.2.0 28fca4ef
ingress 1.3.0 fde4bcfa
ingress 1.4.0 HEAD
monitoring 1.0.0 f642698
monitoring 1.1.0 15478a88
monitoring 1.2.0 c9e0d63b
monitoring 1.2.1 4471b4ba
monitoring 1.3.0 6c5cf5b
monitoring 1.4.0 adaf603b
monitoring 1.5.0 4b90bf5a
monitoring 1.5.1 57e90b70
monitoring 1.5.2 898374b5
monitoring 1.5.3 c1ca19dc
monitoring 1.0.0 d7cfa53c
monitoring 1.1.0 25221fdc
monitoring 1.2.0 f81be075
monitoring 1.2.1 71514249
monitoring 1.3.0 6c5cf5bf
monitoring 1.4.0 0f312d5c
monitoring 1.5.0 b8949304
monitoring 1.5.1 c62a83a7
monitoring 1.5.2 e44bece1
monitoring 1.5.3 fde4bcfa
monitoring 1.5.4 d4634797
monitoring 1.6.0 cb7b8158
monitoring 1.6.1 3bb97596
monitoring 1.7.0 749110aa
monitoring 1.8.0 80b4c151
monitoring 1.8.1 06daf341
monitoring 1.9.0 8267072d
monitoring 1.6.1 4e68e65c
monitoring 1.7.0 2a976afe
monitoring 1.8.0 8c460528
monitoring 1.8.1 8267072d
monitoring 1.9.0 45a7416c
monitoring 1.9.1 HEAD
seaweedfs 0.1.0 5ca8823
seaweedfs 0.2.0 9e33dc0
seaweedfs 0.2.1 249bf35
seaweedfs 0.3.0 0e728870
seaweedfs 0.1.0 71514249
seaweedfs 0.2.0 5fb9cfe3
seaweedfs 0.2.1 fde4bcfa
seaweedfs 0.3.0 45a7416c
seaweedfs 0.4.0 HEAD

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:218d0c017ae556e5afd074366d9a3124f954c5aefc6474844942420cca8b7640
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:d008018f35fc30ad86de200e2cf3d8ab93b3d8cff303a7ffe388192b87d86ac4

@@ -1,6 +1,6 @@
apiVersion: v2
appVersion: 0.17.0
appVersion: 0.18.1
description: Cluster API Operator
name: cluster-api-operator
type: application
version: 0.17.0
version: 0.18.1

@@ -26,8 +26,10 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: {{ $addonNamespace }}
---
@@ -37,8 +39,10 @@ metadata:
name: {{ $addonName }}
namespace: {{ $addonNamespace }}
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- if or $addonVersion $.Values.secretName }}
spec:

@@ -26,8 +26,11 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: {{ $bootstrapNamespace }}
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -36,8 +39,11 @@ metadata:
name: {{ $bootstrapName }}
namespace: {{ $bootstrapNamespace }}
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- if or $bootstrapVersion $.Values.configSecret.name }}
spec:
{{- end}}

@@ -26,8 +26,11 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: {{ $controlPlaneNamespace }}
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -36,8 +39,11 @@ metadata:
name: {{ $controlPlaneName }}
namespace: {{ $controlPlaneNamespace }}
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- if or $controlPlaneVersion $.Values.configSecret.name $.Values.manager }}
spec:
{{- end}}

@@ -1,4 +1,4 @@
{{- if or .Values.addon .Values.bootstrap .Values.controlPlane .Values.infrastructure }}
{{- if or .Values.addon .Values.bootstrap .Values.controlPlane .Values.infrastructure .Values.ipam }}
# Deploy core components if not specified
{{- if not .Values.core }}
---
@@ -6,8 +6,11 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: capi-system
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -16,8 +19,11 @@ metadata:
name: cluster-api
namespace: capi-system
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- with .Values.configSecret }}
spec:
configSecret:
@@ -28,4 +34,3 @@ spec:
{{- end }}
{{- end }}
{{- end }}

@@ -25,8 +25,11 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: {{ $coreNamespace }}
---
apiVersion: operator.cluster.x-k8s.io/v1alpha2
@@ -35,8 +38,10 @@ metadata:
name: {{ $coreName }}
namespace: {{ $coreNamespace }}
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- if or $coreVersion $.Values.configSecret.name $.Values.manager }}
spec:
@@ -45,8 +50,8 @@ spec:
version: {{ $coreVersion }}
{{- end }}
{{- if $.Values.manager }}
manager:
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.core }}
manager:
featureGates:
{{- range $key, $value := $.Values.manager.featureGates.core }}
{{ $key }}: {{ $value }}

@@ -7,8 +7,10 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: capi-kubeadm-bootstrap-system
---
@@ -18,8 +20,10 @@ metadata:
name: kubeadm
namespace: capi-kubeadm-bootstrap-system
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- with .Values.configSecret }}
spec:
@@ -37,8 +41,10 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: capi-kubeadm-control-plane-system
---
@@ -48,14 +54,16 @@ metadata:
name: kubeadm
namespace: capi-kubeadm-control-plane-system
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- with .Values.configSecret }}
spec:
{{- if $.Values.manager }}
manager:
{{- if and $.Values.manager.featureGates $.Values.manager.featureGates.kubeadm }}
manager:
featureGates:
{{- range $key, $value := $.Values.manager.featureGates.kubeadm }}
{{ $key }}: {{ $value }}

@@ -26,8 +26,10 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: {{ $infrastructureNamespace }}
---
@@ -37,8 +39,10 @@ metadata:
name: {{ $infrastructureName }}
namespace: {{ $infrastructureNamespace }}
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- if or $infrastructureVersion $.Values.configSecret.name $.Values.manager $.Values.additionalDeployments }}
spec:
@@ -47,8 +51,8 @@ spec:
version: {{ $infrastructureVersion }}
{{- end }}
{{- if $.Values.manager }}
manager:
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $infrastructureName) }}
manager:
{{- range $key, $value := $.Values.manager.featureGates }}
{{- if eq $key $infrastructureName }}
featureGates:

@@ -26,8 +26,10 @@ apiVersion: v1
kind: Namespace
metadata:
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "1"
{{- end }}
"argocd.argoproj.io/sync-wave": "1"
name: {{ $ipamNamespace }}
---
@@ -37,8 +39,10 @@ metadata:
name: {{ $ipamName }}
namespace: {{ $ipamNamespace }}
annotations:
{{- if $.Values.enableHelmHook }}
"helm.sh/hook": "post-install,post-upgrade"
"helm.sh/hook-weight": "2"
{{- end }}
"argocd.argoproj.io/sync-wave": "2"
{{- if or $ipamVersion $.Values.configSecret.name $.Values.manager $.Values.additionalDeployments }}
spec:
@@ -47,8 +51,8 @@ spec:
version: {{ $ipamVersion }}
{{- end }}
{{- if $.Values.manager }}
manager:
{{- if and (kindIs "map" $.Values.manager.featureGates) (hasKey $.Values.manager.featureGates $ipamName) }}
manager:
{{- range $key, $value := $.Values.manager.featureGates }}
{{- if eq $key $ipamName }}
featureGates:

@@ -21,7 +21,7 @@ leaderElection:
image:
manager:
repository: registry.k8s.io/capi-operator/cluster-api-operator
tag: v0.17.0
tag: v0.18.1
pullPolicy: IfNotPresent
env:
manager: []
@@ -69,3 +69,4 @@ volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
enableHelmHook: true
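
The new `enableHelmHook` toggle makes the `helm.sh/hook` annotations conditional while the Argo CD sync-wave annotations are always emitted, so hook-less renders keep their ordering. A hedged example of opting out (the local chart path is an assumption):

```bash
# render without Helm hooks, e.g. for Argo CD, relying on sync-waves for ordering
helm template capi ./cluster-api-operator --set enableHelmHook=false
```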

3
packages/system/cilium-networkpolicy/Chart.yaml
Normal file
@@ -0,0 +1,3 @@
apiVersion: v2
name: cozy-cilium-networkpolicy
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
5
packages/system/cilium-networkpolicy/Makefile
Normal file
@@ -0,0 +1,5 @@
export NAME=cilium-networkpolicy
export NAMESPACE=cozy-$(NAME)

include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk
@@ -0,0 +1,27 @@
---
apiVersion: cilium.io/v2
kind: CiliumClusterwideNetworkPolicy
metadata:
name: restrict-system-components
spec:
ingressDeny:
- fromEntities:
- world
toPorts:
- ports:
- port: "2379" # etcd
- port: "2380" # etcd
- port: "3367" # linstor
- port: "7473" # frr-metrics (metallb)
- port: "8123" # cozy assets server
- port: "9443" # kube-rbac-proxy
- port: "10250" # kubelet
- port: "10257" # kube-controller-manager
- port: "10259" # kube-scheduler
ingress:
- fromEntities:
- world
- host
- cluster
nodeSelector:
matchLabels: {}
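
Once applied, the policy denies world ingress to the listed control-plane and storage ports on every node (the empty `nodeSelector` matches all nodes), while the broad `ingress` rule keeps other traffic from world, host, and cluster entities allowed; in Cilium, deny rules take precedence. A quick sanity check, using Cilium's usual short resource name:

```bash
# confirm the policy landed and inspect the denied ports
kubectl get ccnp restrict-system-components -o yaml | yq '.spec.ingressDeny'
```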
@@ -79,7 +79,7 @@ annotations:
Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can
be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n"
apiVersion: v2
appVersion: 1.17.1
appVersion: 1.17.2
description: eBPF-based Networking, Security, and Observability
home: https://cilium.io/
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
@@ -95,4 +95,4 @@ kubeVersion: '>= 1.21.0-0'
name: cilium
sources:
- https://github.com/cilium/cilium
version: 1.17.1
version: 1.17.2

@@ -1,6 +1,6 @@
# cilium

 
 

Cilium is open source software for providing and transparently securing
network connectivity and loadbalancing between application workloads such as
@@ -85,7 +85,7 @@ contributors across the globe, there is almost always someone available to help.
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:a5d0ce49aa801d475da48f8cb163c354ab95cab073cd3c138bd458fc8257fbf1","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:498a000f370d8c37927118ed80afe8adc38d1edcbfc071627d17b25c88efcab0","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
| authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations |
@@ -131,6 +131,8 @@ contributors across the globe, there is almost always someone available to help.
| bpf.ctTcpMax | int | `524288` | Configure the maximum number of entries in the TCP connection tracking table. |
| bpf.datapathMode | string | `veth` | Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) |
| bpf.disableExternalIPMitigation | bool | `false` | Disable ExternalIP mitigation (CVE-2020-8554) |
| bpf.distributedLRU | object | `{"enabled":false}` | Control to use a distributed per-CPU backend memory for the core BPF LRU maps which Cilium uses. This improves performance significantly, but it is also recommended to increase BPF map sizing along with that. |
| bpf.distributedLRU.enabled | bool | `false` | Enable distributed LRU backend memory. For compatibility with existing installations it is off by default. |
| bpf.enableTCX | bool | `true` | Attach endpoint programs using tcx instead of legacy tc hooks on supported kernels. |
| bpf.events | object | `{"default":{"burstLimit":null,"rateLimit":null},"drop":{"enabled":true},"policyVerdict":{"enabled":true},"trace":{"enabled":true}}` | Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. Helm configuration for BPF events map rate limiting is experimental and might change in upcoming releases. |
| bpf.events.default | object | `{"burstLimit":null,"rateLimit":null}` | Default settings for all types of events except dbg and pcap. |
@@ -195,7 +197,7 @@ contributors across the globe, there is almost always someone available to help.
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
| clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. |
| clustermesh.apiserver.image | object | `{"digest":"sha256:1de22f46bfdd638de72c2224d5223ddc3bbeacda1803cb75799beca3d4bf7a4c","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.1","useDigest":true}` | Clustermesh API server image. |
| clustermesh.apiserver.image | object | `{"digest":"sha256:981250ebdc6e66e190992eaf75cfca169113a8f08d5c3793fe15822176980398","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.2","useDigest":true}` | Clustermesh API server image. |
| clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
@@ -375,7 +377,7 @@ contributors across the globe, there is almost always someone available to help.
| envoy.healthPort | int | `9878` | TCP port for the health API. |
| envoy.httpRetryCount | int | `3` | Maximum number of retries for each HTTP request |
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
| envoy.image | object | `{"digest":"sha256:fc708bd36973d306412b2e50c924cd8333de67e0167802c9b48506f9d772f521","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.31.5-1739264036-958bef243c6c66fcfd73ca319f2eb49fff1eb2ae","useDigest":true}` | Envoy container image. |
| envoy.image | object | `{"digest":"sha256:377c78c13d2731f3720f931721ee309159e782d882251709cb0fac3b42c03f4b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.31.5-1741765102-efed3defcc70ab5b263a0fc44c93d316b846a211","useDigest":true}` | Envoy container image. |
| envoy.initialFetchTimeoutSeconds | int | `30` | Time in seconds after which the initial fetch on an xDS stream is considered timed out |
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
@@ -392,6 +394,7 @@ contributors across the globe, there is almost always someone available to help.
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
| envoy.podSecurityContext | object | `{"appArmorProfile":{"type":"Unconfined"}}` | Security Context for cilium-envoy pods. |
| envoy.podSecurityContext.appArmorProfile | object | `{"type":"Unconfined"}` | AppArmorProfile options for the `cilium-agent` and init containers |
| envoy.policyRestoreTimeoutDuration | string | `nil` | Max duration to wait for endpoint policies to be restored on restart. Default "3m". |
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. |
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
@@ -515,7 +518,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
| hubble.relay.image | object | `{"digest":"sha256:397e8fbb188157f744390a7b272a1dec31234e605bcbe22d8919a166d202a3dc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.1","useDigest":true}` | Hubble-relay container image. |
| hubble.relay.image | object | `{"digest":"sha256:42a8db5c256c516cacb5b8937c321b2373ad7a6b0a1e5a5120d5028433d586cc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.2","useDigest":true}` | Hubble-relay container image. |
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
@@ -582,7 +585,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
| hubble.ui.backend.image | object | `{"digest":"sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.1","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.image | object | `{"digest":"sha256:a034b7e98e6ea796ed26df8f4e71f83fc16465a19d166eff67a03b822c0bfa15","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.2","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.livenessProbe.enabled | bool | `false` | Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
| hubble.ui.backend.readinessProbe.enabled | bool | `false` | Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
@@ -592,7 +595,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
|
||||
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
|
||||
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
|
||||
| hubble.ui.frontend.image | object | `{"digest":"sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.1","useDigest":true}` | Hubble-ui frontend image. |
|
||||
| hubble.ui.frontend.image | object | `{"digest":"sha256:9e37c1296b802830834cc87342a9182ccbb71ffebb711971e849221bd9d59392","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.2","useDigest":true}` | Hubble-ui frontend image. |
|
||||
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
|
||||
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
|
||||
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
|
||||
@@ -622,7 +625,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
|
||||
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). |
|
||||
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
|
||||
| image | object | `{"digest":"sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.1","useDigest":true}` | Agent container image. |
|
||||
| image | object | `{"digest":"sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.2","useDigest":true}` | Agent container image. |
|
||||
| imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images |
|
||||
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
|
||||
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
|
||||
@@ -759,7 +762,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| operator.hostNetwork | bool | `true` | HostNetwork setting |
|
||||
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
|
||||
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:034b479fba340f9d98510e509c7ce1c36e8889a109d5f1c2240fcb0942bc772c","awsDigest":"sha256:da74748057c836471bfdc0e65bb29ba0edb82916ec4b99f6a4f002b2fcc849d6","azureDigest":"sha256:b9e3e3994f5fcf1832e1f344f3b3b544832851b1990f124b2c2c68e3ffe04a9b","genericDigest":"sha256:628becaeb3e4742a1c36c4897721092375891b58bae2bfcae48bbf4420aaee97","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.1","useDigest":true}` | cilium-operator image. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:7cb8c23417f65348bb810fe92fb05b41d926f019d77442f3fa1058d17fea7ffe","awsDigest":"sha256:955096183e22a203bbb198ca66e3266ce4dbc2b63f1a2fbd03f9373dcd97893c","azureDigest":"sha256:455fb88b558b1b8ba09d63302ccce76b4930581be89def027184ab04335c20e0","genericDigest":"sha256:81f2d7198366e8dec2903a3a8361e4c68d47d19c68a0d42f0b7b6e3f0523f249","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.2","useDigest":true}` | cilium-operator image. |
|
||||
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
|
||||
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
|
||||
@@ -809,7 +812,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
|
||||
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
|
||||
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
|
||||
| preflight.image | object | `{"digest":"sha256:8969bfd9c87cbea91e40665f8ebe327268c99d844ca26d7d12165de07f702866","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.1","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.image | object | `{"digest":"sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.2","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
|
||||
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
|
||||
@@ -883,7 +886,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| tls.caBundle.useSecret | bool | `false` | Use a Secret instead of a ConfigMap. |
|
||||
| tls.readSecretsOnlyFromSecretsNamespace | string | `nil` | Configure if the Cilium Agent will only look in `tls.secretsNamespace` for CiliumNetworkPolicy relevant Secrets. If false, the Cilium Agent will be granted READ (GET/LIST/WATCH) access to _all_ secrets in the entire cluster. This is not recommended and is included for backwards compatibility. This value obsoletes `tls.secretsBackend`, with `true` == `local` in the old setting, and `false` == `k8s`. |
|
||||
| tls.secretSync | object | `{"enabled":null}` | Configures settings for synchronization of TLS Interception Secrets |
|
||||
| tls.secretSync.enabled | string | `nil` | Enable synchronization of Secrets for TLS Interception. If disabled and tls.secretsBackend is set to 'k8s', then secrets will be read directly by the agent. |
|
||||
| tls.secretSync.enabled | string | `nil` | Enable synchronization of Secrets for TLS Interception. If disabled and tls.readSecretsOnlyFromSecretsNamespace is set to 'false', then secrets will be read directly by the agent. |
|
||||
| tls.secretsBackend | string | `nil` | This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies (namely the secrets referenced by terminatingTLS and originatingTLS). This value is DEPRECATED and will be removed in a future version. Use `tls.readSecretsOnlyFromSecretsNamespace` instead. Possible values: - local - k8s |
|
||||
| tls.secretsNamespace | object | `{"create":true,"name":"cilium-secrets"}` | Configures where secrets used in CiliumNetworkPolicies will be looked for |
|
||||
| tls.secretsNamespace.create | bool | `true` | Create secrets namespace for TLS Interception secrets. |
|
||||
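The two `tls.secretSync.enabled` rows above are the old and new description text for the same value: the docs now point at `tls.readSecretsOnlyFromSecretsNamespace` instead of the deprecated `tls.secretsBackend`. A minimal values sketch of the migration, assuming a chart consumer who previously set `secretsBackend: local`:

```yaml
# values.yaml — replaces the deprecated `tls.secretsBackend: local`
tls:
  readSecretsOnlyFromSecretsNamespace: true   # true ≈ old "local", false ≈ old "k8s"
  secretsNamespace:
    create: true
    name: cilium-secrets
```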
@@ -891,6 +894,7 @@ contributors across the globe, there is almost always someone available to help.

| tolerations | list | `[{"operator":"Exists"}]` | Node tolerations for agent scheduling to nodes with taints ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| tunnelPort | int | Port 8472 for VXLAN, Port 6081 for Geneve | Configure VXLAN and Geneve tunnel port. |
| tunnelProtocol | string | `"vxlan"` | Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. Possible values: - "" - vxlan - geneve |
| tunnelSourcePortRange | string | 0-0 to let the kernel driver decide the range | Configure VXLAN and Geneve tunnel source port range hint. |
| updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":2},"type":"RollingUpdate"}` | Cilium agent update strategy |
| upgradeCompatibility | string | `nil` | upgradeCompatibility helps users upgrading to ensure that the configMap for Cilium will not change critical values to ensure continued operation This flag is not required for new installations. For example: '1.7', '1.8', '1.9' |
| vtep.cidr | string | `""` | A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" |
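All container images in these hunks move from v1.17.1 to v1.17.2 with refreshed digests. For reference, a hedged sketch of how a values file could pin or bypass these defaults; the `override`, `useDigest`, and `digest` knobs come straight from the image objects in the table (the registry in the commented line is hypothetical):

```yaml
image:
  repository: quay.io/cilium/cilium
  tag: v1.17.2
  useDigest: true
  digest: "sha256:3c4c9932b5d8368619cb922a497ff2ebc8def5f41c18e410bcc84025fcd385b1"
  # override: my.registry.example/cilium:custom   # hypothetical full-image override; wins over the fields above
  pullPolicy: IfNotPresent
```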
@@ -7,8 +7,15 @@ staticResources:

  - name: "envoy-prometheus-metrics-listener"
    address:
      socketAddress:
        address: "0.0.0.0"
        address: {{ .Values.ipv4.enabled | ternary "0.0.0.0" "::" | quote }}
        portValue: {{ .Values.envoy.prometheus.port }}
    {{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
    additionalAddresses:
    - address:
        socketAddress:
          address: "::"
          portValue: {{ .Values.envoy.prometheus.port }}
    {{- end }}
    filterChains:
    - filters:
      - name: "envoy.filters.network.http_connection_manager"
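With both `ipv4.enabled` and `ipv6.enabled` set, the template above should render the metrics listener roughly as follows (a sketch of the expected output, assuming the default Prometheus port `9964` from the values table):

```yaml
- name: "envoy-prometheus-metrics-listener"
  address:
    socketAddress:
      address: "0.0.0.0"      # primary address; becomes "::" on IPv6-only clusters
      portValue: 9964
  additionalAddresses:
  - address:
      socketAddress:
        address: "::"         # dual-stack clusters also listen on IPv6
        portValue: 9964
```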
@@ -289,7 +296,7 @@ overloadManager:

applicationLogConfig:
  logFormat:
    {{- if .Values.envoy.log.format_json }}
    jsonFormat: "{{ .Values.envoy.log.format_json | toJson }}"
    jsonFormat: {{ .Values.envoy.log.format_json | toJson }}
    {{- else }}
    textFormat: "{{ .Values.envoy.log.format }}"
    {{- end }}
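Dropping the surrounding double quotes matters here because `toJson` already emits a complete JSON value; quoting it again turned the object into a broken YAML string. A sketch of the rendered difference for a hypothetical `envoy.log.format_json` value:

```yaml
# values.yaml (hypothetical example value)
envoy:
  log:
    format_json:
      level: "%l"
      message: "%j"

# old rendering — the unescaped inner quotes break the YAML string:
#   jsonFormat: "{"level":"%l","message":"%j"}"
# new rendering — a valid YAML flow mapping that Envoy can consume:
#   jsonFormat: {"level":"%l","message":"%j"}
```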
@@ -232,7 +232,7 @@ spec:

          resources:
            {{- toYaml . | trim | nindent 10 }}
          {{- end }}
          {{- if or .Values.prometheus.enabled .Values.hubble.metrics.enabled }}
          {{- if or .Values.prometheus.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) }}
          ports:
          - name: peer-service
            containerPort: {{ .Values.hubble.peerService.targetPort }}

@@ -364,7 +364,7 @@ spec:

          mountPath: {{ .Values.kubeConfigPath }}
          readOnly: true
        {{- end }}
        {{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }}
        {{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.tls.enabled }}
        - name: hubble-metrics-tls
          mountPath: /var/lib/cilium/tls/hubble-metrics
          readOnly: true

@@ -999,7 +999,7 @@ spec:

            path: client-ca.crt
        {{- end }}
      {{- end }}
      {{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.tls.enabled }}
      {{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.tls.enabled }}
      - name: hubble-metrics-tls
        projected:
          # note: the leading zero means this number is in octal representation: do not remove it
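All three daemonset hunks relax conditions that previously keyed only on `hubble.metrics.enabled`, so that dynamic (runtime-configurable) Hubble metrics also get the metrics port and TLS mounts. A hedged values sketch that would now take the new branches even with the static metric list left unset:

```yaml
hubble:
  enabled: true
  metrics:
    enabled: ~          # static metric list left unset (default)
    dynamic:
      enabled: true     # dynamic metrics alone now suffice for port/TLS wiring
    tls:
      enabled: true
```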
@@ -39,6 +39,9 @@ metadata:

  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role

@@ -62,6 +65,9 @@ metadata:

  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role

@@ -85,6 +91,9 @@ metadata:

  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role

@@ -104,6 +113,9 @@ metadata:

  namespace: {{ .Values.bgpControlPlane.secretsNamespace.name | quote }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role

@@ -123,6 +135,9 @@ metadata:

  namespace: {{ .Values.tls.secretsNamespace.name | quote }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
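Each of these five RoleBinding hunks splices `commonLabels` into the metadata alongside the fixed `app.kubernetes.io/part-of: cilium` label. Assuming a values file like the first snippet below (the label values are hypothetical), the rendered metadata should look roughly like the second:

```yaml
# values.yaml
commonLabels:
  team: platform
  env: prod

# rendered RoleBinding metadata (sketch)
metadata:
  labels:
    app.kubernetes.io/part-of: cilium
    team: platform
    env: prod
```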
@@ -46,6 +46,9 @@ metadata:

    k8s-app: cilium
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  clusterIP: None
  type: ClusterIP
@@ -403,7 +403,7 @@ data:

  {{- if .Values.bpf.authMapMax }}
  # bpf-auth-map-max specifies the maximum number of entries in the auth map
  bpf-auth-map-max: {{ .Values.bpf.authMapMax | quote }}
  bpf-auth-map-max: "{{ .Values.bpf.authMapMax | int }}"
  {{- end }}
  {{- if or $bpfCtTcpMax $bpfCtAnyMax }}
  # bpf-ct-global-*-max specifies the maximum number of connections

@@ -419,34 +419,34 @@ data:

  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
  # during the upgrade process, set bpf-ct-global-tcp-max to 1000000.
  {{- if $bpfCtTcpMax }}
  bpf-ct-global-tcp-max: {{ $bpfCtTcpMax | quote }}
  bpf-ct-global-tcp-max: "{{ $bpfCtTcpMax | int }}"
  {{- end }}
  {{- if $bpfCtAnyMax }}
  bpf-ct-global-any-max: {{ $bpfCtAnyMax | quote }}
  bpf-ct-global-any-max: "{{ $bpfCtAnyMax | int }}"
  {{- end }}
  {{- end }}
  {{- if .Values.bpf.ctAccounting }}
  bpf-conntrack-accounting: "{{ .Values.bpf.ctAccounting }}"
  bpf-conntrack-accounting: "{{ .Values.bpf.ctAccounting | int }}"
  {{- end }}
  {{- if .Values.bpf.natMax }}
  # bpf-nat-global-max specified the maximum number of entries in the
  # BPF NAT table.
  bpf-nat-global-max: "{{ .Values.bpf.natMax }}"
  bpf-nat-global-max: "{{ .Values.bpf.natMax | int }}"
  {{- end }}
  {{- if .Values.bpf.neighMax }}
  # bpf-neigh-global-max specified the maximum number of entries in the
  # BPF neighbor table.
  bpf-neigh-global-max: "{{ .Values.bpf.neighMax }}"
  bpf-neigh-global-max: "{{ .Values.bpf.neighMax | int }}"
  {{- end }}
  {{- if hasKey .Values.bpf "policyMapMax" }}
  # bpf-policy-map-max specifies the maximum number of entries in endpoint
  # policy map (per endpoint)
  bpf-policy-map-max: "{{ .Values.bpf.policyMapMax }}"
  bpf-policy-map-max: "{{ .Values.bpf.policyMapMax | int }}"
  {{- end }}
  {{- if hasKey .Values.bpf "lbMapMax" }}
  # bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
  # backend and affinity maps.
  bpf-lb-map-max: "{{ .Values.bpf.lbMapMax }}"
  bpf-lb-map-max: "{{ .Values.bpf.lbMapMax | int }}"
  {{- end }}
  {{- if hasKey .Values.bpf "lbExternalClusterIP" }}
  bpf-lb-external-clusterip: {{ .Values.bpf.lbExternalClusterIP | quote }}
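The recurring switch to `"{{ ... | int }}"` in this hunk works around Go-template number rendering: YAML numbers arrive in Helm as float64, so a large value can otherwise render in scientific notation, which the agent then fails to parse. A minimal illustration of the effect, assuming a user-supplied map size:

```yaml
# values.yaml
bpf:
  ctTcpMax: 1000000

# old rendering (float64 passes straight through the template):
#   bpf-ct-global-tcp-max: "1e+06"
# new rendering with `| int`:
#   bpf-ct-global-tcp-max: "1000000"
```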
@@ -461,6 +461,7 @@ data:

  bpf-lb-mode-annotation: {{ .Values.bpf.lbModeAnnotation | quote }}
  {{- end }}

  bpf-distributed-lru: {{ .Values.bpf.distributedLRU.enabled | quote }}
  bpf-events-drop-enabled: {{ .Values.bpf.events.drop.enabled | quote }}
  bpf-events-policy-verdict-enabled: {{ .Values.bpf.events.policyVerdict.enabled | quote }}
  bpf-events-trace-enabled: {{ .Values.bpf.events.trace.enabled | quote }}

@@ -513,6 +514,9 @@ data:

  {{- if .Values.tunnelPort }}
  tunnel-port: {{ .Values.tunnelPort | quote }}
  {{- end }}
  {{- if .Values.tunnelSourcePortRange }}
  tunnel-source-port-range: {{ .Values.tunnelSourcePortRange | quote }}
  {{- end }}

  {{- if .Values.serviceNoBackendResponse }}
  service-no-backend-response: "{{ .Values.serviceNoBackendResponse }}"
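The new `tunnel-source-port-range` key wires through the `tunnelSourcePortRange` value added to the README table earlier in this diff. A hedged values sketch (the port range is an arbitrary example) and the ConfigMap entries it should produce:

```yaml
# values.yaml
tunnelProtocol: vxlan
tunnelPort: 8472
tunnelSourcePortRange: "1024-65535"

# rendered cilium-config entries (sketch):
#   tunnel-port: "8472"
#   tunnel-source-port-range: "1024-65535"
```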
@@ -927,9 +931,8 @@ data:

  operator-api-serve-addr: {{ $defaultOperatorApiServeAddr | quote }}
  {{- end }}

  {{- if .Values.hubble.enabled }}
  # Enable Hubble gRPC service.
  enable-hubble: {{ .Values.hubble.enabled | quote }}
  {{- if .Values.hubble.enabled }}
  # UNIX domain socket for Hubble server to listen to.
  hubble-socket-path: {{ .Values.hubble.socketPath | quote }}
  {{- if hasKey .Values.hubble "eventQueueSize" }}

@@ -941,7 +944,7 @@ data:

  # Capacity of the buffer to store recent events.
  hubble-event-buffer-capacity: {{ .Values.hubble.eventBufferCapacity | quote }}
  {{- end }}
  {{- if .Values.hubble.metrics.enabled }}
  {{- if or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled}}
  # Address to expose Hubble metrics (e.g. ":7070"). Metrics server will be disabled if this
  # field is not set.
  hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"

@@ -953,14 +956,20 @@ data:

  hubble-metrics-server-tls-client-ca-files: /var/lib/cilium/tls/hubble-metrics/client-ca.crt
  {{- end }}
  {{- end }}
  {{- end }}
  {{- if .Values.hubble.metrics.enabled }}
  # A space separated list of metrics to enable. See [0] for available metrics.
  #
  # https://github.com/cilium/hubble/blob/master/Documentation/metrics.md
  hubble-metrics: {{- range .Values.hubble.metrics.enabled }}
    {{.}}
  {{- end}}
  {{- if .Values.hubble.metrics.dynamic.enabled }}
  hubble-dynamic-metrics-config-path: /dynamic-metrics-config/dynamic-metrics.yaml
  {{- end }}
  enable-hubble-open-metrics: {{ .Values.hubble.metrics.enableOpenMetrics | quote }}
  {{- end }}

  {{- if .Values.hubble.redact }}
  {{- if eq .Values.hubble.redact.enabled true }}
  # Enables hubble redact capabilities

@@ -1004,10 +1013,6 @@ data:

  hubble-flowlogs-config-path: /flowlog-config/flowlogs.yaml
  {{- end }}
  {{- end }}
  {{- if .Values.hubble.metrics.dynamic.enabled }}
  hubble-dynamic-metrics-config-path: /dynamic-metrics-config/dynamic-metrics.yaml
  hubble-metrics-server: ":{{ .Values.hubble.metrics.port }}"
  {{- end }}
  {{- if hasKey .Values.hubble "listenAddress" }}
  # An additional address for Hubble server to listen to (e.g. ":4244").
  hubble-listen-address: {{ .Values.hubble.listenAddress | quote }}

@@ -1041,8 +1046,8 @@ data:

  {{- else }}
  ipam: {{ $ipam | quote }}
  {{- end }}
  {{- if hasKey .Values.ipam "multiPoolPreAllocation" }}
  ipam-multi-pool-pre-allocation: {{ .Values.ipam.multiPoolPreAllocation }}
  {{- if .Values.ipam.multiPoolPreAllocation }}
  ipam-multi-pool-pre-allocation: {{ .Values.ipam.multiPoolPreAllocation | quote }}
  {{- end }}

  {{- if .Values.ipam.ciliumNodeUpdateRate }}
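The ipam hunk swaps `hasKey` for a plain truthiness test and adds `| quote`: if I read the templates correctly, an explicitly empty `multiPoolPreAllocation` no longer emits a dangling config key. A sketch of the behavioral difference:

```yaml
# values.yaml
ipam:
  mode: multi-pool
  multiPoolPreAllocation: ""   # key present but explicitly empty

# old template: hasKey is true, so an empty entry was rendered:
#   ipam-multi-pool-pre-allocation:
# new template: `{{- if .Values.ipam.multiPoolPreAllocation }}` is falsy
# for "", so the key is skipped entirely
```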
@@ -1335,6 +1340,10 @@ data:

  external-envoy-proxy: {{ include "envoyDaemonSetEnabled" . | quote }}
  envoy-base-id: {{ .Values.envoy.baseID | quote }}

  {{- if .Values.envoy.policyRestoreTimeoutDuration }}
  envoy-policy-restore-timeout: {{ .Values.envoy.policyRestoreTimeoutDuration | quote }}
  {{- end }}

  {{- if .Values.envoy.log.path }}
  envoy-log: {{ .Values.envoy.log.path | quote }}
  {{- end }}
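This last configmap hunk wires the new `envoy.policyRestoreTimeoutDuration` value (also added to the README table above, default "3m") through to the agent config. A minimal sketch with an arbitrary timeout:

```yaml
# values.yaml
envoy:
  policyRestoreTimeoutDuration: "5m"

# rendered cilium-config entry (sketch):
#   envoy-policy-restore-timeout: "5m"
```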
@@ -41,6 +41,9 @@ metadata:

  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
rules:
- apiGroups:
  - ""

@@ -66,6 +69,9 @@ metadata:

  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
rules:
- apiGroups:
  - ""
@@ -7,24 +7,23 @@ kind: RoleBinding

metadata:
  name: cilium-operator-ingress-secrets
  namespace: {{ .Values.ingressController.secretsNamespace.name | quote }}
  {{- with .Values.commonLabels }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
  {{- with .Values.operator.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cilium-operator-ingress-secrets
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceAccounts.operator.name | quote }}
  namespace: {{ include "cilium.namespace" . }}
- kind: ServiceAccount
  name: {{ .Values.serviceAccounts.operator.name | quote }}
  namespace: {{ include "cilium.namespace" . }}
{{- end }}
{{- if and .Values.operator.enabled .Values.serviceAccounts.operator.create .Values.gatewayAPI.enabled .Values.gatewayAPI.secretsNamespace.sync .Values.gatewayAPI.secretsNamespace.name }}

@@ -34,12 +33,15 @@ kind: RoleBinding

metadata:
  name: cilium-operator-gateway-secrets
  namespace: {{ .Values.gatewayAPI.secretsNamespace.name | quote }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.operator.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role

@@ -57,12 +59,15 @@ kind: RoleBinding

metadata:
  name: cilium-operator-tlsinterception-secrets
  namespace: {{ .Values.tls.secretsNamespace.name | quote }}
  labels:
    app.kubernetes.io/part-of: cilium
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- with .Values.operator.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  labels:
    app.kubernetes.io/part-of: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
@@ -1,4 +1,4 @@

{{- if and .Values.hubble.enabled .Values.hubble.metrics.enabled .Values.hubble.metrics.serviceMonitor.enabled }}
{{- if and .Values.hubble.enabled (or .Values.hubble.metrics.enabled .Values.hubble.metrics.dynamic.enabled) .Values.hubble.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
@@ -4,10 +4,13 @@ kind: Service

metadata:
  name: spire-server
  namespace: {{ .Values.authentication.mutual.spire.install.namespace }}
  {{- with .Values.commonLabels }}
  labels:
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
    {{- with .Values.authentication.mutual.spire.install.server.service.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- if or .Values.authentication.mutual.spire.install.server.service.annotations .Values.authentication.mutual.spire.annotations }}
  annotations:
    {{- with .Values.authentication.mutual.spire.annotations }}

@@ -17,10 +20,6 @@ metadata:

    {{- toYaml . | nindent 4 }}
    {{- end }}
  {{- end }}
  {{- with .Values.authentication.mutual.spire.install.server.service.labels }}
  labels:
    {{- toYaml . | nindent 8 }}
  {{- end }}
spec:
  type: {{ .Values.authentication.mutual.spire.install.server.service.type }}
  ports:
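Besides folding `commonLabels` in, this Service hunk fixes the layout of the optional service labels: the old block rendered them with `nindent 8` under a second `labels:` key after the annotations, at the wrong depth. With the new layout everything lands under a single `metadata.labels` at `nindent 4`. A rendered sketch, assuming both `commonLabels` and `install.server.service.labels` are set (the label values are hypothetical):

```yaml
metadata:
  name: spire-server
  labels:
    team: platform          # from commonLabels
    app: spire-server-svc   # from ...install.server.service.labels
```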
@@ -4,10 +4,6 @@ kind: StatefulSet

metadata:
  name: spire-server
  namespace: {{ .Values.authentication.mutual.spire.install.namespace }}
  {{- with .Values.commonLabels }}
  labels:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  {{- if or .Values.authentication.mutual.spire.install.server.annotations .Values.authentication.mutual.spire.annotations }}
  annotations:
    {{- with .Values.authentication.mutual.spire.annotations }}

@@ -19,9 +15,12 @@ metadata:

  {{- end }}
  labels:
    app: spire-server
    {{- with .Values.authentication.mutual.spire.install.server.labels }}
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
    {{- end }}
    {{- with .Values.authentication.mutual.spire.install.server.labels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  replicas: 1
  selector:
Some files were not shown because too many files have changed in this diff.