Compare commits


1 Commit

Author:  Andrei Kvapil
SHA1:    b193bd96d4
Message: Add Kubernetes test
         Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
Date:    2025-05-24 19:54:47 +02:00
56 changed files with 517 additions and 780 deletions

View File

@@ -80,7 +80,6 @@ jobs:
- name: Ensure maintenance branch release-X.Y
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GH_PAT }}
script: |
const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
@@ -90,45 +89,21 @@ jobs:
}
const line = `${match[1]}.${match[2]}`;
const branch = `release-${line}`;
// Get main branch commit for the tag
const ref = await github.rest.git.getRef({
  owner: context.repo.owner,
  repo: context.repo.repo,
  ref: `tags/${tag}`
});
const commitSha = ref.data.object.sha;
try {
  await github.rest.git.updateRef({
    owner: context.repo.owner,
    repo: context.repo.repo,
    ref: `heads/${branch}`,
    sha: commitSha,
    force: true
  });
  console.log(`🔁 Force-updated '${branch}' to ${commitSha}`);
} catch (err) {
  if (err.status === 404) {
    await github.rest.git.createRef({
      owner: context.repo.owner,
      repo: context.repo.repo,
      ref: `refs/heads/${branch}`,
      sha: commitSha
    });
    console.log(`✅ Created branch '${branch}' at ${commitSha}`);
  } else {
    console.error('Unexpected error --', err);
    core.setFailed(`Unexpected error creating/updating branch: ${err.message}`);
    throw err;
  }
}
try {
  await github.rest.repos.getBranch({
    owner: context.repo.owner,
    repo: context.repo.repo,
    branch
  });
  console.log(`Branch '${branch}' already exists`);
} catch (_) {
  await github.rest.git.createRef({
    owner: context.repo.owner,
    repo: context.repo.repo,
    ref: `refs/heads/${branch}`,
    sha: context.sha
  });
  console.log(`✅ Branch '${branch}' created at ${context.sha}`);
}
# Get the latest published release
@@ -162,12 +137,12 @@ jobs:
with:
script: |
const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
const m = tag.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/);
if (!m) {
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
return;
}
const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
const isRc = Boolean(m[2]);
core.setOutput('is_rc', isRc);
const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
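
For reference, the two forms of the tag regex above differ only in which pre-releases they accept. A quick shell sketch of the rc-only pattern's behavior (GNU grep assumed; the sample tags are illustrative):

```sh
# Check which tags match the rc-only pattern from the script above:
for t in v0.31.5 v0.31.5-rc.1 v0.31.5-beta.1 v0.31.5-alpha.2; do
  if printf '%s\n' "$t" | grep -Eq '^v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$'; then
    echo "$t: accepted"
  else
    echo "$t: rejected"   # alpha/beta tags fall outside this pattern
  fi
done
```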

View File

@@ -3,10 +3,9 @@ name: Versioned Tag
on:
push:
tags:
- 'v*.*.*' # vX.Y.Z
- 'v*.*.*-rc.*' # vX.Y.Z-rc.N
- 'v*.*.*-beta.*' # vX.Y.Z-beta.N
- 'v*.*.*-alpha.*' # vX.Y.Z-alpha.N
- 'v*.*.*' # vX.Y.Z
- 'v*.*.*-rc.*' # vX.Y.Z-rc.N
concurrency:
group: tags-${{ github.workflow }}-${{ github.ref }}
@@ -43,7 +42,7 @@ jobs:
if: steps.check_release.outputs.skip == 'true'
run: echo "Release already exists, skipping workflow."
# Parse tag meta-data (rc?, maintenance line, etc.)
# Parse tag metadata (rc?, maintenance line, etc.)
- name: Parse tag
if: steps.check_release.outputs.skip == 'false'
id: tag
@@ -51,12 +50,12 @@ jobs:
with:
script: |
const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/); // ['0.31.5', '-rc.1' | '-beta.1' | …]
const m = ref.match(/^v(\d+\.\d+\.\d+)(-rc\.\d+)?$/); // ['0.31.5', '-rc.1']
if (!m) {
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-rc.N'`);
return;
}
const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
const isRc = Boolean(m[2]);
const [maj, min] = m[1].split('.');
core.setOutput('tag', ref); // v0.31.5-rc.1
@@ -64,7 +63,7 @@ jobs:
core.setOutput('is_rc', isRc); // true
core.setOutput('line', `${maj}.${min}`); // 0.31
# Detect base branch (main or release-X.Y) the tag was pushed from
- name: Get base branch
if: steps.check_release.outputs.skip == 'false'
id: get_base
@@ -169,7 +168,7 @@ jobs:
});
console.log(`Draft release created for ${tag}`);
} else {
console.log(`Re-using existing release ${tag}`);
console.log(`Reusing existing release ${tag}`);
}
core.setOutput('upload_url', rel.upload_url);
@@ -182,7 +181,7 @@ jobs:
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Create release-X.Y.Z branch and push (force-update)
- name: Create release branch
if: steps.check_release.outputs.skip == 'false'
run: |

View File

@@ -29,8 +29,10 @@ build: build-deps
repos:
rm -rf _out
make -C packages/library check-version-map
make -C packages/apps check-version-map
make -C packages/extra check-version-map
make -C packages/library repo
make -C packages/system repo
make -C packages/apps repo
make -C packages/extra repo
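
The `check-version-map` prerequisites added above guard against a stale `versions_map`; per the target definition in a later hunk of this diff, each call amounts roughly to:

```sh
# Approximate expansion of `make -C packages/apps check-version-map`:
cd packages/apps
../../hack/gen_versions_map.sh        # regenerate versions_map from the charts
git diff --exit-code -- versions_map  # fail if the committed map has drifted
```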

View File

@@ -1,5 +1,5 @@
This is the third release candidate for the upcoming Cozystack v0.31.0 release.
The release notes show changes accumulated since the release of the previous version, Cozystack v0.30.0.
This is the second release candidate for the upcoming Cozystack v0.31.0 release.
The release notes show changes accumulated since the release of Cozystack v0.30.0.
Cozystack 0.31.0 further advances GPU support, monitoring, and all-around convenience features.
@@ -12,21 +12,18 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* [platform] Cozystack etcd-operator (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
* Introduce support for cross-architecture builds and Cozystack on ARM:
* [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
* [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
* [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932)
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
* [platform] Introduce options `expose-services`, `expose-ingress` and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)
* Update the Cozystack release policy to include long-lived release branches and start with release candidates. Update CI workflows and docs accordingly.
* Use release branches `release-X.Y` for gathering and releasing fixes after initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
* Automatically create release branches after initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
* Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
* Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
* Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
* Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)
@@ -45,7 +42,6 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix Ingress-NGINX dependency on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no longer necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
@@ -66,8 +62,6 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* [ci] Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* [ci, dx] Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* [ci] Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* [ci] Force-update release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* [docs] Explain that tenants cannot have dashes in the names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)
## Dependencies
@@ -80,8 +74,7 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* Update tenant Kubernetes to v1.32. (@kvaps in https://github.com/cozystack/cozystack/pull/871)
* Update flux-operator to 0.20.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/880 and https://github.com/cozystack/cozystack/pull/934)
* Update multiple Cluster API components. (@kvaps in https://github.com/cozystack/cozystack/pull/867 and https://github.com/cozystack/cozystack/pull/947)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953)
## Maintenance
@@ -94,4 +87,4 @@ Cozystack 0.31.0 further advances GPU support, monitoring, and all-around conven
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0-rc.3
**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0-rc.2

View File

@@ -1,117 +0,0 @@
#!/bin/sh
###############################################################################
# cozytest.sh - Bats-compatible test runner with live trace and enhanced      #
# output, written in pure shell                                               #
###############################################################################
set -eu
TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
PATTERN=${2:-*}
LINE='----------------------------------------------------------------'
cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
BEGIN=$(date +%s)
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }
###############################################################################
# run_one <fn> <title>                                                        #
###############################################################################
run_one() {
fn=$1 title=$2
tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
log="$tmp/log"
echo "╭ » Run test: $title"
START=$(date +%s)
skip_next="+ $fn" # skip the first trace line, which is just the function name
{
(
PS4='+ ' # prefix for set -x
set -eu -x # strict + trace
"$fn"
)
printf '__RC__%s\n' "$?"
} 2>&1 | tee "$log" | while IFS= read -r line; do
case "$line" in
'__RC__'*) : ;;
'+ '*) cmd=${line#'+ '}
[ "$cmd" = "${skip_next#+ }" ] && continue
case "$cmd" in
'set -e'|'set -x'|'set -u'|'return 0') continue ;;
esac
out=$cmd ;;
*) out=$line ;;
esac
now=$(( $(date +%s) - START ))
[ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
done
rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
[ -z "$rc" ] && rc=1
now=$(( $(date +%s) - START ))
if [ "$rc" -eq 0 ]; then
printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
else
printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
$((now/60)) $((now%60)) "$title" "$rc"
echo "----- captured output -----------------------------------------"
grep -v '^__RC__' "$log"
echo "$LINE"
exit "$rc"
fi
rm -rf "$tmp"
}
###############################################################################
# convert .bats -> shell-functions                                            #
###############################################################################
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
trap 'rm -f "$TMP_SH"' EXIT
awk '
/^@test[[:space:]]+"/ {
line = substr($0, index($0, "\"") + 1)
title = substr(line, 1, index(line, "\"") - 1)
fname = "test_"
for (i = 1; i <= length(title); i++) {
c = substr(title, i, 1)
fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
}
printf("### %s\n", title)
printf("%s() {\n", fname)
print " set -e" # ошибка → падение теста
next
}
/^}$/ {
print " return 0" # если автор не сделал exit 1 — тест ОК
print "}"
next
}
{ print }
' "$TEST_FILE" > "$TMP_SH"
[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
# shellcheck disable=SC1090
. "$TMP_SH"
###############################################################################
# run selected tests                                                          #
###############################################################################
awk -v pat="$PATTERN" '
/^### / {
title = substr($0, 5)
name = "test_"
for (i = 1; i <= length(title); i++) {
c = substr(title, i, 1)
name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
}
if (pat == "*" || index(title, pat) > 0)
printf("%s %s\n", name, title)
}
' "$TMP_SH" | while IFS=' ' read -r fn title; do
run_one "$fn" "$title"
done
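
The removed runner took a `.bats` file and an optional title pattern, matched as a substring against test titles. A usage sketch (invocations mirror the Makefile targets elsewhere in this diff):

```sh
# Run every @test block in a suite, or only the tests whose titles match:
hack/cozytest.sh hack/e2e-cluster.bats
hack/cozytest.sh hack/e2e-cluster.bats 'Install Cozystack'
```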

View File

@@ -1,387 +0,0 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Environment variable COZYSTACK_INSTALLER_YAML is defined" {
if [ -z "${COZYSTACK_INSTALLER_YAML:-}" ]; then
echo 'COZYSTACK_INSTALLER_YAML environment variable is not set!' >&2
echo >&2
echo 'Please export it with the following command:' >&2
echo ' export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
exit 1
fi
}
@test "IPv4 forwarding is enabled" {
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is disabled!" >&2
echo >&2
echo "Enable it with:" >&2
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
exit 1
fi
}
@test "Clean previous VMs" {
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
rm -rf srv1 srv2 srv3
}
@test "Prepare networking and masquerading" {
ip link del cozy-br0 2>/dev/null || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip address add 192.168.123.1/24 dev cozy-br0
# Masquerading rule idempotent (delete first, then add)
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}
@test "Prepare cloudinit drive for VMs" {
mkdir -p srv1 srv2 srv3
# Generate cloudinit ISOs
for i in 1 2 3; do
echo "hostname: srv${i}" > "srv${i}/meta-data"
cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF
cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- "192.168.123.1${i}/26"
gateway4: "192.168.123.1"
nameservers:
search: [cluster.local]
addresses: [8.8.8.8]
EOF
( cd "srv${i}" && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config )
done
}
@test "Download Talos NoCloud image" {
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
}
@test "Prepare VM disks" {
for i in 1 2 3; do
cp nocloud-amd64.raw srv${i}/system.img
qemu-img resize srv${i}/system.img 20G
qemu-img create srv${i}/data.img 100G
done
}
@test "Create tap devices" {
for i in 1 2 3; do
ip link del cozy-srv${i} 2>/dev/null || true
ip tuntap add dev cozy-srv${i} mode tap
ip link set cozy-srv${i} up
ip link set cozy-srv${i} master cozy-br0
done
}
@test "Boot QEMU VMs" {
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
-drive file=srv${i}/system.img,if=virtio,format=raw \
-drive file=srv${i}/seed.img,if=virtio,format=raw \
-drive file=srv${i}/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv${i}/qemu.pid
done
# Give qemu a few seconds to start up networking
sleep 5
}
@test "Wait until Talos API port 50000 is reachable on all machines" {
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Generate Talos cluster configuration" {
# Cluster-wide patches
cat > patch.yaml <<'EOF'
machine:
kubelet:
nodeIP:
validSubnets:
- 192.168.123.0/24
extraConfig:
maxPods: 512
kernel:
modules:
- name: openvswitch
- name: drbd
parameters:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
files:
- content: |
[plugins]
[plugins."io.containerd.cri.v1.runtime"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
cluster:
apiServer:
extraArgs:
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
oidc-client-id: "kubernetes"
oidc-username-claim: "preferred_username"
oidc-groups-claim: "groups"
network:
cni:
name: none
dnsDomain: cozy.local
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.96.0.0/16
EOF
# Control-plane-only patches
cat > patch-controlplane.yaml <<'EOF'
machine:
nodeLabels:
node.kubernetes.io/exclude-from-external-load-balancers:
$patch: delete
network:
interfaces:
- interface: eth0
vip:
ip: 192.168.123.10
cluster:
allowSchedulingOnControlPlanes: true
controllerManager:
extraArgs:
bind-address: 0.0.0.0
scheduler:
extraArgs:
bind-address: 0.0.0.0
apiServer:
certSANs:
- 127.0.0.1
proxy:
disabled: true
discovery:
enabled: false
etcd:
advertisedSubnets:
- 192.168.123.0/24
EOF
# Generate secrets once
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}
@test "Apply Talos configuration to the node" {
# Apply the configuration to all three nodes
for node in 11 12 13; do
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
done
# Wait for Talos services to come up again
timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
@test "Bootstrap Talos cluster" {
# Bootstrap etcd on the first node
timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait until etcd is healthy
timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
# Retrieve kubeconfig
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
# Wait until all three nodes register in Kubernetes
timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}
@test "Install Cozystack" {
# Create namespace & configmap required by installer
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
kubectl create configmap cozystack -n cozy-system \
--from-literal=bundle-name=paas-full \
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
--from-literal=ipv4-pod-gateway=10.244.0.1 \
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
--from-literal=root-host=example.org \
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
--dry-run=client -o yaml | kubectl apply -f -
# Apply installer manifests from env variable
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
# Wait for the installer deployment to become available
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
# Wait until HelmReleases appear & reconcile them
timeout 60 sh -ec 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
# Fail the test if any HelmRelease is not Ready
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
kubectl get hr -A
fail "Some HelmReleases failed to reconcile"
fi
}
@test "Wait for ClusterAPI provider deployments" {
# Wait for ClusterAPI provider deployments
timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}
@test "Wait for LINSTOR and configure storage" {
# Linstor controller and nodes
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
for node in srv1 srv2 srv3; do
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
done
# Storage classes
kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/layerList: "storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: replicated
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/autoPlace: "3"
linstor.csi.linbit.com/layerList: "drbd storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOF
}
@test "Wait for MetalLB and configure address pool" {
# MetalLB address pool
kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: cozystack
namespace: cozy-metallb
spec:
ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: cozystack
namespace: cozy-metallb
spec:
addresses: [192.168.123.200-192.168.123.250]
autoAssign: true
avoidBuggyIPs: false
EOF
}
@test "Check Cozystack API service" {
kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}
@test "Configure Tenant and wait for applications" {
# Patch root tenant and wait for its releases
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
fi
# Expose Cozystack services through ingress
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
# NGINX ingress controller
timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available
# etcd statefulset
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
# VictoriaMetrics components
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
# Grafana
kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m
# Verify Grafana via ingress
ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
exit 1
fi
}
@test "Keycloak OIDC stack is healthy" {
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'
timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

hack/e2e.sh (new executable file, 370 lines)
View File

@@ -0,0 +1,370 @@
#!/bin/bash
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
echo 'please set it with the following command:' >&2
echo >&2
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
echo >&2
exit 1
fi
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is not enabled!" >&2
echo 'please enable forwarding with the following command:' >&2
echo >&2
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
echo >&2
exit 1
fi
set -x
set -e
kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0
# Enable masquerading
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3
# Prepare cloud-init
for i in 1 2 3; do
echo "hostname: srv$i" > "srv$i/meta-data"
echo '#cloud-config' > "srv$i/user-data"
cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- "192.168.123.1$i/26"
gateway4: "192.168.123.1"
nameservers:
search: [cluster.local]
addresses: [8.8.8.8]
EOT
( cd srv$i && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config
)
done
# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
cp nocloud-amd64.raw srv$i/system.img
qemu-img resize srv$i/system.img 20G
done
# Prepare data drives
for i in 1 2 3; do
qemu-img create srv$i/data.img 100G
done
# Prepare networking
for i in 1 2 3; do
ip link del cozy-srv$i || true
ip tuntap add dev cozy-srv$i mode tap
ip link set cozy-srv$i up
ip link set cozy-srv$i master cozy-br0
done
# Start VMs
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
-netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-drive file=srv$i/system.img,if=virtio,format=raw \
-drive file=srv$i/seed.img,if=virtio,format=raw \
-drive file=srv$i/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv$i/qemu.pid
done
sleep 5
# Wait for VM to start up
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
cat > patch.yaml <<\EOT
machine:
kubelet:
nodeIP:
validSubnets:
- 192.168.123.0/24
extraConfig:
maxPods: 512
kernel:
modules:
- name: openvswitch
- name: drbd
parameters:
- usermode_helper=disabled
- name: zfs
- name: spl
registries:
mirrors:
docker.io:
endpoints:
- https://mirror.gcr.io
files:
- content: |
[plugins]
[plugins."io.containerd.cri.v1.runtime"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
cluster:
apiServer:
extraArgs:
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
oidc-client-id: "kubernetes"
oidc-username-claim: "preferred_username"
oidc-groups-claim: "groups"
network:
cni:
name: none
dnsDomain: cozy.local
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.96.0.0/16
EOT
cat > patch-controlplane.yaml <<\EOT
machine:
nodeLabels:
node.kubernetes.io/exclude-from-external-load-balancers:
$patch: delete
network:
interfaces:
- interface: eth0
vip:
ip: 192.168.123.10
cluster:
allowSchedulingOnControlPlanes: true
controllerManager:
extraArgs:
bind-address: 0.0.0.0
scheduler:
extraArgs:
bind-address: 0.0.0.0
apiServer:
certSANs:
- 127.0.0.1
proxy:
disabled: true
discovery:
enabled: false
etcd:
advertisedSubnets:
- 192.168.123.0/24
EOT
# Gen configuration
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig
# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
# Wait for VM to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
# Bootstrap
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
# Wait for etcd
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig
# Wait for kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system -o yaml | kubectl apply -f -
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
name: cozystack
namespace: cozy-system
data:
bundle-name: "paas-full"
ipv4-pod-cidr: "10.244.0.0/16"
ipv4-pod-gateway: "10.244.0.1"
ipv4-svc-cidr: "10.96.0.0/16"
ipv4-join-cidr: "100.64.0.0/16"
root-host: example.org
api-server-endpoint: https://192.168.123.10:6443
EOT
#
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
# wait for cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
# wait for helmreleases appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
sleep 5
# Wait for all HelmReleases to be installed
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
failed_hrs=$(kubectl get hr -A | grep -v True)
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
exit 1
fi
# Wait for Cluster-API providers
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
# Wait for all linstor nodes become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/layerList: "storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: replicated
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/autoPlace: "3"
linstor.csi.linbit.com/layerList: "drbd storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: cozystack
namespace: cozy-metallb
spec:
ipAddressPools:
- cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: cozystack
namespace: cozy-metallb
spec:
addresses:
- 192.168.123.200-192.168.123.250
autoAssign: true
avoidBuggyIPs: false
EOT
# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
"host": "example.org",
"ingress": true,
"monitoring": true,
"etcd": true,
"isolated": true
}}'
# Wait for HelmRelease be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
# Wait for HelmReleases be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
flux reconcile hr monitoring -n tenant-root --force
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
fi
kubectl patch -n cozy-system cm cozystack --type=merge -p '{"data":{
"expose-services": "api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"
}}'
# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller
# Wait for etcd
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd
# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
# Wait for grafana
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment
# Get IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
# Test OIDC
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
"oidc-enabled": "true"
}}'
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator

View File

@@ -61,4 +61,4 @@ resolved_miss_map=$(
done < $miss_map
)
printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '!seen[$1, $2]++' > "$file"
printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/clickhouse-backup:0.9.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
ghcr.io/cozystack/cozystack/clickhouse-backup:0.8.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:158c35dd6a512bd14e86a423be5c8c7ca91ac71999c73cce2714e4db60a2db43
ghcr.io/cozystack/cozystack/nginx-cache:0.5.0@sha256:785bd69cb593dc1509875d1e3128dac1a013b099fbb02f39330298d798706a0e

View File

@@ -1,3 +1,4 @@
.helmignore
/logos
/Makefile
/hack

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.20.1
version: 0.20.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -1,14 +1,11 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------
@test "Create tenant with isolated mode enabled" {
@test "Create a tenant with isolated mode enabled" {
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
name: test
name: test4kubernetes
namespace: tenant-root
spec:
etcd: false
@@ -19,8 +16,7 @@ spec:
resourceQuotas: {}
seaweedfs: false
EOF
kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
kubectl wait namespace tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.phase}'=Active
}
@test "Create a tenant Kubernetes control plane" {
@@ -29,7 +25,7 @@ apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
name: test
namespace: tenant-test
namespace: tenant-test4kubernetes
spec:
addons:
certManager:
@@ -84,11 +80,11 @@ spec:
- ingress-nginx
storageClass: replicated
EOF
kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=5m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
kubectl wait namespace tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.phase}'=Active
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test4kubernetes kubernetes-test; do sleep 1; done'
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test4kubernetes kubernetes-test --timeout=4m
kubectl wait tcp -n tenant-test4kubernetes kubernetes-test --timeout=20s --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready --timeout=2m
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test4kubernetes kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.replicas}'=2 --timeout=1m
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.v1beta2.readyReplicas}'=2 --timeout=2m
}

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.20.1@sha256:720148128917fa10f860a8b7e74f9428de72481c466c880c5ad894e1f0026d43
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.20.0@sha256:8dbbe95fe8b933a1d1a3c638120f386fec0c4950092d3be5ddd592375bb8a760

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.20.1@sha256:1b48a4725a33ccb48604bb2e1be3171271e7daac2726d3119228212d8a9da5bb
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.20.0@sha256:41fcdbd2f667f68bf554dd184ce362e65b88f350dc7b938c86079b719f5e5099

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.20.1@sha256:fb6d3ce9d6d948285a6d399c852e15259d6922162ec7c44177d2274243f59d1f
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.20.0@sha256:61580fea56b745580989d85e3ef2563e9bb1accc9c4185f8e636bacd02551319

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:184b81529ae72684279799b12f436cc7a511d8ff5bd1e9a30478799c7707c625
ghcr.io/cozystack/cozystack/ubuntu-container-disk:v1.32@sha256:186af6f71891bfc6d6948454802c08922baa508c30e7f79e330b7d26ffceff03

View File

@@ -6,11 +6,6 @@ ingress-nginx:
hostNetwork: true
service:
enabled: false
{{- if not .Values.addons.certManager.enabled }}
admissionWebhooks:
certManager:
enabled: false
{{- end }}
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
{{- end }}

View File

@@ -4,55 +4,36 @@ A tenant is the main unit of security on the platform. The closest analogy would
Tenants can be created recursively and are subject to the following rules:
### Tenant naming
### Higher-level tenants can access lower-level ones.
Tenant names must be alphanumeric.
Using dashes (`-`) in tenant names is not allowed, unlike with other services.
This limitation exists to keep consistent naming in tenants, nested tenants, and services deployed in them.
Higher-level tenants can view and manage the applications of all their children.
For example:
### Each tenant has its own domain
- The root tenant is named `root`, but internally it's referenced as `tenant-root`.
- A nested tenant could be named `foo`, which would result in `tenant-foo` in service names and URLs.
- However, a tenant cannot be named `foo-bar`, because parsing names such as `tenant-foo-bar` would be ambiguous.
### Unique domains
Each tenant has its own domain.
By default, (unless otherwise specified), it inherits the domain of its parent with a prefix of its name.
For example, if the parent had the domain `example.org`, then `tenant-foo` would get the domain `foo.example.org` by default.
By default (unless otherwise specified), it inherits the domain of its parent with a prefix of its name, for example, if the parent had the domain `example.org`, then `tenant-foo` would get the domain `foo.example.org` by default.
Kubernetes clusters created in this tenant namespace would get domains like: `kubernetes-cluster.foo.example.org`
Example:
```text
```
tenant-root (example.org)
└── tenant-foo (foo.example.org)
└── kubernetes-cluster1 (kubernetes-cluster1.foo.example.org)
```
### Nesting tenants and reusing parent services
### Lower-level tenants can access the cluster services of their parent (provided they do not run their own)
Tenants can be nested.
A tenant administrator can create nested tenants using the "Tenant" application from the catalogue.
Higher-level tenants can view and manage the applications of all their children tenants.
If a tenant does not run their own cluster services, it can access ones of its parent.
For example, you create:
- Tenant `tenant-u1` with a set of services like `etcd`, `ingress`, `monitoring`.
- Tenant `tenant-u2` nested in `tenant-u1`.
Thus, you can create `tenant-u1` with a set of services like `etcd`, `ingress`, `monitoring`. And create another tenant namespace `tenant-u2` inside of `tenant-u1`.
Let's see what will happen when you run Kubernetes and Postgres under the `tenant-u2` namespace.
Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`,
the applications running in `tenant-u2` will use the cluster services of the parent tenant.
Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`, the applications will use the cluster services of the parent tenant.
This in turn means:
- The Kubernetes cluster data will be stored in `etcd` for `tenant-u1`.
- Access to the cluster will be through the common `ingress` of `tenant-u1`.
- Essentially, all metrics will be collected in the `monitoring` from `tenant-u1`, and only that tenant will have access to them.
- The Kubernetes cluster data will be stored in etcd for `tenant-u1`.
- Access to the cluster will be through the common ingress of `tenant-u1`.
- Essentially, all metrics will be collected in the monitoring from `tenant-u1`, and only it will have access to them.
Example:
```

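To make the naming and nesting rules concrete, here is a sketch of creating a nested tenant, reusing the `Tenant` manifest shape from the e2e tests in this diff (the field values are illustrative assumptions, not prescribed defaults):

```sh
# Create tenant `foo` under the root tenant; with the parent domain
# example.org, its services would be served under foo.example.org.
kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: foo            # alphanumeric only; dashes are not allowed
  namespace: tenant-root
spec:
  etcd: false          # no cluster services of its own, so workloads in
  monitoring: false    # tenant-foo fall back to the parent's etcd,
  ingress: false       # ingress, and monitoring
  isolated: true
EOF
```
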
View File

@@ -64,8 +64,7 @@ kubernetes 0.17.0 1fbbfcd0
kubernetes 0.17.1 fd240701
kubernetes 0.18.0 721c12a7
kubernetes 0.19.0 93bdf411
kubernetes 0.20.0 609e7ede
kubernetes 0.20.1 HEAD
kubernetes 0.20.0 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e
@@ -158,8 +157,7 @@ virtual-machine 0.8.0 3fa4dd3a
virtual-machine 0.8.1 93c46161
virtual-machine 0.8.2 de19450f
virtual-machine 0.9.0 721c12a7
virtual-machine 0.9.1 93bdf411
virtual-machine 0.10.0 HEAD
virtual-machine 0.9.1 HEAD
vm-disk 0.1.0 d971f2ff
vm-disk 0.1.1 HEAD
vm-instance 0.1.0 1ec10165
@@ -170,7 +168,7 @@ vm-instance 0.4.1 0ab39f20
vm-instance 0.5.0 3fa4dd3a
vm-instance 0.5.1 de19450f
vm-instance 0.6.0 721c12a7
vm-instance 0.7.0 HEAD
vm-instance 0.6.1 HEAD
vpn 0.1.0 263e47be
vpn 0.2.0 53f2365e
vpn 0.3.0 6c5cf5bf

View File

@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.10.0
version: 0.9.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.10.0
appVersion: 0.9.0

View File

@@ -9,4 +9,4 @@ generate:
&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
yq -i -o json '.properties.systemDisk.properties.image.enum = ["ubuntu", "cirros", "alpine", "fedora", "talos"]' values.schema.json
yq -i -o json '.properties.externalMethod.enum = ["PortList", "WholeIP"]' values.schema.json
yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json

View File

@@ -39,7 +39,7 @@ virtctl ssh <user>@<vm>
| Name | Description | Value |
| ------------------------- | ---------------------------------------------------------------------------------------------------------- | ------------ |
| `external` | Enable external access from outside the cluster | `false` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `PortList` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |

View File

@@ -27,7 +27,10 @@ spec:
- metadata:
name: {{ include "virtual-machine.fullname" . }}
spec:
storage:
pvc:
volumeMode: Block
accessModes:
- ReadWriteMany
resources:
requests:
storage: {{ .Values.systemDisk.storage | quote }}

View File

@@ -10,10 +10,10 @@
"externalMethod": {
"type": "string",
"description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
"default": "PortList",
"default": "WholeIP",
"enum": [
"PortList",
"WholeIP"
"WholeIP",
"PortList"
]
},
"externalPorts": {

View File

@@ -4,7 +4,7 @@
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
## @param externalPorts [array] Specify ports to forward from outside the cluster
external: false
externalMethod: PortList
externalMethod: WholeIP
externalPorts:
- 22

View File

@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.6.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 0.7.0
appVersion: 0.6.0

View File

@@ -9,4 +9,4 @@ generate:
PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \
&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
yq -i -o json '.properties.externalMethod.enum = ["PortList", "WholeIP"]' values.schema.json
yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json

View File

@@ -39,7 +39,7 @@ virtctl ssh <user>@<vm>
| Name | Description | Value |
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ----------- |
| `external` | Enable external access from outside the cluster | `false` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `PortList` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |

View File

@@ -10,10 +10,10 @@
"externalMethod": {
"type": "string",
"description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
"default": "PortList",
"default": "WholeIP",
"enum": [
"PortList",
"WholeIP"
"WholeIP",
"PortList"
]
},
"externalPorts": {

View File

@@ -4,7 +4,7 @@
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
## @param externalPorts [array] Specify ports to forward from outside the cluster
external: false
externalMethod: PortList
externalMethod: WholeIP
externalPorts:
- 22

View File

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.31.0-rc.3@sha256:5fc6b88de670878b66f2b5bf381b89b68253ab3e69ff1cb7359470bc65beb3fa
image: ghcr.io/cozystack/cozystack/installer:v0.31.0-rc.2@sha256:05d812a6ac1df86c614b528e8d171af05c0080545ceee383d6cb043f415a4372

View File

@@ -30,13 +30,8 @@ image-e2e-sandbox:
yq -i '.e2e.image = strenv(IMAGE)' values.yaml
rm -f images/e2e-sandbox.json
test: test-cluster test-apps ## Run the end-to-end tests in existing sandbox
test-cluster: ## Run the end-to-end for creating a cluster
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/cozytest.sh hack/e2e-cluster.bats'
test-apps: ## Run the end-to-end tests for apps
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && hack/cozytest.sh hack/e2e-apps.bats'
test: ## Run the end-to-end tests in existing sandbox.
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'
delete: ## Remove sandbox from existing Kubernetes cluster.
docker rm -f "${SANDBOX_NAME}" || true
@@ -45,10 +40,5 @@ exec: ## Opens an interactive shell in the sandbox container.
docker exec -ti "${SANDBOX_NAME}" bash
apply: delete
docker run \
-d --rm --name "${SANDBOX_NAME}" --privileged \
-e TALOSCONFIG=/workspace/talosconfig \
-e KUBECONFIG=/workspace/kubeconfig \
"$$(yq .e2e.image values.yaml)" \
sleep infinity
docker run -d --rm --name "${SANDBOX_NAME}" --privileged "$$(yq .e2e.image values.yaml)" sleep infinity
docker cp "${ROOT_DIR}" "${SANDBOX_NAME}":/workspace

View File

@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.31.0-rc.3@sha256:8de0a8900994cb55f74ba25d265eeecac9958b07cdb8f86b9284b9f23668d2bb
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.31.0-rc.2@sha256:2d35b2cce2f093c5c3145a08d9981e2bf28cf45a6804117d50bd6345a15ecd1a

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.31.0-rc.3@sha256:8b65a160333830bf4711246ae78f26095e3b33667440bf1bbdd36db60a7f92e2
ghcr.io/cozystack/cozystack/matchbox:v0.31.0-rc.2@sha256:78e5a28badd3c804e55e5c1376f12dad77e04b9f6f80637596939ba348f7104b

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:24382d445bf7a39ed988ef4dc7a0d9f084db891fcb5f42fd2e64622710b9457e
ghcr.io/cozystack/cozystack/grafana:1.9.2@sha256:c63978e1ed0304e8518b31ddee56c4e8115541b997d8efbe1c0a74da57140399

View File

@@ -6,3 +6,10 @@ repo:
fix-chartnames:
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
gen-versions-map: fix-chartnames
../../hack/gen_versions_map.sh
check-version-map: gen-versions-map
git diff --exit-code -- versions_map

View File

@@ -0,0 +1 @@
cozy-lib 0.1.0 HEAD

View File

@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:4399c240ce1f99660d5d1be9d6d7b3e8157c50e4aba58345d51a1d9ac25779a3
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:a219040fed290492f047818e5c5a864a30112ff418ad4b12b24de9709302427a

View File

@@ -6,7 +6,7 @@ annotations:
fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E
url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg
apiVersion: v2
appVersion: v1.17.2
appVersion: v1.16.3
description: A Helm chart for cert-manager
home: https://cert-manager.io
icon: https://raw.githubusercontent.com/cert-manager/community/4d35a69437d21b76322157e6284be4cd64e6d2b7/logo/logo-small.png
@@ -23,4 +23,4 @@ maintainers:
name: cert-manager
sources:
- https://github.com/cert-manager/cert-manager
version: v1.17.2
version: v1.16.3

View File

@@ -19,7 +19,7 @@ Before installing the chart, you must first install the cert-manager CustomResou
This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources.
```bash
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.crds.yaml
$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.crds.yaml
```
To install the chart with the release name `cert-manager`:
@@ -29,7 +29,7 @@ To install the chart with the release name `cert-manager`:
$ helm repo add jetstack https://charts.jetstack.io --force-update
## Install the cert-manager helm chart
$ helm install cert-manager --namespace cert-manager --version v1.17.2 jetstack/cert-manager
$ helm install cert-manager --namespace cert-manager --version v1.16.3 jetstack/cert-manager
```
In order to begin issuing certificates, you will need to set up a ClusterIssuer
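The hunk ends mid-sentence here; as a point of reference, the smallest possible starting point is a self-signed ClusterIssuer (a sketch, not part of this diff):

```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned-issuer   # hypothetical name
spec:
  selfSigned: {}            # issues certificates signed by their own private key
```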
@@ -65,7 +65,7 @@ If you want to completely uninstall cert-manager from your cluster, you will als
delete the previously installed CustomResourceDefinition resources:
```console
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.crds.yaml
$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.3/cert-manager.crds.yaml
```
## Configuration
@@ -316,13 +316,7 @@ If not set and create is true, a name is generated using the fullname template.
#### **serviceAccount.annotations** ~ `object`
Optional additional annotations to add to the controller's Service Account. Templates are allowed for both keys and values.
Example using templating:
```yaml
annotations:
"{{ .Chart.Name }}-helm-chart/version": "{{ .Chart.Version }}"
```
Optional additional annotations to add to the controller's Service Account.
#### **serviceAccount.labels** ~ `object`
@@ -370,24 +364,17 @@ config:
kubernetesAPIQPS: 9000
kubernetesAPIBurst: 9000
numberOfConcurrentWorkers: 200
enableGatewayAPI: true
# Feature gates as of v1.17.0. Listed with their default values.
# See https://cert-manager.io/docs/cli/controller/
featureGates:
AdditionalCertificateOutputFormats: true # BETA - default=true
AllAlpha: false # ALPHA - default=false
AllBeta: false # BETA - default=false
ExperimentalCertificateSigningRequestControllers: false # ALPHA - default=false
ExperimentalGatewayAPISupport: true # BETA - default=true
LiteralCertificateSubject: true # BETA - default=true
NameConstraints: true # BETA - default=true
OtherNames: false # ALPHA - default=false
SecretsFilteredCaching: true # BETA - default=true
ServerSideApply: false # ALPHA - default=false
StableCertificateRequestName: true # BETA - default=true
UseCertificateRequestBasicConstraints: false # ALPHA - default=false
UseDomainQualifiedFinalizer: true # BETA - default=false
ValidateCAA: false # ALPHA - default=false
AdditionalCertificateOutputFormats: true
DisallowInsecureCSRUsageDefinition: true
ExperimentalCertificateSigningRequestControllers: true
ExperimentalGatewayAPISupport: true
LiteralCertificateSubject: true
SecretsFilteredCaching: true
ServerSideApply: true
StableCertificateRequestName: true
UseCertificateRequestBasicConstraints: true
ValidateCAA: true
# Configure the metrics server for TLS
# See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls
metricsTLSConfig:

View File

@@ -53,12 +53,6 @@ spec:
prometheus.io/port: '9402'
{{- end }}
spec:
{{- if not .Values.cainjector.serviceAccount.create }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
serviceAccountName: {{ template "cainjector.serviceAccountName" . }}
{{- if hasKey .Values.cainjector "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.cainjector.automountServiceAccountToken }}
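For context, the removed branch only took effect when the chart does not create the service account itself; with `global.imagePullSecrets` set, it rendered a pod-spec fragment roughly like this (a sketch, assuming a pre-created pull secret named `regcred`):

```yaml
spec:
  imagePullSecrets:
    - name: regcred                              # from .Values.global.imagePullSecrets; name is hypothetical
  serviceAccountName: cert-manager-cainjector    # the template-derived account name
```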

View File

@@ -1,4 +1,3 @@
{{- if .Values.cainjector.enabled }}
{{- if and .Values.prometheus.enabled (not .Values.prometheus.podmonitor.enabled) }}
apiVersion: v1
kind: Service
@@ -29,4 +28,3 @@ spec:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- end }}
{{- end }}

View File

@@ -514,6 +514,7 @@ spec:
type: object
required:
- create
- passwordSecretRef
properties:
alias:
description: |-
@@ -525,25 +526,17 @@ spec:
Create enables JKS keystore creation for the Certificate.
If true, a file named `keystore.jks` will be created in the target
Secret resource, encrypted using the password stored in
`passwordSecretRef` or `password`.
`passwordSecretRef`.
The keystore file will be updated immediately.
If the issuer provided a CA certificate, a file named `truststore.jks`
will also be created in the target Secret resource, encrypted using the
password stored in `passwordSecretRef`
containing the issuing Certificate Authority
type: boolean
password:
description: |-
Password provides a literal password used to encrypt the JKS keystore.
Mutually exclusive with passwordSecretRef.
One of password or passwordSecretRef must provide a password with a non-zero length.
type: string
passwordSecretRef:
description: |-
PasswordSecretRef is a reference to a non-empty key in a Secret resource
PasswordSecretRef is a reference to a key in a Secret resource
containing the password used to encrypt the JKS keystore.
Mutually exclusive with password.
One of password or passwordSecretRef must provide a password with a non-zero length.
type: object
required:
- name
@@ -566,31 +559,24 @@ spec:
type: object
required:
- create
- passwordSecretRef
properties:
create:
description: |-
Create enables PKCS12 keystore creation for the Certificate.
If true, a file named `keystore.p12` will be created in the target
Secret resource, encrypted using the password stored in
`passwordSecretRef` or in `password`.
`passwordSecretRef`.
The keystore file will be updated immediately.
If the issuer provided a CA certificate, a file named `truststore.p12` will
also be created in the target Secret resource, encrypted using the
password stored in `passwordSecretRef` containing the issuing Certificate
Authority
type: boolean
password:
description: |-
Password provides a literal password used to encrypt the PKCS#12 keystore.
Mutually exclusive with passwordSecretRef.
One of password or passwordSecretRef must provide a password with a non-zero length.
type: string
passwordSecretRef:
description: |-
PasswordSecretRef is a reference to a non-empty key in a Secret resource
containing the password used to encrypt the PKCS#12 keystore.
Mutually exclusive with password.
One of password or passwordSecretRef must provide a password with a non-zero length.
PasswordSecretRef is a reference to a key in a Secret resource
containing the password used to encrypt the PKCS12 keystore.
type: object
required:
- name
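These keystore fields live on the Certificate resource. A minimal sketch using the secret-reference form that both sides of this diff accept (all names are illustrative):

```yaml
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-cert               # hypothetical
spec:
  secretName: example-cert-tls
  issuerRef:
    name: selfsigned-issuer        # hypothetical issuer
    kind: ClusterIssuer
  dnsNames:
    - example.internal
  keystores:
    jks:
      create: true                 # adds keystore.jks (and truststore.jks) to the target Secret
      passwordSecretRef:
        name: jks-password         # hypothetical Secret holding the encryption password
        key: password
```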
@@ -1390,9 +1376,6 @@ spec:
resource ID of the managed identity, can not be used at the same time as clientID
Cannot be used for Azure Managed Service Identity
type: string
tenantID:
description: tenant ID of the managed identity, can not be used at the same time as resourceID
type: string
resourceGroupName:
description: resource group the DNS zone is located in
type: string
@@ -4706,9 +4689,6 @@ spec:
resource ID of the managed identity, can not be used at the same time as clientID
Cannot be used for Azure Managed Service Identity
type: string
tenantID:
description: tenant ID of the managed identity, can not be used at the same time as resourceID
type: string
resourceGroupName:
description: resource group the DNS zone is located in
type: string
@@ -8435,9 +8415,6 @@ spec:
resource ID of the managed identity, can not be used at the same time as clientID
Cannot be used for Azure Managed Service Identity
type: string
tenantID:
description: tenant ID of the managed identity, can not be used at the same time as resourceID
type: string
resourceGroupName:
description: resource group the DNS zone is located in
type: string
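For orientation, these fields belong to the ACME DNS-01 solver's `azureDNS.managedIdentity` block on an Issuer or ClusterIssuer; a sketch of where they sit (all values are placeholders, and `tenantID` exists only on the side of this diff with the newer CRDs):

```yaml
solvers:
  - dns01:
      azureDNS:
        subscriptionID: 00000000-0000-0000-0000-000000000000  # placeholder
        resourceGroupName: my-dns-rg                          # placeholder resource group of the DNS zone
        hostedZoneName: example.com
        environment: AzurePublicCloud
        managedIdentity:
          clientID: 11111111-1111-1111-1111-111111111111      # mutually exclusive with resourceID
```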

View File

@@ -52,12 +52,6 @@ spec:
prometheus.io/port: '9402'
{{- end }}
spec:
{{- if not .Values.serviceAccount.create }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
serviceAccountName: {{ template "cert-manager.serviceAccountName" . }}
{{- if hasKey .Values "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}

View File

@@ -11,9 +11,7 @@ metadata:
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- range $k, $v := . }}
{{- printf "%s: %s" (tpl $k $) (tpl $v $) | nindent 4 }}
{{- end }}
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}

View File

@@ -52,12 +52,6 @@ spec:
prometheus.io/port: '9402'
{{- end }}
spec:
{{- if not .Values.webhook.serviceAccount.create }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
serviceAccountName: {{ template "webhook.serviceAccountName" . }}
{{- if hasKey .Values.webhook "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.webhook.automountServiceAccountToken }}

View File

@@ -579,7 +579,7 @@
},
"helm-values.config": {
"default": {},
"description": "This property is used to configure options for the controller pod. This allows setting options that would usually be provided using flags.\n\nIf `apiVersion` and `kind` are unspecified they default to the current latest version (currently `controller.config.cert-manager.io/v1alpha1`). You can pin the version by specifying the `apiVersion` yourself.\n\nFor example:\nconfig:\n apiVersion: controller.config.cert-manager.io/v1alpha1\n kind: ControllerConfiguration\n logging:\n verbosity: 2\n format: text\n leaderElectionConfig:\n namespace: kube-system\n kubernetesAPIQPS: 9000\n kubernetesAPIBurst: 9000\n numberOfConcurrentWorkers: 200\n enableGatewayAPI: true\n # Feature gates as of v1.17.0. Listed with their default values.\n # See https://cert-manager.io/docs/cli/controller/\n featureGates:\n AdditionalCertificateOutputFormats: true # BETA - default=true\n AllAlpha: false # ALPHA - default=false\n AllBeta: false # BETA - default=false\n ExperimentalCertificateSigningRequestControllers: false # ALPHA - default=false\n ExperimentalGatewayAPISupport: true # BETA - default=true\n LiteralCertificateSubject: true # BETA - default=true\n NameConstraints: true # BETA - default=true\n OtherNames: false # ALPHA - default=false\n SecretsFilteredCaching: true # BETA - default=true\n ServerSideApply: false # ALPHA - default=false\n StableCertificateRequestName: true # BETA - default=true\n UseCertificateRequestBasicConstraints: false # ALPHA - default=false\n UseDomainQualifiedFinalizer: true # BETA - default=false\n ValidateCAA: false # ALPHA - default=false\n # Configure the metrics server for TLS\n # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls\n metricsTLSConfig:\n dynamic:\n secretNamespace: \"cert-manager\"\n secretName: \"cert-manager-metrics-ca\"\n dnsNames:\n - cert-manager-metrics",
"description": "This property is used to configure options for the controller pod. This allows setting options that would usually be provided using flags.\n\nIf `apiVersion` and `kind` are unspecified they default to the current latest version (currently `controller.config.cert-manager.io/v1alpha1`). You can pin the version by specifying the `apiVersion` yourself.\n\nFor example:\nconfig:\n apiVersion: controller.config.cert-manager.io/v1alpha1\n kind: ControllerConfiguration\n logging:\n verbosity: 2\n format: text\n leaderElectionConfig:\n namespace: kube-system\n kubernetesAPIQPS: 9000\n kubernetesAPIBurst: 9000\n numberOfConcurrentWorkers: 200\n featureGates:\n AdditionalCertificateOutputFormats: true\n DisallowInsecureCSRUsageDefinition: true\n ExperimentalCertificateSigningRequestControllers: true\n ExperimentalGatewayAPISupport: true\n LiteralCertificateSubject: true\n SecretsFilteredCaching: true\n ServerSideApply: true\n StableCertificateRequestName: true\n UseCertificateRequestBasicConstraints: true\n ValidateCAA: true\n # Configure the metrics server for TLS\n # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls\n metricsTLSConfig:\n dynamic:\n secretNamespace: \"cert-manager\"\n secretName: \"cert-manager-metrics-ca\"\n dnsNames:\n - cert-manager-metrics",
"type": "object"
},
"helm-values.containerSecurityContext": {
@@ -1223,7 +1223,7 @@
"type": "object"
},
"helm-values.serviceAccount.annotations": {
"description": "Optional additional annotations to add to the controller's Service Account. Templates are allowed for both keys and values.\nExample using templating:\nannotations:\n \"{{ .Chart.Name }}-helm-chart/version\": \"{{ .Chart.Version }}\"",
"description": "Optional additional annotations to add to the controller's Service Account.",
"type": "object"
},
"helm-values.serviceAccount.automountServiceAccountToken": {

View File

@@ -190,10 +190,7 @@ serviceAccount:
# +docs:property
# name: ""
# Optional additional annotations to add to the controller's Service Account. Templates are allowed for both keys and values.
# Example using templating:
# annotations:
# "{{ .Chart.Name }}-helm-chart/version": "{{ .Chart.Version }}"
# Optional additional annotations to add to the controller's Service Account.
# +docs:property
# annotations: {}
@@ -230,24 +227,17 @@ enableCertificateOwnerRef: false
# kubernetesAPIQPS: 9000
# kubernetesAPIBurst: 9000
# numberOfConcurrentWorkers: 200
# enableGatewayAPI: true
# # Feature gates as of v1.17.0. Listed with their default values.
# # See https://cert-manager.io/docs/cli/controller/
# featureGates:
# AdditionalCertificateOutputFormats: true # BETA - default=true
# AllAlpha: false # ALPHA - default=false
# AllBeta: false # BETA - default=false
# ExperimentalCertificateSigningRequestControllers: false # ALPHA - default=false
# ExperimentalGatewayAPISupport: true # BETA - default=true
# LiteralCertificateSubject: true # BETA - default=true
# NameConstraints: true # BETA - default=true
# OtherNames: false # ALPHA - default=false
# SecretsFilteredCaching: true # BETA - default=true
# ServerSideApply: false # ALPHA - default=false
# StableCertificateRequestName: true # BETA - default=true
# UseCertificateRequestBasicConstraints: false # ALPHA - default=false
# UseDomainQualifiedFinalizer: true # BETA - default=false
# ValidateCAA: false # ALPHA - default=false
# AdditionalCertificateOutputFormats: true
# DisallowInsecureCSRUsageDefinition: true
# ExperimentalCertificateSigningRequestControllers: true
# ExperimentalGatewayAPISupport: true
# LiteralCertificateSubject: true
# SecretsFilteredCaching: true
# ServerSideApply: true
# StableCertificateRequestName: true
# UseCertificateRequestBasicConstraints: true
# ValidateCAA: true
# # Configure the metrics server for TLS
# # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls
# metricsTLSConfig:

View File

@@ -1,2 +1,2 @@
cozystackAPI:
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.31.0-rc.3@sha256:9940cffabedb510397e3c330887aee724c4d232c011df60f4c16891fcfe1d9bf
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.31.0-rc.2@sha256:d3794a5ebd49ee28ef7108213d3bb5053f5247ef62855f4731c7cafb8059a635

View File

@@ -1,5 +1,5 @@
cozystackController:
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.31.0-rc.3@sha256:b2f0de3ae2d7f15956eb7cdec78d2267aeba7e56a7781c70473757df4989a05a
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.31.0-rc.2@sha256:fb7cfdf62a128103954f0cb711b2d21650c0d2f7ff639d41a56f68d454a1e4ea
debug: false
disableTelemetry: false
cozystackVersion: "v0.31.0-rc.3"
cozystackVersion: "v0.31.0-rc.2"

View File

@@ -76,7 +76,7 @@ data:
"kubeappsNamespace": {{ .Release.Namespace | quote }},
"helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
"carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
"appVersion": "v0.31.0-rc.3",
"appVersion": "v0.31.0-rc.2",
"authProxyEnabled": {{ .Values.authProxy.enabled }},
"oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
"oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},

View File

@@ -19,7 +19,7 @@ kubeapps:
image:
registry: ghcr.io/cozystack/cozystack
repository: dashboard
tag: v0.31.0-rc.3
tag: v0.31.0-rc.2
digest: "sha256:a83fe4654f547469cfa469a02bda1273c54bca103a41eb007fdb2e18a7a91e93"
redis:
master:
@@ -35,8 +35,8 @@ kubeapps:
image:
registry: ghcr.io/cozystack/cozystack
repository: kubeapps-apis
tag: v0.31.0-rc.3
digest: "sha256:1447c10fcc9a8de426ec381bce565aa56267d0c9f3bab8fe26ac502d433283c5"
tag: v0.31.0-rc.2
digest: "sha256:db4f33e9ca6969459c9baf0131c2c342cb6c366df16df7361e7cbdeb4a854cea"
pluginConfig:
flux:
packages:

View File

@@ -3,7 +3,7 @@ kamaji:
deploy: false
image:
pullPolicy: IfNotPresent
tag: v0.31.0-rc.3@sha256:5f828637ebd1717a5c2b828352fff7fc14c218c7bbfc2cb2ce55737f9b5bf500
tag: v0.31.0-rc.2@sha256:beb066f5c45cda520e5028222ec26a5e39c2c3c63bc9016e8a6ec49a2379e00c
repository: ghcr.io/cozystack/cozystack/kamaji
resources:
limits:

View File

@@ -1,3 +1,3 @@
portSecurity: true
routes: ""
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.31.0-rc.3@sha256:f3acc1c6dd87cebd76be5afe1789c19780cb24f9518c8bdafa46f823ae4ba46e
image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.31.0-rc.2@sha256:f9b464a94bd1a1fa116bbf77c4bbece3931d03dac1489eb820f94d98176ed5c9

View File

@@ -16,8 +16,6 @@ image-controller image-speaker:
$(eval VERSION := $(shell yq '.appVersion' charts/metallb/Chart.yaml))
docker buildx build images/metallb \
--provenance false \
--builder=$(BUILDER) \
--platform=$(PLATFORM) \
--target $(TARGET) \
--build-arg VERSION=$(VERSION) \
--tag $(REGISTRY)/metallb-$(TARGET):$(VERSION) \
@@ -25,8 +23,8 @@ image-controller image-speaker:
--cache-to type=inline \
--metadata-file images/$(TARGET).json \
--push=$(PUSH) \
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
--load=$(LOAD)
--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack"
--load=1
REPOSITORY="$(REGISTRY)/metallb-$(TARGET)" \
yq -i '.metallb.$(TARGET).image.repository = strenv(REPOSITORY)' values.yaml
TAG=$(VERSION)@$$(yq e '."containerimage.digest"' images/$(TARGET).json -o json -r) \