mirror of
https://github.com/outbackdingo/cozystack.git
synced 2026-04-05 07:05:39 +00:00
Compare commits
1 Commits
kubernetes
...
bats
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
79ef4f7399 |
2
Makefile
2
Makefile
@@ -29,10 +29,8 @@ build: build-deps
|
|||||||
|
|
||||||
repos:
|
repos:
|
||||||
rm -rf _out
|
rm -rf _out
|
||||||
make -C packages/library check-version-map
|
|
||||||
make -C packages/apps check-version-map
|
make -C packages/apps check-version-map
|
||||||
make -C packages/extra check-version-map
|
make -C packages/extra check-version-map
|
||||||
make -C packages/library repo
|
|
||||||
make -C packages/system repo
|
make -C packages/system repo
|
||||||
make -C packages/apps repo
|
make -C packages/apps repo
|
||||||
make -C packages/extra repo
|
make -C packages/extra repo
|
||||||
|
|||||||
395
hack/e2e.bats
Executable file
395
hack/e2e.bats
Executable file
@@ -0,0 +1,395 @@
|
|||||||
|
#!/usr/bin/env bats
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Cozystack end‑to‑end provisioning test (Bats)
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
export TALOSCONFIG=$PWD/talosconfig
|
||||||
|
export KUBECONFIG=$PWD/kubeconfig
|
||||||
|
|
||||||
|
# Runs before each @test
|
||||||
|
# Runs before each @test: once any earlier test has failed, a .skip
# marker exists in the bats run tmpdir and every remaining test is
# skipped instead of piling further failures on a broken environment.
# Fixes: the original line carried a stray trailing ']' that was passed
# to 'skip' as a bogus extra argument; the tmpdir path is now quoted.
setup() {
  if [ -f "${BATS_RUN_TMPDIR}/.skip" ]; then
    skip "skip remaining tests"
  fi
}
|
||||||
|
|
||||||
|
# Runs after each @test
|
||||||
|
# Runs after each @test. BATS_TEST_COMPLETED is only set when the test
# body finished successfully; on failure we drop the .skip marker that
# setup() checks, short-circuiting the rest of the suite.
# Fixes: quote "${BATS_RUN_TMPDIR}/.skip" — unquoted it would word-split
# if the bats tmpdir path ever contains whitespace (SC2086).
teardown() {
  [ -n "$BATS_TEST_COMPLETED" ] || touch "${BATS_RUN_TMPDIR}/.skip"
}
|
||||||
|
|
||||||
|
@test "Environment variable COZYSTACK_INSTALLER_YAML is defined" {
|
||||||
|
if [ -z "${COZYSTACK_INSTALLER_YAML:-}" ]; then
|
||||||
|
echo 'COZYSTACK_INSTALLER_YAML environment variable is not set!' >&2
|
||||||
|
echo >&2
|
||||||
|
echo 'Please export it with the following command:' >&2
|
||||||
|
echo ' export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "IPv4 forwarding is enabled" {
|
||||||
|
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
||||||
|
echo "IPv4 forwarding is disabled!" >&2
|
||||||
|
echo >&2
|
||||||
|
echo "Enable it with:" >&2
|
||||||
|
echo " echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Clean previous VMs" {
|
||||||
|
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
|
||||||
|
rm -rf srv1 srv2 srv3
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Prepare networking and masquerading" {
|
||||||
|
ip link del cozy-br0 2>/dev/null || true
|
||||||
|
ip link add cozy-br0 type bridge
|
||||||
|
ip link set cozy-br0 up
|
||||||
|
ip address add 192.168.123.1/24 dev cozy-br0
|
||||||
|
|
||||||
|
# Masquerading rule – idempotent (delete first, then add)
|
||||||
|
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
||||||
|
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Prepare cloud‑init drive for VMs" {
|
||||||
|
mkdir -p srv1 srv2 srv3
|
||||||
|
|
||||||
|
# Generate cloud‑init ISOs
|
||||||
|
for i in 1 2 3; do
|
||||||
|
echo "hostname: srv${i}" > "srv${i}/meta-data"
|
||||||
|
|
||||||
|
cat > "srv${i}/user-data" <<'EOF'
|
||||||
|
#cloud-config
|
||||||
|
EOF
|
||||||
|
|
||||||
|
cat > "srv${i}/network-config" <<EOF
|
||||||
|
version: 2
|
||||||
|
ethernets:
|
||||||
|
eth0:
|
||||||
|
dhcp4: false
|
||||||
|
addresses:
|
||||||
|
- "192.168.123.1${i}/26"
|
||||||
|
gateway4: "192.168.123.1"
|
||||||
|
nameservers:
|
||||||
|
search: [cluster.local]
|
||||||
|
addresses: [8.8.8.8]
|
||||||
|
EOF
|
||||||
|
|
||||||
|
( cd "srv${i}" && genisoimage \
|
||||||
|
-output seed.img \
|
||||||
|
-volid cidata -rational-rock -joliet \
|
||||||
|
user-data meta-data network-config )
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Download Talos NoCloud image" {
|
||||||
|
if [ ! -f nocloud-amd64.raw ]; then
|
||||||
|
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
|
||||||
|
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
|
||||||
|
rm -f nocloud-amd64.raw
|
||||||
|
xz --decompress nocloud-amd64.raw.xz
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Prepare VM disks" {
|
||||||
|
for i in 1 2 3; do
|
||||||
|
cp nocloud-amd64.raw srv${i}/system.img
|
||||||
|
qemu-img resize srv${i}/system.img 20G
|
||||||
|
qemu-img create srv${i}/data.img 100G
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Create tap devices" {
|
||||||
|
for i in 1 2 3; do
|
||||||
|
ip link del cozy-srv${i} 2>/dev/null || true
|
||||||
|
ip tuntap add dev cozy-srv${i} mode tap
|
||||||
|
ip link set cozy-srv${i} up
|
||||||
|
ip link set cozy-srv${i} master cozy-br0
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Boot QEMU VMs" {
|
||||||
|
for i in 1 2 3; do
|
||||||
|
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
|
||||||
|
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
|
||||||
|
-netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
|
||||||
|
-drive file=srv${i}/system.img,if=virtio,format=raw \
|
||||||
|
-drive file=srv${i}/seed.img,if=virtio,format=raw \
|
||||||
|
-drive file=srv${i}/data.img,if=virtio,format=raw \
|
||||||
|
-display none -daemonize -pidfile srv${i}/qemu.pid
|
||||||
|
done
|
||||||
|
|
||||||
|
# Give qemu a few seconds to start up networking
|
||||||
|
sleep 5
|
||||||
|
}
|
||||||
|
|
||||||
|
# Poll once per second (60 s cap) until the Talos apid endpoint
# (TCP port 50000) accepts connections on all three nodes.
@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 bash -c 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}
|
||||||
|
|
||||||
|
@test "Generate Talos cluster configuration" {
|
||||||
|
# Cluster‑wide patches
|
||||||
|
cat > patch.yaml <<'EOF'
|
||||||
|
machine:
|
||||||
|
kubelet:
|
||||||
|
nodeIP:
|
||||||
|
validSubnets:
|
||||||
|
- 192.168.123.0/24
|
||||||
|
extraConfig:
|
||||||
|
maxPods: 512
|
||||||
|
kernel:
|
||||||
|
modules:
|
||||||
|
- name: openvswitch
|
||||||
|
- name: drbd
|
||||||
|
parameters:
|
||||||
|
- usermode_helper=disabled
|
||||||
|
- name: zfs
|
||||||
|
- name: spl
|
||||||
|
registries:
|
||||||
|
mirrors:
|
||||||
|
docker.io:
|
||||||
|
endpoints:
|
||||||
|
- https://mirror.gcr.io
|
||||||
|
files:
|
||||||
|
- content: |
|
||||||
|
[plugins]
|
||||||
|
[plugins."io.containerd.cri.v1.runtime"]
|
||||||
|
device_ownership_from_security_context = true
|
||||||
|
path: /etc/cri/conf.d/20-customization.part
|
||||||
|
op: create
|
||||||
|
|
||||||
|
cluster:
|
||||||
|
apiServer:
|
||||||
|
extraArgs:
|
||||||
|
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||||
|
oidc-client-id: "kubernetes"
|
||||||
|
oidc-username-claim: "preferred_username"
|
||||||
|
oidc-groups-claim: "groups"
|
||||||
|
network:
|
||||||
|
cni:
|
||||||
|
name: none
|
||||||
|
dnsDomain: cozy.local
|
||||||
|
podSubnets:
|
||||||
|
- 10.244.0.0/16
|
||||||
|
serviceSubnets:
|
||||||
|
- 10.96.0.0/16
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Control‑plane‑only patches
|
||||||
|
cat > patch-controlplane.yaml <<'EOF'
|
||||||
|
machine:
|
||||||
|
nodeLabels:
|
||||||
|
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||||
|
$patch: delete
|
||||||
|
network:
|
||||||
|
interfaces:
|
||||||
|
- interface: eth0
|
||||||
|
vip:
|
||||||
|
ip: 192.168.123.10
|
||||||
|
cluster:
|
||||||
|
allowSchedulingOnControlPlanes: true
|
||||||
|
controllerManager:
|
||||||
|
extraArgs:
|
||||||
|
bind-address: 0.0.0.0
|
||||||
|
scheduler:
|
||||||
|
extraArgs:
|
||||||
|
bind-address: 0.0.0.0
|
||||||
|
apiServer:
|
||||||
|
certSANs:
|
||||||
|
- 127.0.0.1
|
||||||
|
proxy:
|
||||||
|
disabled: true
|
||||||
|
discovery:
|
||||||
|
enabled: false
|
||||||
|
etcd:
|
||||||
|
advertisedSubnets:
|
||||||
|
- 192.168.123.0/24
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Generate secrets once
|
||||||
|
if [ ! -f secrets.yaml ]; then
|
||||||
|
talosctl gen secrets
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||||
|
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
|
||||||
|
--config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Apply Talos configuration to the node" {
|
||||||
|
# Apply the configuration to all three nodes
|
||||||
|
for node in 11 12 13; do
|
||||||
|
talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
|
||||||
|
done
|
||||||
|
|
||||||
|
# Wait for Talos services to come up again
|
||||||
|
timeout 60 bash -c 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Bootstrap Talos cluster" {
|
||||||
|
# Bootstrap etcd on the first node
|
||||||
|
timeout 10 bash -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||||
|
|
||||||
|
# Wait until etcd is healthy
|
||||||
|
timeout 180 bash -c 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
timeout 60 bash -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'
|
||||||
|
|
||||||
|
# Retrieve kubeconfig
|
||||||
|
rm -f kubeconfig
|
||||||
|
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||||
|
|
||||||
|
# Wait until all three nodes register in Kubernetes
|
||||||
|
timeout 60 bash -c 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Install Cozystack" {
|
||||||
|
# Create namespace & configmap required by installer
|
||||||
|
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||||
|
kubectl create configmap cozystack -n cozy-system \
|
||||||
|
--from-literal=bundle-name=paas-full \
|
||||||
|
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
|
||||||
|
--from-literal=ipv4-pod-gateway=10.244.0.1 \
|
||||||
|
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
|
||||||
|
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
|
||||||
|
--from-literal=root-host=example.org \
|
||||||
|
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
|
||||||
|
--dry-run=client -o yaml | kubectl apply -f -
|
||||||
|
|
||||||
|
# Apply installer manifests from env variable
|
||||||
|
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
|
||||||
|
|
||||||
|
# Wait for the installer deployment to become available
|
||||||
|
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
|
||||||
|
|
||||||
|
# Wait until HelmReleases appear & reconcile them
|
||||||
|
timeout 60 bash -c 'until kubectl get hr -A | grep -q cozys; do sleep 1; done'
|
||||||
|
sleep 5
|
||||||
|
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | bash -x
|
||||||
|
|
||||||
|
# Fail the test if any HelmRelease is not Ready
|
||||||
|
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
|
||||||
|
kubectl get hr -A
|
||||||
|
fail "Some HelmReleases failed to reconcile"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Cluster-API stack: first wait for the provider Deployment objects to
# exist at all, then for each to report the Available condition.
@test "Wait for Cluster‑API provider deployments" {
  # Wait for Cluster‑API provider deployments
  timeout 60 bash -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}
|
||||||
|
|
||||||
|
@test "Wait for LINSTOR and configure storage" {
|
||||||
|
# Linstor controller and nodes
|
||||||
|
kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
|
||||||
|
timeout 60 bash -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'
|
||||||
|
|
||||||
|
for node in srv1 srv2 srv3; do
|
||||||
|
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
|
||||||
|
done
|
||||||
|
|
||||||
|
# Storage classes
|
||||||
|
kubectl apply -f - <<'EOF'
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: local
|
||||||
|
annotations:
|
||||||
|
storageclass.kubernetes.io/is-default-class: "true"
|
||||||
|
provisioner: linstor.csi.linbit.com
|
||||||
|
parameters:
|
||||||
|
linstor.csi.linbit.com/storagePool: "data"
|
||||||
|
linstor.csi.linbit.com/layerList: "storage"
|
||||||
|
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
allowVolumeExpansion: true
|
||||||
|
---
|
||||||
|
apiVersion: storage.k8s.io/v1
|
||||||
|
kind: StorageClass
|
||||||
|
metadata:
|
||||||
|
name: replicated
|
||||||
|
provisioner: linstor.csi.linbit.com
|
||||||
|
parameters:
|
||||||
|
linstor.csi.linbit.com/storagePool: "data"
|
||||||
|
linstor.csi.linbit.com/autoPlace: "3"
|
||||||
|
linstor.csi.linbit.com/layerList: "drbd storage"
|
||||||
|
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
||||||
|
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
||||||
|
volumeBindingMode: WaitForFirstConsumer
|
||||||
|
allowVolumeExpansion: true
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Wait for MetalLB and configure address pool" {
|
||||||
|
# MetalLB address pool
|
||||||
|
kubectl apply -f - <<'EOF'
|
||||||
|
---
|
||||||
|
apiVersion: metallb.io/v1beta1
|
||||||
|
kind: L2Advertisement
|
||||||
|
metadata:
|
||||||
|
name: cozystack
|
||||||
|
namespace: cozy-metallb
|
||||||
|
spec:
|
||||||
|
ipAddressPools: [cozystack]
|
||||||
|
---
|
||||||
|
apiVersion: metallb.io/v1beta1
|
||||||
|
kind: IPAddressPool
|
||||||
|
metadata:
|
||||||
|
name: cozystack
|
||||||
|
namespace: cozy-metallb
|
||||||
|
spec:
|
||||||
|
addresses: [192.168.123.200-192.168.123.250]
|
||||||
|
autoAssign: true
|
||||||
|
avoidBuggyIPs: false
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# The aggregated API service backs 'kubectl get tenants' etc.; later
# tests patch Tenant resources, so it must be Available first.
@test "Check Cozystack API service" {
  kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}
|
||||||
|
|
||||||
|
@test "Configure Tenant and wait for applications" {
|
||||||
|
# Patch root tenant and wait for its releases
|
||||||
|
kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'
|
||||||
|
|
||||||
|
timeout 60 bash -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready
|
||||||
|
|
||||||
|
if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
|
||||||
|
flux reconcile hr monitoring -n tenant-root --force
|
||||||
|
kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Expose Cozystack services through ingress
|
||||||
|
kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'
|
||||||
|
|
||||||
|
# NGINX ingress controller
|
||||||
|
timeout 60 bash -c 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available
|
||||||
|
|
||||||
|
# etcd statefulset
|
||||||
|
kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m
|
||||||
|
|
||||||
|
# VictoriaMetrics components
|
||||||
|
kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||||
|
kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
|
||||||
|
kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m
|
||||||
|
|
||||||
|
# Grafana
|
||||||
|
kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
|
||||||
|
kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m
|
||||||
|
|
||||||
|
# Verify Grafana via ingress
|
||||||
|
ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
|
||||||
|
curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' | grep -q Found
|
||||||
|
}
|
||||||
|
|
||||||
|
# Enabling the oidc-enabled flag makes the operator deploy the Keycloak
# stack; wait for its three HelmReleases to appear and become ready.
@test "Keycloak OIDC stack is healthy" {
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'

  timeout 120 bash -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}
|
||||||
370
hack/e2e.sh
370
hack/e2e.sh
@@ -1,370 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
|
|
||||||
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
|
|
||||||
echo 'please set it with following command:' >&2
|
|
||||||
echo >&2
|
|
||||||
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
|
|
||||||
echo >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
|
||||||
echo "IPv4 forwarding is not enabled!" >&2
|
|
||||||
echo 'please enable forwarding with the following command:' >&2
|
|
||||||
echo >&2
|
|
||||||
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
|
|
||||||
echo >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
set -x
|
|
||||||
set -e
|
|
||||||
|
|
||||||
kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
|
|
||||||
|
|
||||||
ip link del cozy-br0 || true
|
|
||||||
ip link add cozy-br0 type bridge
|
|
||||||
ip link set cozy-br0 up
|
|
||||||
ip addr add 192.168.123.1/24 dev cozy-br0
|
|
||||||
|
|
||||||
# Enable masquerading
|
|
||||||
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
|
||||||
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
|
||||||
|
|
||||||
rm -rf srv1 srv2 srv3
|
|
||||||
mkdir -p srv1 srv2 srv3
|
|
||||||
|
|
||||||
# Prepare cloud-init
|
|
||||||
for i in 1 2 3; do
|
|
||||||
echo "hostname: srv$i" > "srv$i/meta-data"
|
|
||||||
echo '#cloud-config' > "srv$i/user-data"
|
|
||||||
cat > "srv$i/network-config" <<EOT
|
|
||||||
version: 2
|
|
||||||
ethernets:
|
|
||||||
eth0:
|
|
||||||
dhcp4: false
|
|
||||||
addresses:
|
|
||||||
- "192.168.123.1$i/26"
|
|
||||||
gateway4: "192.168.123.1"
|
|
||||||
nameservers:
|
|
||||||
search: [cluster.local]
|
|
||||||
addresses: [8.8.8.8]
|
|
||||||
EOT
|
|
||||||
|
|
||||||
( cd srv$i && genisoimage \
|
|
||||||
-output seed.img \
|
|
||||||
-volid cidata -rational-rock -joliet \
|
|
||||||
user-data meta-data network-config
|
|
||||||
)
|
|
||||||
done
|
|
||||||
|
|
||||||
# Prepare system drive
|
|
||||||
if [ ! -f nocloud-amd64.raw ]; then
|
|
||||||
wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz \
|
|
||||||
-O nocloud-amd64.raw.xz --show-progress --output-file /dev/stdout --progress=dot:giga 2>/dev/null
|
|
||||||
rm -f nocloud-amd64.raw
|
|
||||||
xz --decompress nocloud-amd64.raw.xz
|
|
||||||
fi
|
|
||||||
for i in 1 2 3; do
|
|
||||||
cp nocloud-amd64.raw srv$i/system.img
|
|
||||||
qemu-img resize srv$i/system.img 20G
|
|
||||||
done
|
|
||||||
|
|
||||||
# Prepare data drives
|
|
||||||
for i in 1 2 3; do
|
|
||||||
qemu-img create srv$i/data.img 100G
|
|
||||||
done
|
|
||||||
|
|
||||||
# Prepare networking
|
|
||||||
for i in 1 2 3; do
|
|
||||||
ip link del cozy-srv$i || true
|
|
||||||
ip tuntap add dev cozy-srv$i mode tap
|
|
||||||
ip link set cozy-srv$i up
|
|
||||||
ip link set cozy-srv$i master cozy-br0
|
|
||||||
done
|
|
||||||
|
|
||||||
# Start VMs
|
|
||||||
for i in 1 2 3; do
|
|
||||||
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
|
|
||||||
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
|
|
||||||
-netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
|
|
||||||
-drive file=srv$i/system.img,if=virtio,format=raw \
|
|
||||||
-drive file=srv$i/seed.img,if=virtio,format=raw \
|
|
||||||
-drive file=srv$i/data.img,if=virtio,format=raw \
|
|
||||||
-display none -daemonize -pidfile srv$i/qemu.pid
|
|
||||||
done
|
|
||||||
|
|
||||||
sleep 5
|
|
||||||
|
|
||||||
# Wait for VM to start up
|
|
||||||
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
|
|
||||||
|
|
||||||
cat > patch.yaml <<\EOT
|
|
||||||
machine:
|
|
||||||
kubelet:
|
|
||||||
nodeIP:
|
|
||||||
validSubnets:
|
|
||||||
- 192.168.123.0/24
|
|
||||||
extraConfig:
|
|
||||||
maxPods: 512
|
|
||||||
kernel:
|
|
||||||
modules:
|
|
||||||
- name: openvswitch
|
|
||||||
- name: drbd
|
|
||||||
parameters:
|
|
||||||
- usermode_helper=disabled
|
|
||||||
- name: zfs
|
|
||||||
- name: spl
|
|
||||||
registries:
|
|
||||||
mirrors:
|
|
||||||
docker.io:
|
|
||||||
endpoints:
|
|
||||||
- https://mirror.gcr.io
|
|
||||||
files:
|
|
||||||
- content: |
|
|
||||||
[plugins]
|
|
||||||
[plugins."io.containerd.cri.v1.runtime"]
|
|
||||||
device_ownership_from_security_context = true
|
|
||||||
path: /etc/cri/conf.d/20-customization.part
|
|
||||||
op: create
|
|
||||||
|
|
||||||
cluster:
|
|
||||||
apiServer:
|
|
||||||
extraArgs:
|
|
||||||
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
|
||||||
oidc-client-id: "kubernetes"
|
|
||||||
oidc-username-claim: "preferred_username"
|
|
||||||
oidc-groups-claim: "groups"
|
|
||||||
network:
|
|
||||||
cni:
|
|
||||||
name: none
|
|
||||||
dnsDomain: cozy.local
|
|
||||||
podSubnets:
|
|
||||||
- 10.244.0.0/16
|
|
||||||
serviceSubnets:
|
|
||||||
- 10.96.0.0/16
|
|
||||||
EOT
|
|
||||||
|
|
||||||
cat > patch-controlplane.yaml <<\EOT
|
|
||||||
machine:
|
|
||||||
nodeLabels:
|
|
||||||
node.kubernetes.io/exclude-from-external-load-balancers:
|
|
||||||
$patch: delete
|
|
||||||
network:
|
|
||||||
interfaces:
|
|
||||||
- interface: eth0
|
|
||||||
vip:
|
|
||||||
ip: 192.168.123.10
|
|
||||||
cluster:
|
|
||||||
allowSchedulingOnControlPlanes: true
|
|
||||||
controllerManager:
|
|
||||||
extraArgs:
|
|
||||||
bind-address: 0.0.0.0
|
|
||||||
scheduler:
|
|
||||||
extraArgs:
|
|
||||||
bind-address: 0.0.0.0
|
|
||||||
apiServer:
|
|
||||||
certSANs:
|
|
||||||
- 127.0.0.1
|
|
||||||
proxy:
|
|
||||||
disabled: true
|
|
||||||
discovery:
|
|
||||||
enabled: false
|
|
||||||
etcd:
|
|
||||||
advertisedSubnets:
|
|
||||||
- 192.168.123.0/24
|
|
||||||
EOT
|
|
||||||
|
|
||||||
# Gen configuration
|
|
||||||
if [ ! -f secrets.yaml ]; then
|
|
||||||
talosctl gen secrets
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
|
||||||
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
|
||||||
export TALOSCONFIG=$PWD/talosconfig
|
|
||||||
|
|
||||||
# Apply configuration
|
|
||||||
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
|
|
||||||
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
|
|
||||||
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
|
|
||||||
|
|
||||||
# Wait for VM to be configured
|
|
||||||
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
|
|
||||||
|
|
||||||
# Bootstrap
|
|
||||||
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
|
||||||
|
|
||||||
# Wait for etcd
|
|
||||||
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
|
|
||||||
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
|
|
||||||
|
|
||||||
rm -f kubeconfig
|
|
||||||
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
|
||||||
export KUBECONFIG=$PWD/kubeconfig
|
|
||||||
|
|
||||||
# Wait for kubernetes nodes appear
|
|
||||||
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
|
|
||||||
kubectl create ns cozy-system -o yaml | kubectl apply -f -
|
|
||||||
kubectl create -f - <<\EOT
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: cozystack
|
|
||||||
namespace: cozy-system
|
|
||||||
data:
|
|
||||||
bundle-name: "paas-full"
|
|
||||||
ipv4-pod-cidr: "10.244.0.0/16"
|
|
||||||
ipv4-pod-gateway: "10.244.0.1"
|
|
||||||
ipv4-svc-cidr: "10.96.0.0/16"
|
|
||||||
ipv4-join-cidr: "100.64.0.0/16"
|
|
||||||
root-host: example.org
|
|
||||||
api-server-endpoint: https://192.168.123.10:6443
|
|
||||||
EOT
|
|
||||||
|
|
||||||
#
|
|
||||||
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
|
|
||||||
|
|
||||||
# wait for cozystack pod to start
|
|
||||||
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
|
|
||||||
|
|
||||||
# wait for helmreleases appear
|
|
||||||
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
|
|
||||||
|
|
||||||
sleep 5
|
|
||||||
|
|
||||||
# Wait for all HelmReleases to be installed
|
|
||||||
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
|
|
||||||
|
|
||||||
failed_hrs=$(kubectl get hr -A | grep -v True)
|
|
||||||
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
|
|
||||||
printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Wait for Cluster-API providers
|
|
||||||
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
|
|
||||||
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
|
|
||||||
|
|
||||||
# Wait for linstor controller
|
|
||||||
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
|
|
||||||
|
|
||||||
# Wait for all linstor nodes become Online
|
|
||||||
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
|
|
||||||
|
|
||||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
|
|
||||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
|
|
||||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
|
|
||||||
|
|
||||||
kubectl create -f- <<EOT
|
|
||||||
---
|
|
||||||
apiVersion: storage.k8s.io/v1
|
|
||||||
kind: StorageClass
|
|
||||||
metadata:
|
|
||||||
name: local
|
|
||||||
annotations:
|
|
||||||
storageclass.kubernetes.io/is-default-class: "true"
|
|
||||||
provisioner: linstor.csi.linbit.com
|
|
||||||
parameters:
|
|
||||||
linstor.csi.linbit.com/storagePool: "data"
|
|
||||||
linstor.csi.linbit.com/layerList: "storage"
|
|
||||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
|
||||||
volumeBindingMode: WaitForFirstConsumer
|
|
||||||
allowVolumeExpansion: true
|
|
||||||
---
|
|
||||||
apiVersion: storage.k8s.io/v1
|
|
||||||
kind: StorageClass
|
|
||||||
metadata:
|
|
||||||
name: replicated
|
|
||||||
provisioner: linstor.csi.linbit.com
|
|
||||||
parameters:
|
|
||||||
linstor.csi.linbit.com/storagePool: "data"
|
|
||||||
linstor.csi.linbit.com/autoPlace: "3"
|
|
||||||
linstor.csi.linbit.com/layerList: "drbd storage"
|
|
||||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
|
||||||
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
|
||||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
|
||||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
|
||||||
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
|
||||||
volumeBindingMode: WaitForFirstConsumer
|
|
||||||
allowVolumeExpansion: true
|
|
||||||
EOT
|
|
||||||
kubectl create -f- <<EOT
|
|
||||||
---
|
|
||||||
apiVersion: metallb.io/v1beta1
|
|
||||||
kind: L2Advertisement
|
|
||||||
metadata:
|
|
||||||
name: cozystack
|
|
||||||
namespace: cozy-metallb
|
|
||||||
spec:
|
|
||||||
ipAddressPools:
|
|
||||||
- cozystack
|
|
||||||
---
|
|
||||||
apiVersion: metallb.io/v1beta1
|
|
||||||
kind: IPAddressPool
|
|
||||||
metadata:
|
|
||||||
name: cozystack
|
|
||||||
namespace: cozy-metallb
|
|
||||||
spec:
|
|
||||||
addresses:
|
|
||||||
- 192.168.123.200-192.168.123.250
|
|
||||||
autoAssign: true
|
|
||||||
avoidBuggyIPs: false
|
|
||||||
EOT
|
|
||||||
|
|
||||||
# Wait for cozystack-api
|
|
||||||
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
|
|
||||||
|
|
||||||
kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
|
|
||||||
"host": "example.org",
|
|
||||||
"ingress": true,
|
|
||||||
"monitoring": true,
|
|
||||||
"etcd": true,
|
|
||||||
"isolated": true
|
|
||||||
}}'
|
|
||||||
|
|
||||||
# Wait for HelmRelease be created
|
|
||||||
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
|
|
||||||
|
|
||||||
# Wait for HelmReleases be installed
|
|
||||||
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress tenant-root
|
|
||||||
|
|
||||||
if ! kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring; then
|
|
||||||
flux reconcile hr monitoring -n tenant-root --force
|
|
||||||
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr monitoring
|
|
||||||
fi
|
|
||||||
|
|
||||||
kubectl patch -n cozy-system cm cozystack --type=merge -p '{"data":{
|
|
||||||
"expose-services": "api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"
|
|
||||||
}}'
|
|
||||||
|
|
||||||
# Wait for nginx-ingress-controller
|
|
||||||
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
|
|
||||||
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller
|
|
||||||
|
|
||||||
# Wait for etcd
|
|
||||||
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd
|
|
||||||
|
|
||||||
# Wait for Victoria metrics
|
|
||||||
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
|
|
||||||
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vlogs/generic
|
|
||||||
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
|
|
||||||
|
|
||||||
# Wait for grafana
|
|
||||||
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
|
|
||||||
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment
|
|
||||||
|
|
||||||
# Get IP of nginx-ingress
|
|
||||||
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
|
|
||||||
|
|
||||||
# Check Grafana
|
|
||||||
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
|
|
||||||
|
|
||||||
|
|
||||||
# Test OIDC
|
|
||||||
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
|
|
||||||
"oidc-enabled": "true"
|
|
||||||
}}'
|
|
||||||
|
|
||||||
timeout 120 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
|
|
||||||
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
|
|
||||||
@@ -16,15 +16,13 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
|
|||||||
exit 0
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
|
||||||
miss_map=$(mktemp)
|
miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
|
||||||
trap 'rm -f "$miss_map"' EXIT
|
|
||||||
echo -n "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file" > $miss_map
|
|
||||||
|
|
||||||
# search accross all tags sorted by version
|
# search accross all tags sorted by version
|
||||||
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
|
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
|
||||||
|
|
||||||
resolved_miss_map=$(
|
resolved_miss_map=$(
|
||||||
while read -r chart version commit; do
|
echo "$miss_map" | while read -r chart version commit; do
|
||||||
# if version is found in HEAD, it's HEAD
|
# if version is found in HEAD, it's HEAD
|
||||||
if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
|
if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
|
||||||
echo "$chart $version HEAD"
|
echo "$chart $version HEAD"
|
||||||
@@ -58,7 +56,7 @@ resolved_miss_map=$(
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
echo "$chart $version $found_tag"
|
echo "$chart $version $found_tag"
|
||||||
done < $miss_map
|
done
|
||||||
)
|
)
|
||||||
|
|
||||||
printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"
|
printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"
|
||||||
|
|||||||
@@ -1,65 +0,0 @@
|
|||||||
#!/bin/sh
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
printf "%s\n" "Usage:" >&2 ;
|
|
||||||
printf -- "%s\n" '---' >&2 ;
|
|
||||||
printf "%s %s\n" "$0" "INPUT_DIR OUTPUT_DIR TMP_DIR [DEPENDENCY_DIR]" >&2 ;
|
|
||||||
printf -- "%s\n" '---' >&2 ;
|
|
||||||
printf "%s\n" "Takes a helm repository from INPUT_DIR, with an optional library repository in" >&2 ;
|
|
||||||
printf "%s\n" "DEPENDENCY_DIR, prepares a view of the git archive at select points in history" >&2 ;
|
|
||||||
printf "%s\n" "in TMP_DIR and packages helm charts, outputting the tarballs to OUTPUT_DIR" >&2 ;
|
|
||||||
}
|
|
||||||
|
|
||||||
if [ "x$(basename $PWD)" != "xpackages" ]
|
|
||||||
then
|
|
||||||
echo "Error: This script must run from the ./packages/ directory" >&2
|
|
||||||
echo >&2
|
|
||||||
usage
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "x$#" != "x3" ] && [ "x$#" != "x4" ]
|
|
||||||
then
|
|
||||||
echo "Error: This script takes 3 or 4 arguments" >&2
|
|
||||||
echo "Got $# arguments:" "$@" >&2
|
|
||||||
echo >&2
|
|
||||||
usage
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
input_dir=$1
|
|
||||||
output_dir=$2
|
|
||||||
tmp_dir=$3
|
|
||||||
|
|
||||||
if [ "x$#" = "x4" ]
|
|
||||||
then
|
|
||||||
dependency_dir=$4
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -rf "${output_dir:?}"
|
|
||||||
mkdir -p "${output_dir}"
|
|
||||||
while read package _ commit
|
|
||||||
do
|
|
||||||
# this lets devs build the packages from a dirty repo for quick local testing
|
|
||||||
if [ "x$commit" = "xHEAD" ]
|
|
||||||
then
|
|
||||||
helm package "${input_dir}/${package}" -d "${output_dir}"
|
|
||||||
continue
|
|
||||||
fi
|
|
||||||
git archive --format tar "${commit}" "${input_dir}/${package}" | tar -xf- -C "${tmp_dir}/"
|
|
||||||
|
|
||||||
# the library chart is not present in older commits and git archive doesn't fail gracefully if the path is not found
|
|
||||||
if [ "x${dependency_dir}" != "x" ] && git ls-tree --name-only "${commit}" "${dependency_dir}" | grep -qx "${dependency_dir}"
|
|
||||||
then
|
|
||||||
git archive --format tar "${commit}" "${dependency_dir}" | tar -xf- -C "${tmp_dir}/"
|
|
||||||
fi
|
|
||||||
helm package "${tmp_dir}/${input_dir}/${package}" -d "${output_dir}"
|
|
||||||
rm -rf "${tmp_dir:?}/${input_dir:?}/${package:?}"
|
|
||||||
if [ "x${dependency_dir}" != "x" ]
|
|
||||||
then
|
|
||||||
rm -rf "${tmp_dir:?}/${dependency_dir:?}"
|
|
||||||
fi
|
|
||||||
done < "${input_dir}/versions_map"
|
|
||||||
helm repo index "${output_dir}"
|
|
||||||
@@ -1,8 +1,14 @@
|
|||||||
OUT=../_out/repos/apps
|
OUT=../../_out/repos/apps
|
||||||
TMP := $(shell mktemp -d)
|
TMP=../../_out/repos/apps/historical
|
||||||
|
|
||||||
repo:
|
repo:
|
||||||
cd .. && ../hack/package_chart.sh apps $(OUT) $(TMP) library
|
rm -rf "$(OUT)"
|
||||||
|
mkdir -p "$(OUT)"
|
||||||
|
awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
|
||||||
|
awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
|
||||||
|
helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
|
||||||
|
cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
|
||||||
|
rm -rf "$(TMP)"
|
||||||
|
|
||||||
fix-chartnames:
|
fix-chartnames:
|
||||||
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
|
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.9.0
|
version: 0.8.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
../../../library/cozy-lib
|
|
||||||
@@ -122,9 +122,9 @@ spec:
|
|||||||
- name: clickhouse
|
- name: clickhouse
|
||||||
image: clickhouse/clickhouse-server:24.9.2.42
|
image: clickhouse/clickhouse-server:24.9.2.42
|
||||||
{{- if .Values.resources }}
|
{{- if .Values.resources }}
|
||||||
resources: {{- include "cozy-lib.resources.sanitize" .Values.resources | nindent 16 }}
|
resources: {{- toYaml .Values.resources | nindent 16 }}
|
||||||
{{- else if ne .Values.resourcesPreset "none" }}
|
{{- else if ne .Values.resourcesPreset "none" }}
|
||||||
resources: {{- include "cozy-lib.resources.preset" .Values.resourcesPreset | nindent 16 }}
|
resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: data-volume-template
|
- name: data-volume-template
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
.helmignore
|
.helmignore
|
||||||
/logos
|
/logos
|
||||||
/Makefile
|
/Makefile
|
||||||
/hack
|
|
||||||
|
|||||||
@@ -1,90 +0,0 @@
|
|||||||
#!/usr/bin/env bats
|
|
||||||
|
|
||||||
@test "Create a tenant with isolated mode enabled" {
|
|
||||||
kubectl create -f - <<EOF
|
|
||||||
apiVersion: apps.cozystack.io/v1alpha1
|
|
||||||
kind: Tenant
|
|
||||||
metadata:
|
|
||||||
name: test4kubernetes
|
|
||||||
namespace: tenant-root
|
|
||||||
spec:
|
|
||||||
etcd: false
|
|
||||||
host: ""
|
|
||||||
ingress: false
|
|
||||||
isolated: true
|
|
||||||
monitoring: false
|
|
||||||
resourceQuotas: {}
|
|
||||||
seaweedfs: false
|
|
||||||
EOF
|
|
||||||
kubectl wait namespace tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.phase}'=Active
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "Create a tenant Kubernetes control plane" {
|
|
||||||
kubectl create -f - <<EOF
|
|
||||||
apiVersion: apps.cozystack.io/v1alpha1
|
|
||||||
kind: Kubernetes
|
|
||||||
metadata:
|
|
||||||
name: test
|
|
||||||
namespace: tenant-test4kubernetes
|
|
||||||
spec:
|
|
||||||
addons:
|
|
||||||
certManager:
|
|
||||||
enabled: false
|
|
||||||
valuesOverride: {}
|
|
||||||
cilium:
|
|
||||||
valuesOverride: {}
|
|
||||||
fluxcd:
|
|
||||||
enabled: false
|
|
||||||
valuesOverride: {}
|
|
||||||
gatewayAPI:
|
|
||||||
enabled: false
|
|
||||||
gpuOperator:
|
|
||||||
enabled: false
|
|
||||||
valuesOverride: {}
|
|
||||||
ingressNginx:
|
|
||||||
enabled: true
|
|
||||||
hosts: []
|
|
||||||
valuesOverride: {}
|
|
||||||
monitoringAgents:
|
|
||||||
enabled: false
|
|
||||||
valuesOverride: {}
|
|
||||||
verticalPodAutoscaler:
|
|
||||||
valuesOverride: {}
|
|
||||||
controlPlane:
|
|
||||||
apiServer:
|
|
||||||
resources: {}
|
|
||||||
resourcesPreset: small
|
|
||||||
controllerManager:
|
|
||||||
resources: {}
|
|
||||||
resourcesPreset: micro
|
|
||||||
konnectivity:
|
|
||||||
server:
|
|
||||||
resources: {}
|
|
||||||
resourcesPreset: micro
|
|
||||||
replicas: 2
|
|
||||||
scheduler:
|
|
||||||
resources: {}
|
|
||||||
resourcesPreset: micro
|
|
||||||
host: ""
|
|
||||||
nodeGroups:
|
|
||||||
md0:
|
|
||||||
ephemeralStorage: 20Gi
|
|
||||||
gpus: []
|
|
||||||
instanceType: u1.medium
|
|
||||||
maxReplicas: 10
|
|
||||||
minReplicas: 0
|
|
||||||
resources:
|
|
||||||
cpu: ""
|
|
||||||
memory: ""
|
|
||||||
roles:
|
|
||||||
- ingress-nginx
|
|
||||||
storageClass: replicated
|
|
||||||
EOF
|
|
||||||
kubectl wait namespace tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.phase}'=Active
|
|
||||||
timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test4kubernetes kubernetes-test; do sleep 1; done'
|
|
||||||
kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test4kubernetes kubernetes-test --timeout=4m
|
|
||||||
kubectl wait tcp -n tenant-test4kubernetes kubernetes-test --timeout=20s --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready --timeout=2m
|
|
||||||
kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test4kubernetes kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
|
|
||||||
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.replicas}'=2 --timeout=1m
|
|
||||||
kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test4kubernetes --timeout=20s --for=jsonpath='{.status.v1beta2.readyReplicas}'=2 --timeout=2m
|
|
||||||
}
|
|
||||||
@@ -9,7 +9,7 @@ clickhouse 0.6.0 1ec10165
|
|||||||
clickhouse 0.6.1 c62a83a7
|
clickhouse 0.6.1 c62a83a7
|
||||||
clickhouse 0.6.2 8267072d
|
clickhouse 0.6.2 8267072d
|
||||||
clickhouse 0.7.0 93bdf411
|
clickhouse 0.7.0 93bdf411
|
||||||
clickhouse 0.9.0 HEAD
|
clickhouse 0.8.0 HEAD
|
||||||
ferretdb 0.1.0 e9716091
|
ferretdb 0.1.0 e9716091
|
||||||
ferretdb 0.1.1 91b0499a
|
ferretdb 0.1.1 91b0499a
|
||||||
ferretdb 0.2.0 6c5cf5bf
|
ferretdb 0.2.0 6c5cf5bf
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ image-e2e-sandbox:
|
|||||||
rm -f images/e2e-sandbox.json
|
rm -f images/e2e-sandbox.json
|
||||||
|
|
||||||
test: ## Run the end-to-end tests in existing sandbox.
|
test: ## Run the end-to-end tests in existing sandbox.
|
||||||
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.sh'
|
docker exec "${SANDBOX_NAME}" sh -c 'cd /workspace && export COZYSTACK_INSTALLER_YAML=$$(helm template -n cozy-system installer ./packages/core/installer) && hack/e2e.bats'
|
||||||
|
|
||||||
delete: ## Remove sandbox from existing Kubernetes cluster.
|
delete: ## Remove sandbox from existing Kubernetes cluster.
|
||||||
docker rm -f "${SANDBOX_NAME}" || true
|
docker rm -f "${SANDBOX_NAME}" || true
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ ARG TARGETOS
|
|||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
|
|
||||||
RUN apt update -q
|
RUN apt update -q
|
||||||
RUN apt install -yq --no-install-recommends genisoimage ca-certificates qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq make git
|
RUN apt install -yq --no-install-recommends genisoimage ca-certificates qemu-kvm qemu-utils iproute2 iptables wget xz-utils netcat curl jq make git bats
|
||||||
RUN curl -sSL "https://github.com/siderolabs/talos/releases/download/v${TALOSCTL_VERSION}/talosctl-${TARGETOS}-${TARGETARCH}" -o /usr/local/bin/talosctl \
|
RUN curl -sSL "https://github.com/siderolabs/talos/releases/download/v${TALOSCTL_VERSION}/talosctl-${TARGETOS}-${TARGETARCH}" -o /usr/local/bin/talosctl \
|
||||||
&& chmod +x /usr/local/bin/talosctl
|
&& chmod +x /usr/local/bin/talosctl
|
||||||
RUN curl -sSL "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/${TARGETOS}/${TARGETARCH}/kubectl" -o /usr/local/bin/kubectl \
|
RUN curl -sSL "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/${TARGETOS}/${TARGETARCH}/kubectl" -o /usr/local/bin/kubectl \
|
||||||
|
|||||||
@@ -1,8 +1,14 @@
|
|||||||
OUT=../_out/repos/extra
|
OUT=../../_out/repos/extra
|
||||||
TMP := $(shell mktemp -d)
|
TMP=../../_out/repos/extra/historical
|
||||||
|
|
||||||
repo:
|
repo:
|
||||||
cd .. && ../hack/package_chart.sh extra $(OUT) $(TMP) library
|
rm -rf "$(OUT)"
|
||||||
|
mkdir -p "$(OUT)"
|
||||||
|
awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
|
||||||
|
awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
|
||||||
|
helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
|
||||||
|
cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/extra
|
||||||
|
rm -rf "$(TMP)"
|
||||||
|
|
||||||
fix-chartnames:
|
fix-chartnames:
|
||||||
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
|
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
|
||||||
|
|||||||
@@ -1,15 +0,0 @@
|
|||||||
OUT=../_out/repos/library
|
|
||||||
TMP := $(shell mktemp -d)
|
|
||||||
|
|
||||||
repo:
|
|
||||||
cd .. && ../hack/package_chart.sh library $(OUT) $(TMP)
|
|
||||||
|
|
||||||
fix-chartnames:
|
|
||||||
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
|
|
||||||
|
|
||||||
gen-versions-map: fix-chartnames
|
|
||||||
../../hack/gen_versions_map.sh
|
|
||||||
|
|
||||||
check-version-map: gen-versions-map
|
|
||||||
git diff --exit-code -- versions_map
|
|
||||||
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# Patterns to ignore when building packages.
|
|
||||||
# This supports shell glob matching, relative path matching, and
|
|
||||||
# negation (prefixed with !). Only one pattern per line.
|
|
||||||
.DS_Store
|
|
||||||
# Common VCS dirs
|
|
||||||
.git/
|
|
||||||
.gitignore
|
|
||||||
.bzr/
|
|
||||||
.bzrignore
|
|
||||||
.hg/
|
|
||||||
.hgignore
|
|
||||||
.svn/
|
|
||||||
# Common backup files
|
|
||||||
*.swp
|
|
||||||
*.bak
|
|
||||||
*.tmp
|
|
||||||
*.orig
|
|
||||||
*~
|
|
||||||
# Various IDEs
|
|
||||||
.project
|
|
||||||
.idea/
|
|
||||||
*.tmproj
|
|
||||||
.vscode/
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
apiVersion: v2
|
|
||||||
name: cozy-lib
|
|
||||||
description: Common Cozystack templates
|
|
||||||
|
|
||||||
# A chart can be either an 'application' or a 'library' chart.
|
|
||||||
#
|
|
||||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
|
||||||
# to be deployed.
|
|
||||||
#
|
|
||||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
|
||||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
|
||||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
|
||||||
type: library
|
|
||||||
|
|
||||||
# This is the chart version. This version number should be incremented each time you make changes
|
|
||||||
# to the chart and its templates, including the app version.
|
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
|
||||||
version: 0.1.0
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
include ../../../scripts/common-envs.mk
|
|
||||||
include ../../../scripts/package.mk
|
|
||||||
|
|
||||||
generate:
|
|
||||||
readme-generator -v values.yaml -s values.schema.json -r README.md
|
|
||||||
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
## Parameters
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
{{/*
|
|
||||||
Copyright Broadcom, Inc. All Rights Reserved.
|
|
||||||
SPDX-License-Identifier: APACHE-2.0
|
|
||||||
*/}}
|
|
||||||
|
|
||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Return a resource request/limit object based on a given preset.
|
|
||||||
These presets are for basic testing and not meant to be used in production
|
|
||||||
{{ include "cozy-lib.resources.preset" "nano" -}}
|
|
||||||
*/}}
|
|
||||||
{{- define "cozy-lib.resources.preset" -}}
|
|
||||||
{{- $presets := dict
|
|
||||||
"nano" (dict
|
|
||||||
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
"micro" (dict
|
|
||||||
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
"small" (dict
|
|
||||||
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
"medium" (dict
|
|
||||||
"requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
"large" (dict
|
|
||||||
"requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
"xlarge" (dict
|
|
||||||
"requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
"2xlarge" (dict
|
|
||||||
"requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
|
|
||||||
)
|
|
||||||
}}
|
|
||||||
{{- if hasKey $presets . -}}
|
|
||||||
{{- index $presets . | toYaml -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" . (join "," (keys $presets)) | fail -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
@@ -1,53 +0,0 @@
|
|||||||
{{- /*
|
|
||||||
A sanitized resource map is a dict with resource-name => resource-quantity.
|
|
||||||
If not in such a form, requests are used, then limits. All resources are set
|
|
||||||
to have equal requests and limits, except CPU, that has only requests. The
|
|
||||||
template expects to receive a dict {"requests":{...}, "limits":{...}} as
|
|
||||||
input, e.g. {{ include "cozy-lib.resources.sanitize" .Values.resources }}.
|
|
||||||
Example input:
|
|
||||||
==============
|
|
||||||
limits:
|
|
||||||
cpu: 100m
|
|
||||||
memory: 1024Mi
|
|
||||||
requests:
|
|
||||||
cpu: 200m
|
|
||||||
memory: 512Mi
|
|
||||||
memory: 256Mi
|
|
||||||
devices.com/nvidia: "1"
|
|
||||||
|
|
||||||
Example output:
|
|
||||||
===============
|
|
||||||
limits:
|
|
||||||
devices.com/nvidia: "1"
|
|
||||||
memory: 256Mi
|
|
||||||
requests:
|
|
||||||
cpu: 200m
|
|
||||||
devices.com/nvidia: "1"
|
|
||||||
memory: 256Mi
|
|
||||||
*/}}
|
|
||||||
{{- define "cozy-lib.resources.sanitize" }}
|
|
||||||
{{- $sanitizedMap := dict }}
|
|
||||||
{{- if hasKey . "limits" }}
|
|
||||||
{{- range $k, $v := .limits }}
|
|
||||||
{{- $_ := set $sanitizedMap $k $v }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if hasKey . "requests" }}
|
|
||||||
{{- range $k, $v := .requests }}
|
|
||||||
{{- $_ := set $sanitizedMap $k $v }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- range $k, $v := . }}
|
|
||||||
{{- if not (or (eq $k "requests") (eq $k "limits")) }}
|
|
||||||
{{- $_ := set $sanitizedMap $k $v }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- $output := dict "requests" dict "limits" dict }}
|
|
||||||
{{- range $k, $v := $sanitizedMap }}
|
|
||||||
{{- $_ := set $output.requests $k $v }}
|
|
||||||
{{- if not (eq $k "cpu") }}
|
|
||||||
{{- $_ := set $output.limits $k $v }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- $output | toYaml }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
{
|
|
||||||
"title": "Chart Values",
|
|
||||||
"type": "object",
|
|
||||||
"properties": {}
|
|
||||||
}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
{}
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
cozy-lib 0.1.0 HEAD
|
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
# Build the manager binary
|
# Build the manager binary
|
||||||
FROM golang:1.24 as builder
|
FROM golang:1.24 as builder
|
||||||
|
|
||||||
ARG VERSION=edge-25.4.1
|
ARG VERSION=edge-25.3.2
|
||||||
ARG TARGETOS
|
ARG TARGETOS
|
||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
|
|
||||||
@@ -23,4 +23,4 @@ WORKDIR /
|
|||||||
COPY --from=builder /workspace/kamaji .
|
COPY --from=builder /workspace/kamaji .
|
||||||
USER 65532:65532
|
USER 65532:65532
|
||||||
|
|
||||||
ENTRYPOINT ["/kamaji"]
|
ENTRYPOINT ["/kamaji"]
|
||||||
Reference in New Issue
Block a user