Merge branch 'main' into upd-flux

This commit is contained in:
Andrei Kvapil
2024-06-24 13:13:54 +02:00
committed by GitHub
14 changed files with 372 additions and 16 deletions

hack/e2e.sh (new executable file, 318 lines)

@@ -0,0 +1,318 @@
#!/bin/bash
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
echo 'please set it with following command:' >&2
echo >&2
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
echo >&2
exit 1
fi
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is not enabled!" >&2
echo 'please enable forwarding with the following command:' >&2
echo >&2
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
echo >&2
exit 1
fi
set -x
set -e
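# Clean up leftovers from any previous run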
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid) || true
ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0
# Enable forward & masquerading
echo 1 > /proc/sys/net/ipv4/ip_forward
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 -j MASQUERADE
rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3
# Prepare cloud-init
for i in 1 2 3; do
  echo "local-hostname: srv$i" > "srv$i/meta-data"
  echo '#cloud-config' > "srv$i/user-data"
  cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1$i/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOT
  ( cd srv$i && genisoimage \
      -output seed.img \
      -volid cidata -rational-rock -joliet \
      user-data meta-data network-config
  )
done
# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
  cp nocloud-amd64.raw srv$i/system.img
  qemu-img resize srv$i/system.img 20G
done
# Prepare data drives
for i in 1 2 3; do
  qemu-img create srv$i/data.img 100G
done
# Prepare networking
for i in 1 2 3; do
  ip link del cozy-srv$i || true
  ip tuntap add dev cozy-srv$i mode tap
  ip link set cozy-srv$i up
  ip link set cozy-srv$i master cozy-br0
done
# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \
    -display none -daemonize -pidfile srv$i/qemu.pid
done
sleep 5
# Wait for the VMs to start up
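# (port 50000 is the Talos apid endpoint the nodes expose once booted)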
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
cat > patch.yaml <<\EOT
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  install:
    image: ghcr.io/aenix-io/cozystack/talos:v1.7.1
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.grpc.v1.cri"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create
cluster:
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOT
cat > patch-controlplane.yaml <<\EOT
machine:
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOT
# Generate configuration
if [ ! -f secrets.yaml ]; then
  talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig
# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
# Wait for the VMs to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
# Bootstrap
talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11
# Wait for etcd
timeout 120 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig
# Wait for Kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  bundle-name: "paas-full"
  ipv4-pod-cidr: "10.244.0.0/16"
  ipv4-pod-gateway: "10.244.0.1"
  ipv4-svc-cidr: "10.96.0.0/16"
  ipv4-join-cidr: "100.64.0.0/16"
EOT
# Install Cozystack
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
# Wait for the cozystack deployment to become available
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
# Wait for HelmReleases to appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
sleep 5
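# Generate one backgrounded "kubectl wait" per HelmRelease, then wait for all of them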
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
# Wait for all LINSTOR nodes to become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools:
    - cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses:
    - 192.168.123.200-192.168.123.250
  autoAssign: true
  avoidBuggyIPs: false
EOT
kubectl patch -n tenant-root hr/tenant-root --type=merge -p '{"spec":{ "values":{
  "host": "example.org",
  "ingress": true,
  "monitoring": true,
  "etcd": true
}}}'
# Wait for the HelmReleases to be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
# Wait for the HelmReleases to be installed
kubectl wait --timeout=2m --for=condition=ready hr -n tenant-root etcd ingress monitoring tenant-root
# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait deploy -n tenant-root --timeout=5m --for=condition=available root-ingress-controller
# Wait for etcd
kubectl wait --for=jsonpath=.status.readyReplicas=3 -n tenant-root --timeout=5m sts etcd
# Wait for VictoriaMetrics
kubectl wait deploy -n tenant-root --timeout=5m --for=condition=available vmalert-vmalert vminsert-longterm vminsert-shortterm
kubectl wait --for=jsonpath=.status.readyReplicas=2 -n tenant-root --timeout=5m sts vmalertmanager-alertmanager vmselect-longterm vmselect-shortterm vmstorage-longterm vmstorage-shortterm
# Wait for grafana
kubectl wait --for=condition=ready clusters.postgresql.cnpg.io -n tenant-root grafana-db
kubectl wait deploy -n tenant-root --timeout=5m --for=condition=available grafana-deployment
# Get IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
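
A minimal invocation sketch for this script (assumptions: a Cozystack repository checkout, root privileges for the bridge/tap/sysctl setup, and helm, talosctl, kubectl, qemu, and genisoimage available on the host):

export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)
sudo -E hack/e2e.sh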


@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.2.1
+version: 0.3.0
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to


@@ -35,11 +35,13 @@ more details:

 ### Common parameters

-| Name       | Description                                     | Value   |
-| ---------- | ----------------------------------------------- | ------- |
-| `external` | Enable external access from outside the cluster | `false` |
-| `size`     | Persistent Volume size                          | `10Gi`  |
-| `replicas` | Number of MariaDB replicas                      | `2`     |
+| Name                     | Description                                                                                                               | Value   |
+| ------------------------ | ------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `external`               | Enable external access from outside the cluster                                                                           | `false` |
+| `size`                   | Persistent Volume size                                                                                                    | `10Gi`  |
+| `replicas`               | Number of Postgres replicas                                                                                               | `2`     |
+| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.             | `0`     |
+| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).   | `0`     |

 ### Configuration parameters
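
For orientation, a minimal values sketch that enables quorum-based synchronous replication on a three-replica cluster (illustrative numbers only; maxSyncReplicas must stay lower than the number of instances, per the description above):

replicas: 3
quorum:
  minSyncReplicas: 1
  maxSyncReplicas: 2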


@@ -11,6 +11,9 @@ spec:
     parameters:
       max_wal_senders: "30"
+  minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
+  maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
   monitoring:
     enablePodMonitor: true
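
With values like the sketch above, the rendered CloudNativePG Cluster fragment would presumably read as follows (resource name hypothetical; field placement per the CNPG v1 API):

apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example-postgres
spec:
  instances: 3
  minSyncReplicas: 1
  maxSyncReplicas: 2
  postgresql:
    parameters:
      max_wal_senders: "30"
  monitoring:
    enablePodMonitor: true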


@@ -14,9 +14,24 @@
     },
     "replicas": {
       "type": "number",
-      "description": "Number of MariaDB replicas",
+      "description": "Number of Postgres replicas",
       "default": 2
     },
+    "quorum": {
+      "type": "object",
+      "properties": {
+        "minSyncReplicas": {
+          "type": "number",
+          "description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.",
+          "default": 0
+        },
+        "maxSyncReplicas": {
+          "type": "number",
+          "description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).",
+          "default": 0
+        }
+      }
+    },
     "databases": {
       "type": "object",
       "description": "Databases configuration",


@@ -2,12 +2,19 @@
 ## @param external Enable external access from outside the cluster
 ## @param size Persistent Volume size
-## @param replicas Number of MariaDB replicas
+## @param replicas Number of Postgres replicas
 ##
 external: false
 size: 10Gi
 replicas: 2

+## Configuration for the quorum-based synchronous replication
+## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
+## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
+quorum:
+  minSyncReplicas: 0
+  maxSyncReplicas: 0
+
 ## @section Configuration parameters
 ## @param users [object] Users configuration


@@ -14,7 +14,8 @@ mysql 0.2.0 8b975ff0
 mysql 0.3.0 HEAD
 postgres 0.1.0 f642698
 postgres 0.2.0 7cd7de73
-postgres 0.2.1 HEAD
+postgres 0.2.1 4a97e297
+postgres 0.3.0 HEAD
 rabbitmq 0.1.0 f642698
 rabbitmq 0.2.0 HEAD
 redis 0.1.1 f642698


@@ -25,6 +25,7 @@ image-cozystack:
 		--provenance false \
 		--tag $(REGISTRY)/cozystack:$(call settag,$(TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/cozystack:latest \
+		--platform linux/amd64,linux/arm64 \
 		--cache-to type=inline \
 		--metadata-file images/cozystack.json \
 		--push=$(PUSH) \


@@ -1,4 +1,10 @@
 {
-  "containerimage.config.digest": "sha256:c5641297fd8c314272047ae02461871e73c6eb3bdf64d5acd7f971917a28d48e",
-  "containerimage.digest": "sha256:e4838be7d0032f45213ccd0044deae263cd79f87cb1aa1c2a5c1efcc8337aad8"
+  "buildx.build.ref": "youthful_hertz/youthful_hertz0/aafwjh8j28i98f59smgh3qe86",
+  "containerimage.descriptor": {
+    "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+    "digest": "sha256:e0c0defb9f5b10f5187d4002ccec7d01841e96c7350963f253003c0efeff6cef",
+    "size": 685
+  },
+  "containerimage.digest": "sha256:e0c0defb9f5b10f5187d4002ccec7d01841e96c7350963f253003c0efeff6cef",
+  "image.name": "ghcr.io/aenix-io/cozystack/cozystack:latest"
 }


@@ -3,12 +3,15 @@ FROM golang:alpine3.19 as k8s-await-election-builder
 ARG K8S_AWAIT_ELECTION_GITREPO=https://github.com/LINBIT/k8s-await-election
 ARG K8S_AWAIT_ELECTION_VERSION=0.4.1

+# TARGETARCH is a docker special variable: https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
+ARG TARGETARCH
+
 RUN apk add --no-cache git make
 RUN git clone ${K8S_AWAIT_ELECTION_GITREPO} /usr/local/go/k8s-await-election/ \
  && cd /usr/local/go/k8s-await-election \
  && git reset --hard v${K8S_AWAIT_ELECTION_VERSION} \
  && make \
- && mv ./out/k8s-await-election-amd64 /k8s-await-election
+ && mv ./out/k8s-await-election-${TARGETARCH} /k8s-await-election

 FROM alpine:3.19 AS builder
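
For context, TARGETARCH is set automatically by BuildKit for each entry in --platform, so one multi-platform build now produces both binaries from this Dockerfile; a sketch (image name illustrative):

docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --tag example.org/k8s-await-election:dev \
  .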


@@ -27,4 +27,4 @@ image:
 		--metadata-file images/cilium.json \
 		--push=$(PUSH) \
 		--load=$(LOAD)
-	echo "$(REGISTRY)/cilium:$(call settag,$(TAG))" > images/cilium.tag
+	echo "$(REGISTRY)/cilium:$(call settag,$(CILIUM_TAG))" > images/cilium.tag


@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/cilium:v0.7.0
+ghcr.io/aenix-io/cozystack/cilium:latest


@@ -24,4 +24,4 @@ image:
 		--metadata-file images/kubeovn.json \
 		--push=$(PUSH) \
 		--load=$(LOAD)
-	echo "$(REGISTRY)/kubeovn:$(call settag,$(TAG))" > images/kubeovn.tag
+	echo "$(REGISTRY)/kubeovn:$(call settag,$(KUBEOVN_TAG))" > images/kubeovn.tag


@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/kubeovn:v0.7.0
+ghcr.io/aenix-io/cozystack/kubeovn:v1.13.0