#!/bin/bash

if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
  echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
  echo 'please set it with the following command:' >&2
  echo >&2
  echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
  echo >&2
  exit 1
fi

if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
  echo "IPv4 forwarding is not enabled!" >&2
  echo 'please enable forwarding with the following command:' >&2
  echo >&2
  echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
  echo >&2
  exit 1
fi

set -x
set -e

# Stop VMs left over from a previous run
kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid) || true

# (Re)create the bridge for the VM network
ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0

# Enable masquerading
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE

rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3

# Prepare cloud-init
for i in 1 2 3; do
  echo "hostname: srv$i" > "srv$i/meta-data"
  echo '#cloud-config' > "srv$i/user-data"
  # NOTE: the body of network-config was lost in the original listing; this is
  # a reconstruction matching the static addressing used below
  # (192.168.123.11-13/24, gateway 192.168.123.1)
  cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - 192.168.123.1$i/24
    gateway4: 192.168.123.1
    nameservers:
      addresses:
        - 8.8.8.8
EOT
  # Build the cloud-init seed image referenced by the VMs below (this command
  # was also lost in the original listing; genisoimage is an assumption)
  ( cd "srv$i" && genisoimage -output seed.img -volid cidata -rational-rock user-data meta-data network-config )
done

# Prepare the Talos nocloud image (the download step was garbled in the
# original; the release URL here is an assumption, adjust it as needed)
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/siderolabs/talos/releases/latest/download/nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi

# Prepare system drives
for i in 1 2 3; do
  cp nocloud-amd64.raw srv$i/system.img
  qemu-img resize srv$i/system.img 20G
done

# Prepare data drives
for i in 1 2 3; do
  qemu-img create srv$i/data.img 100G
done

# Prepare networking
for i in 1 2 3; do
  ip link del cozy-srv$i || true
  ip tuntap add dev cozy-srv$i mode tap
  ip link set cozy-srv$i up
  ip link set cozy-srv$i master cozy-br0
done

# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i \
    -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \
    -display none -daemonize -pidfile srv$i/qemu.pid
done

sleep 5

# Wait for the VMs to start up (port 50000 is the Talos apid)
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

cat > patch.yaml <<\EOT
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOT

cat > patch-controlplane.yaml <<\EOT
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10

cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOT
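# patch.yaml is merged into every machine config, patch-controlplane.yaml only
# into control planes; the three control-plane nodes share the floating VIP
# 192.168.123.10, which every talosctl/kubectl endpoint below points at.
# An optional sanity check of the generated config (a sketch, not part of the
# original flow):
#
#   talosctl validate --config controlplane.yaml --mode cloud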
# Generate configuration
if [ ! -f secrets.yaml ]; then
  talosctl gen secrets
fi

rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane=@patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig

# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i

# Wait for the VMs to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

# Bootstrap etcd on the first node
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

# Wait for etcd to form a quorum
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'

rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig

# Wait for the Kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'

kubectl create ns cozy-system -o yaml --dry-run=client | kubectl apply -f -

kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  bundle-name: "paas-full"
  ipv4-pod-cidr: "10.244.0.0/16"
  ipv4-pod-gateway: "10.244.0.1"
  ipv4-svc-cidr: "10.96.0.0/16"
  ipv4-join-cidr: "100.64.0.0/16"
  root-host: example.org
  api-server-endpoint: https://192.168.123.10:6443
EOT

# Install Cozystack
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -

# Wait for the cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack

# Wait for the HelmReleases to appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
sleep 5

# Wait for all HelmReleases to be installed: turn every HelmRelease into a
# backgrounded `kubectl wait`, then wait for all of them at once
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Fail if any HelmRelease is still not Ready (the header line is filtered out
# by `grep -v NAME`)
failed_hrs=$(kubectl get hr -A | grep -v True)
if [ -n "$(echo "$failed_hrs" | grep -v NAME)" ]; then
  printf 'Failed HelmReleases:\n%s\n' "$failed_hrs" >&2
  exit 1
fi

# Wait for Cluster-API providers
timeout 60 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for the LINSTOR controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

# Wait for all LINSTOR nodes to become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
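# `linstor ps cdp` is short for `linstor physical-storage create-device-pool`:
# each call below formats /dev/vdc (the 100G data drive) as a ZFS pool named
# "data" and registers it as a LINSTOR storage pool of the same name on that
# node. An optional check once all three pools exist (a sketch, not part of
# the original flow):
#
#   kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor storage-pool list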
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data

# Create StorageClasses backed by the LINSTOR "data" pool. NOTE: the manifest
# was truncated in the original listing; the class names and parameters below
# are a reconstruction, not the original content
kubectl create -f- <<\EOT
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/placementCount: "3"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
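# Optional smoke test (a sketch, not part of the original script): confirm
# that the nodes, Talos services, and storage classes are in place before
# deploying workloads.
#
#   talosctl health -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10
#   kubectl get nodes -o wide
#   kubectl get storageclass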