Compare commits

1 Commit

Author          SHA1        Message                                      Date
Andrei Kvapil   27f4ad9b87  Add NoCloud asset for Hetzner installation   2024-04-16 13:10:32 +02:00
831 changed files with 64015 additions and 91336 deletions


@@ -1,48 +0,0 @@
---
name: CI/CD Workflow
on:
push:
branches:
- main
paths:
- '**.yaml'
- '**/Dockerfile'
- '**/charts/**'
tags:
- 'v*'
env:
IMAGE_NGINX_CACHE: nginx-cache
REGISTRY: ghcr.io/${{ github.repository_owner }}
PUSH: 1
LOAD: 1
NGINX_CACHE_TAG: v0.1.0
TAG: v0.3.1
PLATFORM_ARCH: linux/amd64
jobs:
build-and-push:
name: Build Cozystack
runs-on: ubuntu-latest
services:
registry:
image: registry:2
ports:
- 5000:5000
steps:
- name: Set up Docker Registry
run: |
if [ "$GITHUB_ACTIONS" = "true" ]; then
echo "REGISTRY=ghcr.io/${{ github.repository_owner }}" >> $GITHUB_ENV
else
echo "REGISTRY=localhost:5000/cozystack_local" >> $GITHUB_ENV
fi
- uses: actions/checkout@v3
- name: Build using make
run: |
make
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
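Outside of Actions, the fallback branch of the registry step above corresponds to running a throwaway local registry. A sketch, assuming Docker and a checkout of the repository:

docker run -d --name registry -p 5000:5000 registry:2   # same registry:2 service the job uses
export REGISTRY=localhost:5000/cozystack_local          # the non-Actions fallback from the step above
make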


@@ -1,22 +0,0 @@
name: Run E2E Tests
on:
pull_request:
branches:
- main
jobs:
e2e-tests:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up SSH
uses: webfactory/ssh-agent@v0.5.3
with:
ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }}
- name: Run E2E Tests on Remote Server
run: ssh -p 2222 root@mgr.cp.if.ua 'bash -s' < /home/cozystack/hack/e2e.sh


@@ -1,48 +0,0 @@
name: Lint
on:
push:
branches: [ main ] # Lint only on pushes to the main branch
pull_request:
branches: [ main ] # Lint on PRs targeting the main branch
permissions:
contents: read
jobs:
lint:
name: Super-Linter
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Run Super-Linter
uses: github/super-linter@v4
env:
# To report GitHub Actions status checks
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_ALL_CODEBASE: false # Lint only changed files
VALIDATE_TERRAFORM: false # Disable Terraform linting (remove if you need it)
DEFAULT_BRANCH: main # Set your default branch
# Enable only the linters you need for your project
VALIDATE_JAVASCRIPT_ES: true
VALIDATE_PYTHON_BLACK: true
VALIDATE_HTML: false
VALIDATE_GO: false
VALIDATE_XML: false
VALIDATE_JAVA: false
VALIDATE_DOCKERFILE: false
# turn off JSCPD copy/paste detection, which results in lots of results for examples and devops repos
VALIDATE_JSCPD: false
# turn off shfmt shell formatter as we already have shellcheck
VALIDATE_SHELL_SHFMT: false
VALIDATE_EDITORCONFIG: false
# prevent Kubernetes CRD API's from causing kubeval to fail
# also change schema location to an up-to-date list
# https://github.com/yannh/kubernetes-json-schema/#kubeval
KUBERNETES_KUBEVAL_OPTIONS: --ignore-missing-schemas --schema-location https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/
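Super-Linter can also be run locally with the same settings before opening a PR. A sketch assuming Docker; RUN_LOCAL and the /tmp/lint mount are Super-Linter's documented local mode:

docker run --rm \
  -e RUN_LOCAL=true \
  -e VALIDATE_ALL_CODEBASE=false \
  -e VALIDATE_JSCPD=false \
  -v "$PWD":/tmp/lint \
  github/super-linter:v4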


@@ -1,7 +0,0 @@
---
# MD013/line-length - Line length
MD013:
# Number of characters, default is 80
line_length: 9999
# check code blocks?
code_blocks: false


@@ -1,55 +0,0 @@
yaml-files:
- '*.yaml'
- '*.yml'
- '.yamllint'
rules:
braces:
level: warning
min-spaces-inside: 0
max-spaces-inside: 0
min-spaces-inside-empty: 1
max-spaces-inside-empty: 5
brackets:
level: warning
min-spaces-inside: 0
max-spaces-inside: 0
min-spaces-inside-empty: 1
max-spaces-inside-empty: 5
colons:
level: warning
max-spaces-before: 0
max-spaces-after: 1
commas:
level: warning
max-spaces-before: 0
min-spaces-after: 1
max-spaces-after: 1
comments: disable
comments-indentation: disable
document-end: disable
document-start: disable
empty-lines:
level: warning
max: 2
max-start: 0
max-end: 0
hyphens:
level: warning
max-spaces-after: 1
indentation:
level: warning
spaces: consistent
indent-sequences: true
check-multi-line-strings: false
key-duplicates: enable
new-line-at-end-of-file: disable
new-lines:
type: unix
trailing-spaces: disable
line-length:
max: 130
allow-non-breakable-words: true
allow-non-breakable-inline-mappings: false


@@ -1,73 +0,0 @@
name: Pull Request Workflow
on:
pull_request:
types: [opened, synchronize, reopened, closed]
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build images
run: make build
env:
DOCKER_BUILDKIT: 1
- name: Tag and push images
run: |
PR_NUMBER=${{ github.event.pull_request.number }}
BRANCH_NAME="test-pr${PR_NUMBER}"
git checkout -b ${BRANCH_NAME}
git push origin ${BRANCH_NAME}
# Tag images with PR number
for image in $(docker images --format "{{.Repository}}:{{.Tag}}" | grep ${IMAGE_NAME}); do
docker tag ${image} ${image}-pr${PR_NUMBER}
docker push ${image}-pr${PR_NUMBER}
done
- name: Run tests
run: make test
cleanup:
needs: build-and-test
if: github.event.action == 'closed'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Delete PR-tagged images
run: |
PR_NUMBER=${{ github.event.pull_request.number }}
for image in $(docker images --format "{{.Repository}}:{{.Tag}}" | grep ${IMAGE_NAME} | grep "pr${PR_NUMBER}"); do
docker rmi ${image}
# Note: Docker has no 'push --delete'; removing the tag from the
# registry itself would require the GitHub Packages API.
done


@@ -1,51 +0,0 @@
name: Release Workflow
on:
release:
types: [published]
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
test-and-release:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Run tests
run: make test
- name: Build images
run: make build
env:
DOCKER_BUILDKIT: 1
- name: Tag and push release images
run: |
VERSION=${{ github.event.release.tag_name }}
for image in $(docker images --format "{{.Repository}}:{{.Tag}}" | grep ${IMAGE_NAME}); do
docker tag ${image} ${image%:*}:${VERSION}
docker push ${image%:*}:${VERSION}
done
- name: Create release notes
uses: softprops/action-gh-release@v1
with:
files: |
README.md
CHANGELOG.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -3,8 +3,6 @@
build:
make -C packages/apps/http-cache image
make -C packages/apps/kubernetes image
make -C packages/system/cilium image
make -C packages/system/kubeovn image
make -C packages/system/dashboard image
make -C packages/core/installer image
make manifests
@@ -20,8 +18,6 @@ repos:
make -C packages/system repo
make -C packages/apps repo
make -C packages/extra repo
mkdir -p _out/logos
cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/
assets:
make -C packages/core/installer/ assets
make -C packages/core/talos/ assets


@@ -1,318 +0,0 @@
#!/bin/bash
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
echo 'please set it with following command:' >&2
echo >&2
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
echo >&2
exit 1
fi
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
echo "IPv4 forwarding is not enabled!" >&2
echo 'please enable forwarding with the following command:' >&2
echo >&2
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
echo >&2
exit 1
fi
set -x
set -e
kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0
# Enable forward & masquerading
echo 1 > /proc/sys/net/ipv4/ip_forward
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 -j MASQUERADE
rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3
# Prepare cloud-init
for i in 1 2 3; do
echo "local-hostname: srv$i" > "srv$i/meta-data"
echo '#cloud-config' > "srv$i/user-data"
cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
eth0:
dhcp4: false
addresses:
- "192.168.123.1$i/26"
gateway4: "192.168.123.1"
nameservers:
search: [cluster.local]
addresses: [8.8.8.8]
EOT
( cd srv$i && genisoimage \
-output seed.img \
-volid cidata -rational-rock -joliet \
user-data meta-data network-config
)
done
# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
rm -f nocloud-amd64.raw
xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
cp nocloud-amd64.raw srv$i/system.img
qemu-img resize srv$i/system.img 20G
done
# Prepare data drives
for i in 1 2 3; do
qemu-img create srv$i/data.img 100G
done
# Prepare networking
for i in 1 2 3; do
ip link del cozy-srv$i || true
ip tuntap add dev cozy-srv$i mode tap
ip link set cozy-srv$i up
ip link set cozy-srv$i master cozy-br0
done
# Start VMs
for i in 1 2 3; do
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
-drive file=srv$i/system.img,if=virtio,format=raw \
-drive file=srv$i/seed.img,if=virtio,format=raw \
-drive file=srv$i/data.img,if=virtio,format=raw \
-display none -daemonize -pidfile srv$i/qemu.pid
done
sleep 5
# Wait for the VMs to start up
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
cat > patch.yaml <<\EOT
machine:
kubelet:
nodeIP:
validSubnets:
- 192.168.123.0/24
extraConfig:
maxPods: 512
kernel:
modules:
- name: openvswitch
- name: drbd
parameters:
- usermode_helper=disabled
- name: zfs
- name: spl
install:
image: ghcr.io/aenix-io/cozystack/talos:v1.7.1
files:
- content: |
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
device_ownership_from_security_context = true
path: /etc/cri/conf.d/20-customization.part
op: create
cluster:
network:
cni:
name: none
dnsDomain: cozy.local
podSubnets:
- 10.244.0.0/16
serviceSubnets:
- 10.96.0.0/16
EOT
cat > patch-controlplane.yaml <<\EOT
machine:
network:
interfaces:
- interface: eth0
vip:
ip: 192.168.123.10
cluster:
allowSchedulingOnControlPlanes: true
controllerManager:
extraArgs:
bind-address: 0.0.0.0
scheduler:
extraArgs:
bind-address: 0.0.0.0
apiServer:
certSANs:
- 127.0.0.1
proxy:
disabled: true
discovery:
enabled: false
etcd:
advertisedSubnets:
- 192.168.123.0/24
EOT
# Gen configuration
if [ ! -f secrets.yaml ]; then
talosctl gen secrets
fi
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig
# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
# Wait for the VMs to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
# Bootstrap
talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11
# Wait for etcd
timeout 120 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig
# Wait for Kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
kubectl create ns cozy-system
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
name: cozystack
namespace: cozy-system
data:
bundle-name: "paas-full"
ipv4-pod-cidr: "10.244.0.0/16"
ipv4-pod-gateway: "10.244.0.1"
ipv4-svc-cidr: "10.96.0.0/16"
ipv4-join-cidr: "100.64.0.0/16"
EOT
#
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
# wait for cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
# wait for HelmReleases to appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
sleep 5
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
# Wait for all LINSTOR nodes to become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: local
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/layerList: "storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: replicated
provisioner: linstor.csi.linbit.com
parameters:
linstor.csi.linbit.com/storagePool: "data"
linstor.csi.linbit.com/autoPlace: "3"
linstor.csi.linbit.com/layerList: "drbd storage"
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: cozystack
namespace: cozy-metallb
spec:
ipAddressPools:
- cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: cozystack
namespace: cozy-metallb
spec:
addresses:
- 192.168.123.200-192.168.123.250
autoAssign: true
avoidBuggyIPs: false
EOT
kubectl patch -n tenant-root hr/tenant-root --type=merge -p '{"spec":{ "values":{
"host": "example.org",
"ingress": true,
"monitoring": true,
"etcd": true
}}}'
# Wait for the HelmReleases to be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
# Wait for the HelmReleases to be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller
# Wait for etcd
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd
# Wait for VictoriaMetrics
kubectl wait --timeout=5m --for=condition=available deploy -n tenant-root vmalert-vmalert vminsert-longterm vminsert-shortterm
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=2 -n tenant-root sts vmalertmanager-alertmanager vmselect-longterm vmselect-shortterm vmstorage-longterm vmstorage-shortterm
# Wait for grafana
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment
# Get IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
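Taken together with the precondition checks at the top, this script (apparently hack/e2e.sh, referenced by the E2E workflow above) is invoked roughly as follows, as root:

export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)
echo 1 > /proc/sys/net/ipv4/ip_forward
bash hack/e2e.sh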


@@ -20,28 +20,9 @@ miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if
resolved_miss_map=$(
echo "$miss_map" | while read chart version commit; do
if [ "$commit" = HEAD ]; then
line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')
if [ "$change_commit" = "00000000" ]; then
# Not committed yet, use the previous commit
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
if [ $(echo $commit | cut -c1) = "^" ]; then
# Previous commit does not exist
commit=$(echo $commit | cut -c2-)
fi
else
# Committed, but versions_map wasn't updated
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
if [ $(echo $change_commit | cut -c1) = "^" ]; then
# Previous commit does not exist
commit=$(echo $change_commit | cut -c2-)
else
commit=$(git describe --always "$change_commit~1")
fi
fi
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
commit=$(git describe --always "$change_commit~1")
fi
echo "$chart $version $commit"
done

hack/prepare_release.sh Executable file

@@ -0,0 +1,25 @@
#!/bin/sh
set -e
if [ -z "$1" ]; then
echo "Please pass version in the first argument"
echo "Example: $0 0.2.0"
exit 1
fi
version=$1
talos_version=$(awk '/^version:/ {print $2}' packages/core/installer/images/talos/profiles/installer.yaml)
set -x
sed -i "/^TAG / s|=.*|= v${version}|" \
packages/apps/http-cache/Makefile \
packages/apps/kubernetes/Makefile \
packages/core/installer/Makefile \
packages/system/dashboard/Makefile
sed -i "/^VERSION / s|=.*|= ${version}|" \
packages/core/Makefile \
packages/system/Makefile
make -C packages/core fix-chartnames
make -C packages/system fix-chartnames
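Invocation, per the script's own usage message:

./hack/prepare_release.sh 0.2.0   # rewrites TAG/VERSION across the Makefiles listed above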


@@ -15,6 +15,13 @@ metadata:
namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cozystack
namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -54,11 +61,6 @@ spec:
selector:
matchLabels:
app: cozystack
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
@@ -68,7 +70,7 @@ spec:
serviceAccountName: cozystack
containers:
- name: cozystack
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.9.0"
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.2.0"
env:
- name: KUBERNETES_SERVICE_HOST
value: localhost
@@ -87,7 +89,7 @@ spec:
fieldRef:
fieldPath: metadata.name
- name: darkhttpd
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.9.0"
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.2.0"
command:
- /usr/bin/darkhttpd
- /cozystack/assets


@@ -7,11 +7,11 @@ repo:
awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
cd "$(OUT)" && helm repo index .
rm -rf "$(TMP)"
fix-chartnames:
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
gen-versions-map: fix-chartnames
../../hack/gen_versions_map.sh


@@ -1,3 +0,0 @@
.helmignore
/logos
/Makefile


@@ -1,25 +0,0 @@
apiVersion: v2
name: clickhouse
description: Managed ClickHouse service
icon: /logos/clickhouse.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "24.3.0"


@@ -1,2 +0,0 @@
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md


@@ -1,17 +0,0 @@
# Managed ClickHouse Service
## Parameters
### Common parameters
| Name | Description | Value |
| ---------- | ----------------------------- | ------ |
| `size` | Persistent Volume size | `10Gi` |
| `shards`   | Number of ClickHouse shards   | `1`    |
| `replicas` | Number of ClickHouse replicas | `2`    |
### Configuration parameters
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |


@@ -1 +0,0 @@
<svg height="2222" viewBox="0 0 9 8" width="2500" xmlns="http://www.w3.org/2000/svg"><path d="m0 7h1v1h-1z" fill="#f00"/><path d="m0 0h1v7h-1zm2 0h1v8h-1zm2 0h1v8h-1zm2 0h1v8h-1zm2 3.25h1v1.5h-1z" fill="#fc0"/></svg>



@@ -1,37 +0,0 @@
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
name: "{{ .Release.Name }}"
spec:
{{- with .Values.size }}
defaults:
templates:
dataVolumeClaimTemplate: data-volume-template
{{- end }}
configuration:
{{- with .Values.users }}
users:
{{- range $name, $u := . }}
{{ $name }}/password_sha256_hex: {{ sha256sum $u.password }}
{{ $name }}/profile: {{ ternary "readonly" "default" (index $u "readonly" | default false) }}
{{ $name }}/networks/ip: ["::/0"]
{{- end }}
{{- end }}
profiles:
readonly/readonly: "1"
clusters:
- name: "clickhouse"
layout:
shardsCount: {{ .Values.shards }}
replicasCount: {{ .Values.replicas }}
{{- with .Values.size }}
templates:
volumeClaimTemplates:
- name: data-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ . }}
{{- end }}
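To preview the rendered ClickHouseInstallation, a sketch (the chart path is an assumption based on the repository layout):

helm template ch packages/apps/clickhouse \
  --set users.user1.password=strongpassword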


@@ -1,21 +0,0 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"shards": {
"type": "number",
"description": "Number of Clickhouse replicas",
"default": 1
},
"replicas": {
"type": "number",
"description": "Number of Clickhouse shards",
"default": 2
}
}
}


@@ -1,22 +0,0 @@
## @section Common parameters
## @param size Persistent Volume size
## @param shards Number of ClickHouse shards
## @param replicas Number of ClickHouse replicas
##
size: 10Gi
shards: 1
replicas: 2
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
## user1:
## password: strongpassword
## user2:
## readonly: true
## password: hackme
##
users: {}


@@ -1,3 +0,0 @@
.helmignore
/logos
/Makefile


@@ -1,25 +0,0 @@
apiVersion: v2
name: ferretdb
description: Managed FerretDB service
icon: /logos/ferretdb.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.22.0"


@@ -1,2 +0,0 @@
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md


@@ -1,34 +0,0 @@
# Managed FerretDB Service
## Parameters
### Common parameters
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of Postgres replicas | `2` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed. | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances). | `0` |
### Configuration parameters
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |
### Backup parameters
| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |


@@ -1,54 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="200mm"
height="195.323mm"
viewBox="0 0 200 195.323"
version="1.1"
id="svg948"
inkscape:version="1.1.1 (c3084ef, 2021-09-22)"
sodipodi:docname="ferretdb.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview950"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:document-units="mm"
showgrid="false"
inkscape:zoom="0.64052329"
inkscape:cx="-69.474445"
inkscape:cy="579.99452"
inkscape:window-width="3440"
inkscape:window-height="1387"
inkscape:window-x="0"
inkscape:window-y="25"
inkscape:window-maximized="1"
inkscape:current-layer="layer1" />
<defs
id="defs945" />
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<path
d="M 95.871302,0.25836635 C 73.52529,3.312081 51.107429,17.502874 38.138123,36.831094 c -2.083712,3.125567 -5.676318,9.628178 -5.676318,10.274847 0,0.0719 1.724451,-0.970003 3.808162,-2.335187 25.651206,-16.921175 56.260205,-20.046742 81.156963,-8.298921 5.42484,2.550751 8.83781,5.029648 13.68783,9.879665 8.15521,8.191137 14.11894,19.148592 18.25044,33.554942 2.15556,7.400765 3.95187,17.495992 4.4189,24.35786 0.10778,1.86816 0.39518,3.52075 0.57482,3.62853 1.00593,0.61075 5.53261,-5.96372 8.73003,-12.645965 5.06558,-10.634111 7.43669,-21.0886 7.40077,-32.692714 -0.036,-16.418213 -5.71224,-30.213814 -17.13674,-41.710153 C 143.22184,10.640997 130.43216,3.6354156 117.03174,0.90503536 113.90617,0.29429263 111.6069,0.11466224 105.75097,0.00688441 101.69132,-0.02904391 97.272414,0.07873086 95.871302,0.25836635 Z"
id="path824"
style="fill:#216778;stroke-width:0.0359261" />
<path
d="m 48.377049,48.219658 c -2.335194,1.149625 -6.251134,4.742233 -9.700036,8.873735 -1.54482,1.832222 -3.880014,4.095564 -5.604464,5.388902 -4.02372,3.017795 -10.885597,9.735963 -14.370424,14.083015 -18.1785821,22.525641 -23.2441594,48.21277 -14.585984,74.00768 7.113359,21.12453 23.567499,35.13569 48.859444,41.4946 9.843739,2.51482 24.60935,3.91593 30.788632,2.94593 l 1.580747,-0.25148 -2.442972,-1.43704 C 69.42972,185.49312 60.017093,172.27233 57.39449,157.57857 c -0.790373,-4.45483 -0.826299,-12.35856 -0.03593,-16.70562 1.760377,-9.77189 6.682247,-18.7534 13.364494,-24.35786 3.125567,-2.6226 8.586328,-5.31706 12.933381,-6.35891 6.538543,-1.58075 10.526335,-3.37705 14.657827,-6.64633 2.658538,-2.0837 4.993728,-5.2452 6.933738,-9.340763 1.65259,-3.484834 5.17335,-14.550063 5.17335,-16.310439 0,-1.221482 -1.25742,-2.874082 -3.05372,-3.987789 -0.93408,-0.574812 -2.40705,-0.898147 -6.17927,-1.293338 C 84.949773,70.888992 76.866409,67.943063 67.094521,60.218953 65.693406,59.105246 64.00488,57.847837 63.322285,57.416727 62.639691,57.021536 61.2745,55.512639 60.340423,54.111526 c -2.838159,-4.131492 -6.358912,-6.790025 -9.053367,-6.825953 -0.574817,0 -1.904081,0.431119 -2.910011,0.934085 z m 17.639695,16.633763 c 1.221486,0.610741 2.55075,1.401113 2.981863,1.724447 l 0.790373,0.646669 -1.257411,5.029649 c -1.077783,4.38298 -1.257413,5.496687 -1.149634,8.622257 0.107777,3.089642 0.215555,3.77223 0.934077,4.778161 1.18556,1.616673 3.233345,2.586676 5.532613,2.586676 3.269271,0 5.820021,-1.86815 10.059296,-7.436693 1.221486,-1.580744 2.19149,-2.442973 3.628532,-3.125571 2.227415,-1.113706 3.808162,-1.221481 8.765958,-0.790372 l 3.305202,0.323335 v 1.940007 c 0,3.053724 1.616677,4.814099 4.921857,5.317065 l 1.58075,0.21555 -0.57481,1.329266 c -2.51483,6.071499 -8.981521,12.93338 -15.05302,15.987093 -0.970004,0.46703 -3.161494,1.32926 -4.850018,1.90408 -2.766306,0.89815 -3.520754,1.00593 -8.262994,1.00593 -4.706313,0 -5.496687,-0.10778 -8.083363,-0.97001 -7.795954,-2.58667 -13.58005,-8.334832 -16.202652,-16.058942 -0.934077,-2.73038 -0.970004,-10.670039 -0.03593,-13.975231 1.257413,-4.562611 3.484828,-8.33485 5.820023,-9.80782 1.508893,-0.970003 4.311126,-0.646669 7.149285,0.754454 z"
id="path826"
style="fill:#216778;stroke-width:0.0359261" />
<path
d="m 181.55494,78.397542 c 0,1.616673 -1.7963,9.089295 -3.30519,13.759681 -5.67632,17.495987 -15.95117,33.195677 -29.35159,44.656087 -9.41263,8.08336 -16.09488,11.64004 -26.69306,14.26265 -6.82596,1.68852 -11.28078,2.22741 -19.93897,2.44297 -10.813737,0.2874 -21.483776,-0.6826 -31.040108,-2.76631 -1.832229,-0.39519 -3.377049,-0.64667 -3.484828,-0.53889 -0.431112,0.39519 1.221487,5.89187 2.658529,8.80189 2.622602,5.38891 5.604466,9.41262 10.921522,14.72968 5.604465,5.60446 9.771888,8.6941 16.238576,12.03522 16.023019,8.263 34.417169,9.37671 53.278339,3.1615 19.90304,-6.50262 34.52495,-18.25043 42.39275,-34.05791 5.24521,-10.4904 7.40077,-21.69934 6.6104,-34.489 -0.97001,-15.77155 -6.79003,-31.219754 -15.23265,-40.344967 -1.32926,-1.437041 -2.55075,-2.586676 -2.73038,-2.586676 -0.17963,0 -0.32334,0.431109 -0.32334,0.934075 z"
id="path828"
style="fill:#216778;stroke-width:0.0359261" />
</g>
</svg>



@@ -1,99 +0,0 @@
{{- if .Values.backup.enabled }}
{{ $image := .Files.Get "images/backup.json" | fromJson }}
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ .Release.Name }}-backup
spec:
schedule: "{{ .Values.backup.schedule }}"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 2
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: mysqldump
image: "{{ index $image "image.name" }}@{{ index $image "containerimage.digest" }}"
command:
- /bin/sh
- /scripts/backup.sh
env:
- name: REPO_PREFIX
value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
- name: CLEANUP_STRATEGY
value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3AccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3SecretKey
- name: AWS_DEFAULT_REGION
value: {{ .Values.backup.s3Region }}
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: resticPassword
volumeMounts:
- mountPath: /scripts
name: scripts
- mountPath: /tmp
name: tmp
- mountPath: /.cache
name: cache
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumes:
- name: scripts
secret:
secretName: {{ .Release.Name }}-backup-script
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
securityContext:
runAsNonRoot: true
runAsUser: 9000
runAsGroup: 9000
seccompProfile:
type: RuntimeDefault
{{- end }}


@@ -1,50 +0,0 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup-script
stringData:
backup.sh: |
#!/bin/sh
set -e
set -o pipefail
JOB_ID="job-$(uuidgen|cut -f1 -d-)"
DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle the list
echo "Job ID: $JOB_ID"
echo "Target repo: $REPO_PREFIX"
echo "Cleanup strategy: $CLEANUP_STRATEGY"
echo "Start backup for:"
echo "$DB_LIST"
echo
echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
pg_dump -Z0 -Ft -d "$db" | \
restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
)
done
echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
echo
echo "Run cleanup:"
echo
echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
restic prune -r "s3:${REPO_PREFIX}/$db"
)
done
echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
{{- end }}
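A restore path is not provided by the chart; a hypothetical counterpart to the script above could look like this (restic's dump command streams a single file from a snapshot, and the dumps are pg_dump tar archives; the database name mydb is a placeholder):

# Hypothetical restore sketch; REPO_PREFIX and the credentials mirror the backup job env.
restic -r "s3:${REPO_PREFIX}/mydb" dump latest dump.tar > /tmp/dump.tar
pg_restore -d mydb --clean --if-exists /tmp/dump.tar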


@@ -1,11 +0,0 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup
stringData:
s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
{{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
{{- end }}
ports:
- name: ferretdb
port: 27017
selector:
app: {{ .Release.Name }}


@@ -1,26 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ .Release.Name }}
spec:
containers:
- name: ferretdb
image: ghcr.io/ferretdb/ferretdb:1.22.0
ports:
- containerPort: 27017
env:
- name: FERRETDB_POSTGRESQL_URL
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-app
key: uri


@@ -1,66 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-init-job
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
name: {{ .Release.Name }}-init-job
annotations:
checksum/config: {{ include (print $.Template.BasePath "/init-script.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: postgres
image: ghcr.io/cloudnative-pg/postgresql:15.3
command:
- bash
- /scripts/init.sh
env:
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/secret
name: secret
- mountPath: /scripts
name: scripts
securityContext:
fsGroup: 26
runAsGroup: 26
runAsNonRoot: true
runAsUser: 26
seccompProfile:
type: RuntimeDefault
volumes:
- name: secret
secret:
secretName: {{ .Release.Name }}-postgres-superuser
- name: scripts
secret:
secretName: {{ .Release.Name }}-init-script


@@ -1,101 +0,0 @@
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-init-script
stringData:
init.sh: |
#!/bin/bash
set -e
echo "== create users"
{{- if .Values.users }}
psql -v ON_ERROR_STOP=1 <<\EOT
{{- range $user, $u := .Values.users }}
SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
ALTER ROLE {{ $user }} WITH PASSWORD '{{ $u.password }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
{{- end }}
EOT
{{- end }}
echo "== delete users"
MANAGED_USERS=$(echo '\du+' | psql | awk -F'|' '$4 == " user managed by helm" {print $1}' | awk NF=NF RS= OFS=' ')
DEFINED_USERS="{{ join " " (keys .Values.users) }}"
DELETE_USERS=$(for user in $MANAGED_USERS; do case " $DEFINED_USERS " in *" $user "*) :;; *) echo $user;; esac; done)
echo "users to delete: $DELETE_USERS"
for user in $DELETE_USERS; do
# https://stackoverflow.com/a/51257346/2931267
psql -v ON_ERROR_STOP=1 --echo-all <<EOT
REASSIGN OWNED BY $user TO postgres;
DROP OWNED BY $user;
DROP USER $user;
EOT
done
echo "== create roles"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
SELECT 'CREATE ROLE app_admin NOINHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'app_admin')\gexec
COMMENT ON ROLE app_admin IS 'role managed by helm';
EOT
echo "== grant privileges on databases to roles"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
ALTER DATABASE app OWNER TO app_admin;
DO $$
DECLARE
schema_record record;
BEGIN
-- Loop over all schemas
FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
-- Changing Schema Ownership
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');
-- Add rights for the admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL SEQUENCES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL FUNCTIONS IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', schema_record.schema_name, 'app_admin');
END LOOP;
END$$;
EOT
echo "== setup event trigger for schema creation"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
CREATE OR REPLACE FUNCTION auto_grant_schema_privileges()
RETURNS event_trigger LANGUAGE plpgsql AS $$
DECLARE
obj record;
BEGIN
FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE command_tag = 'CREATE SCHEMA' LOOP
-- Set owner for schema
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', obj.object_identity, 'app_admin');
-- Set privileges for admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', obj.object_identity, 'app_admin');
END LOOP;
END;
$$;
DROP EVENT TRIGGER IF EXISTS trigger_auto_grant;
CREATE EVENT TRIGGER trigger_auto_grant ON ddl_command_end
WHEN TAG IN ('CREATE SCHEMA')
EXECUTE PROCEDURE auto_grant_schema_privileges();
EOT
echo "== assign roles to users"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
GRANT app_admin TO app;
{{- range $user, $u := $.Values.users }}
GRANT app_admin TO {{ $user }};
{{- end }}
EOT
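A quick, hypothetical sanity check of the event trigger above, run against the app database once the init job has completed:

psql -d app -c 'CREATE SCHEMA demo;'
psql -d app -c '\dn+ demo'   # the owner column should show app_admin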


@@ -1,45 +0,0 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: {{ .Release.Name }}-postgres
spec:
instances: {{ .Values.replicas }}
enableSuperuserAccess: true
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
monitoring:
enablePodMonitor: true
storage:
size: {{ required ".Values.size is required" .Values.size }}
{{- if .Values.users }}
managed:
roles:
{{- range $user, $config := .Values.users }}
- name: {{ $user }}
ensure: present
passwordSecret:
name: {{ printf "%s-user-%s" $.Release.Name $user }}
login: true
inRoles:
- app
{{- end }}
{{- end }}
{{- range $user, $config := .Values.users }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-user-%s" $.Release.Name $user }}
labels:
cnpg.io/reload: "true"
type: kubernetes.io/basic-auth
data:
username: {{ $user | b64enc }}
password: {{ $config.password | b64enc }}
{{- end }}


@@ -1,81 +0,0 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"replicas": {
"type": "number",
"description": "Number of Postgres replicas",
"default": 2
},
"quorum": {
"type": "object",
"properties": {
"minSyncReplicas": {
"type": "number",
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.",
"default": 0
},
"maxSyncReplicas": {
"type": "number",
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).",
"default": 0
}
}
},
"backup": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pereiodic backups",
"default": false
},
"s3Region": {
"type": "string",
"description": "The AWS S3 region where backups are stored",
"default": "us-east-1"
},
"s3Bucket": {
"type": "string",
"description": "The S3 bucket used for storing backups",
"default": "s3.example.org/postgres-backups"
},
"schedule": {
"type": "string",
"description": "Cron schedule for automated backups",
"default": "0 2 * * *"
},
"cleanupStrategy": {
"type": "string",
"description": "The strategy for cleaning up old backups",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
},
"s3AccessKey": {
"type": "string",
"description": "The access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "The secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
},
"resticPassword": {
"type": "string",
"description": "The password for Restic backup encryption",
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
}
}
}


@@ -1,48 +0,0 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param replicas Number of Postgres replicas
##
external: false
size: 10Gi
replicas: 2
## Configuration for the quorum-based synchronous replication
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
## user1:
## password: strongpassword
## user2:
## password: hackme
##
users: {}
## @section Backup parameters
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0


@@ -1,56 +0,0 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param replicas Number of Postgres replicas
##
external: false
size: 10Gi
replicas: 1
## Configuration for the quorum-based synchronous replication
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
## user1:
## password: strongpassword
## user2:
## password: hackme
##
users:
foo:
password: asd
bar:
password: asd
baz:
password: asd
boo:
password: asd
## @section Backup parameters
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0


@@ -1,3 +1,23 @@
.helmignore
/logos
/Makefile
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -1,7 +1,7 @@
apiVersion: v2
name: http-cache
description: Layer 7 load balancer and caching service
icon: /logos/nginx.svg
icon: https://www.svgrepo.com/show/373924/nginx.svg
# A chart can be either an 'application' or a 'library' chart.
#
@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.25.3"
appVersion: "1.16.0"


@@ -1,23 +1,22 @@
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
NGINX_CACHE_TAG = v0.1.0
include ../../../scripts/common-envs.mk
TAG := v0.2.0
image: image-nginx
image-nginx:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
--provenance false \
--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
--tag $(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG) \
--tag $(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG)-$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG) \
--cache-to type=inline \
--metadata-file images/nginx-cache.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))" > images/nginx-cache.tag
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
echo "$(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG)" > images/nginx-cache.tag
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \

View File

@@ -55,20 +55,3 @@ The deployment architecture is illustrated in the diagram below:
VTS module shows wrong upstream response time
- https://github.com/vozlt/nginx-module-vts/issues/198
## Parameters
### Common parameters
| Name | Description | Value |
| ------------------ | ----------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `haproxy.replicas` | Number of HAProxy replicas | `2` |
| `nginx.replicas` | Number of Nginx replicas | `2` |
### Configuration parameters
| Name | Description | Value |
| ----------- | ----------------------- | ----- |
| `endpoints` | Endpoints configuration | `[]` |

View File

@@ -1,38 +1,4 @@
{
"buildx.build.provenance": {
"buildType": "https://mobyproject.org/buildkit@v1",
"materials": [
{
"uri": "pkg:docker/ubuntu@22.04?platform=linux%2Famd64",
"digest": {
"sha256": "340d9b015b194dc6e2a13938944e0d016e57b9679963fdeb9ce021daac430221"
}
}
],
"invocation": {
"configSource": {
"entryPoint": "Dockerfile"
},
"parameters": {
"frontend": "dockerfile.v0",
"args": {
"build-arg:ARCH": "amd64"
},
"locals": [
{
"name": "context"
},
{
"name": "dockerfile"
}
]
},
"environment": {
"platform": "linux/amd64"
}
}
},
"buildx.build.ref": "amd64/amd64/gaibgudlqaxqxufa236q5ffdk",
"containerimage.config.digest": "sha256:677b0b84d7a11a31971857863a6a83b5bb863583eca86a2c2b1b89c61659e549",
"containerimage.digest": "sha256:7f864e2c9c86b77e08953258521117503309f84783ea11c617db8c2534f8b545"
"containerimage.config.digest": "sha256:0487fc50bb5f870720b05e947185424a400fad38b682af8f1ca4b418ed3c5b4b",
"containerimage.digest": "sha256:be12f3834be0e2f129685f682fab83c871610985fc43668ce6a294c9de603798"
}

View File

@@ -1 +1 @@
mgr.cp.if.ua/nginx-cache:v0.1.0
ghcr.io/aenix-io/cozystack/nginx-cache:v0.1.0

View File

@@ -1,2 +0,0 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 32 32" xmlns="http://www.w3.org/2000/svg"><title>file_type_nginx</title><path d="M15.948,2h.065a10.418,10.418,0,0,1,.972.528Q22.414,5.65,27.843,8.774a.792.792,0,0,1,.414.788c-.008,4.389,0,8.777-.005,13.164a.813.813,0,0,1-.356.507q-5.773,3.324-11.547,6.644a.587.587,0,0,1-.657.037Q9.912,26.6,4.143,23.274a.7.7,0,0,1-.4-.666q0-6.582,0-13.163a.693.693,0,0,1,.387-.67Q9.552,5.657,14.974,2.535c.322-.184.638-.379.974-.535" style="fill:#019639"/><path d="M8.767,10.538q0,5.429,0,10.859a1.509,1.509,0,0,0,.427,1.087,1.647,1.647,0,0,0,2.06.206,1.564,1.564,0,0,0,.685-1.293c0-2.62-.005-5.24,0-7.86q3.583,4.29,7.181,8.568a2.833,2.833,0,0,0,2.6.782,1.561,1.561,0,0,0,1.251-1.371q.008-5.541,0-11.081a1.582,1.582,0,0,0-3.152,0c0,2.662-.016,5.321,0,7.982-2.346-2.766-4.663-5.556-7-8.332A2.817,2.817,0,0,0,10.17,9.033,1.579,1.579,0,0,0,8.767,10.538Z" style="fill:#fff"/></svg>


View File

@@ -74,7 +74,7 @@ data:
option redispatch 1
default-server observe layer7 error-limit 10 on-error mark-down
{{- range $i, $e := until (int $.Values.nginx.replicas) }}
{{- range $i, $e := until (int $.Values.replicas) }}
server cache{{ $i }} {{ $.Release.Name }}-nginx-cache-{{ $i }}:80 check
{{- end }}
{{- range $i, $e := $.Values.endpoints }}

View File

@@ -7,7 +7,7 @@ metadata:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: {{ .Values.haproxy.replicas }}
replicas: 2
selector:
matchLabels:
app: {{ .Release.Name }}-haproxy

View File

@@ -11,7 +11,7 @@ spec:
selector:
matchLabels:
app: {{ $.Release.Name }}-nginx-cache
{{- range $i := until (int $.Values.nginx.replicas) }}
{{- range $i := until 3 }}
---
apiVersion: apps/v1
kind: Deployment

View File

@@ -1,42 +0,0 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"haproxy": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of HAProxy replicas",
"default": 2
}
}
},
"nginx": {
"type": "object",
"properties": {
"replicas": {
"type": "number",
"description": "Number of Nginx replicas",
"default": 2
}
}
},
"endpoints": {
"type": "array",
"description": "Endpoints configuration",
"default": [],
"items": {}
}
}
}

View File

@@ -1,28 +1,9 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param haproxy.replicas Number of HAProxy replicas
## @param nginx.replicas Number of Nginx replicas
##
external: false
size: 10Gi
haproxy:
replicas: 2
nginx:
replicas: 2
## @section Configuration parameters
## @param endpoints Endpoints configuration
## Example:
## endpoints:
## - 10.100.3.1:80
## - 10.100.3.11:80
## - 10.100.3.2:80
## - 10.100.3.12:80
## - 10.100.3.3:80
## - 10.100.3.13:80
##
endpoints: []
endpoints:
- 10.100.3.1:80
- 10.100.3.11:80
- 10.100.3.2:80
- 10.100.3.12:80
- 10.100.3.3:80
- 10.100.3.13:80

View File

@@ -1,3 +0,0 @@
.helmignore
/logos
/Makefile

View File

@@ -1,25 +0,0 @@
apiVersion: v2
name: kafka
description: Managed Kafka service
icon: /logos/kafka.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.2
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "3.7.0"

View File

@@ -1,2 +0,0 @@
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md

View File

@@ -1,19 +0,0 @@
# Managed Kafka Service
## Parameters
### Common parameters
| Name | Description | Value |
| -------------------- | ----------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `kafka.size` | Persistent Volume size for Kafka | `10Gi` |
| `kafka.replicas` | Number of Kafka replicas | `3` |
| `zookeeper.size` | Persistent Volume size for ZooKeeper | `5Gi` |
| `zookeeper.replicas` | Number of ZooKeeper replicas | `3` |
### Configuration parameters
| Name | Description | Value |
| -------- | -------------------- | ----- |
| `topics` | Topics configuration | `[]` |

View File

@@ -1 +0,0 @@
<svg width="154" height="250" viewBox="0 0 256 416" xmlns="http://www.w3.org/2000/svg" preserveAspectRatio="xMidYMid"><path d="M201.816 230.216c-16.186 0-30.697 7.171-40.634 18.461l-25.463-18.026c2.703-7.442 4.255-15.433 4.255-23.797 0-8.219-1.498-16.076-4.112-23.408l25.406-17.835c9.936 11.233 24.409 18.365 40.548 18.365 29.875 0 54.184-24.305 54.184-54.184 0-29.879-24.309-54.184-54.184-54.184-29.875 0-54.184 24.305-54.184 54.184 0 5.348.808 10.505 2.258 15.389l-25.423 17.844c-10.62-13.175-25.911-22.374-43.333-25.182v-30.64c24.544-5.155 43.037-26.962 43.037-53.019C124.171 24.305 99.862 0 69.987 0 40.112 0 15.803 24.305 15.803 54.184c0 25.708 18.014 47.246 42.067 52.769v31.038C25.044 143.753 0 172.401 0 206.854c0 34.621 25.292 63.374 58.355 68.94v32.774c-24.299 5.341-42.552 27.011-42.552 52.894 0 29.879 24.309 54.184 54.184 54.184 29.875 0 54.184-24.305 54.184-54.184 0-25.883-18.253-47.553-42.552-52.894v-32.775a69.965 69.965 0 0 0 42.6-24.776l25.633 18.143c-1.423 4.84-2.22 9.946-2.22 15.24 0 29.879 24.309 54.184 54.184 54.184 29.875 0 54.184-24.305 54.184-54.184 0-29.879-24.309-54.184-54.184-54.184zm0-126.695c14.487 0 26.27 11.788 26.27 26.271s-11.783 26.27-26.27 26.27-26.27-11.787-26.27-26.27c0-14.483 11.783-26.271 26.27-26.271zm-158.1-49.337c0-14.483 11.784-26.27 26.271-26.27s26.27 11.787 26.27 26.27c0 14.483-11.783 26.27-26.27 26.27s-26.271-11.787-26.271-26.27zm52.541 307.278c0 14.483-11.783 26.27-26.27 26.27s-26.271-11.787-26.271-26.27c0-14.483 11.784-26.27 26.271-26.27s26.27 11.787 26.27 26.27zm-26.272-117.97c-20.205 0-36.642-16.434-36.642-36.638 0-20.205 16.437-36.642 36.642-36.642 20.204 0 36.641 16.437 36.641 36.642 0 20.204-16.437 36.638-36.641 36.638zm131.831 67.179c-14.487 0-26.27-11.788-26.27-26.271s11.783-26.27 26.27-26.27 26.27 11.787 26.27 26.27c0 14.483-11.783 26.271-26.27 26.271z" style="fill:#231f20"/></svg>


View File

@@ -1,67 +0,0 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
kafka:
replicas: {{ .Values.kafka.replicas }}
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
- name: external
port: 9094
{{- if .Values.external }}
type: loadbalancer
{{- else }}
type: internal
{{- end }}
tls: false
config:
{{- if eq (int .Values.kafka.replicas) 1 }}
offsets.topic.replication.factor: 1
transaction.state.log.replication.factor: 1
transaction.state.log.min.isr: 1
default.replication.factor: 1
min.insync.replicas: 1
{{- else if eq (int .Values.kafka.replicas) 2 }}
offsets.topic.replication.factor: 2
transaction.state.log.replication.factor: 2
transaction.state.log.min.isr: 2
default.replication.factor: 2
min.insync.replicas: 2
{{- else }}
offsets.topic.replication.factor: 3
transaction.state.log.replication.factor: 3
transaction.state.log.min.isr: 2
default.replication.factor: 3
min.insync.replicas: 2
{{- end }}
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
{{- with .Values.kafka.size }}
size: {{ . }}
{{- end }}
deleteClaim: true
zookeeper:
replicas: {{ .Values.zookeeper.replicas }}
storage:
type: persistent-claim
{{- with .Values.zookeeper.size }}
size: {{ . }}
{{- end }}
deleteClaim: false
entityOperator:
topicOperator: {}
userOperator: {}
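To make the branching above concrete: with `kafka.replicas: 2`, the rendered `spec.kafka.config` would contain the middle block, i.e. (sketch of rendered output):

```
offsets.topic.replication.factor: 2
transaction.state.log.replication.factor: 2
transaction.state.log.min.isr: 2
default.replication.factor: 2
min.insync.replicas: 2
```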

View File

@@ -1,21 +0,0 @@
{{- range $topic := .Values.topics }}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: "{{ $.Release.Name }}-{{ kebabcase $topic.name }}"
labels:
strimzi.io/cluster: "{{ $.Release.Name }}"
spec:
topicName: "{{ $topic.name }}"
{{- with $topic.partitions }}
partitions: {{ . }}
{{- end }}
{{- with $topic.replicas }}
replicas: {{ . }}
{{- end }}
{{- with $topic.config }}
config:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,47 +0,0 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"kafka": {
"type": "object",
"properties": {
"size": {
"type": "string",
"description": "Persistent Volume size for Kafka",
"default": "10Gi"
},
"replicas": {
"type": "number",
"description": "Number of Kafka replicas",
"default": 3
}
}
},
"zookeeper": {
"type": "object",
"properties": {
"size": {
"type": "string",
"description": "Persistent Volume size for ZooKeeper",
"default": "5Gi"
},
"replicas": {
"type": "number",
"description": "Number of ZooKeeper replicas",
"default": 3
}
}
},
"topics": {
"type": "array",
"description": "Topics configuration",
"default": [],
"items": {}
}
}
}

View File

@@ -1,37 +0,0 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param kafka.size Persistent Volume size for Kafka
## @param kafka.replicas Number of Kafka replicas
## @param zookeeper.size Persistent Volume size for ZooKeeper
## @param zookeeper.replicas Number of ZooKeeper replicas
##
external: false
kafka:
size: 10Gi
replicas: 3
zookeeper:
size: 5Gi
replicas: 3
## @section Configuration parameters
## @param topics Topics configuration
## Example:
## topics:
## - name: Results
## partitions: 1
## replicas: 3
## config:
## min.insync.replicas: 2
## - name: Orders
## config:
## cleanup.policy: compact
## segment.ms: 3600000
## max.compaction.lag.ms: 5400000
## min.insync.replicas: 2
## partitions: 1
## replicas: 3
##
topics: []
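Given the KafkaTopic template shown earlier, a values entry such as `- name: Results` with `partitions: 1`, `replicas: 3`, and `config.min.insync.replicas: 2` would render roughly the following resource (the release name `mykafka` is assumed for illustration):

```
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  name: "mykafka-results"        # release name plus kebab-cased topic name
  labels:
    strimzi.io/cluster: "mykafka"
spec:
  topicName: "Results"
  partitions: 1
  replicas: 3
  config:
    min.insync.replicas: 2
```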

View File

@@ -1,25 +0,0 @@
apiVersion: v2
name: kubernetes-proxmox
description: Managed Kubernetes service
icon: https://upload.wikimedia.org/wikipedia/commons/thumb/3/39/Kubernetes_logo_without_workmark.svg/723px-Kubernetes_logo_without_workmark.svg.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.19.0"

View File

@@ -1,17 +0,0 @@
UBUNTU_CONTAINER_DISK_TAG = v1.29.1
include ../../../scripts/common-envs.mk
image: image-ubuntu-container-disk
image-ubuntu-container-disk:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))" > images/ubuntu-container-disk.tag

View File

@@ -1,28 +0,0 @@
# Managed Kubernetes Service
## Overview
The Managed Kubernetes Service offers a streamlined solution for efficiently managing server workloads. Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily configured through YAML, so teams can quickly understand and work with it.
Kubernetes leverages robust software design patterns, enabling continuous recovery in any scenario through the reconciliation method. It also scales seamlessly across a multitude of servers, addressing the challenges posed by the complex and outdated APIs of traditional virtualization platforms. This managed service eliminates the need to develop custom solutions or modify source code, saving valuable time and effort.
## Deployment Details
The managed Kubernetes service deploys a standard Kubernetes cluster using the Cluster API, with Kamaji as the control-plane provider and KubeVirt as the infrastructure provider. This ensures a consistent and reliable setup for workloads.
Within this cluster, users can take advantage of LoadBalancer services and easily provision persistent volumes as needed. The control plane runs in containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
- Docs: https://github.com/clastix/kamaji
- Docs: https://cluster-api.sigs.k8s.io/
- GitHub: https://github.com/clastix/kamaji
- GitHub: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
- GitHub: https://github.com/kubevirt/csi-driver
## How-Tos
How to access the deployed cluster:
```
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```

View File

@@ -1,4 +0,0 @@
{
"containerimage.config.digest": "sha256:62baab666445d76498fb14cc1d0865fc82e4bdd5cb1d7ba80475dc5024184622",
"containerimage.digest": "sha256:9363d717f966f4e7927da332eaaf17401b42203a2fcb493b428f94d096dae3a5"
}

View File

@@ -1 +0,0 @@
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.29.1

View File

@@ -1,51 +0,0 @@
FROM ubuntu:22.04 as guestfish
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get -y install \
libguestfs-tools \
linux-image-generic \
make \
bash-completion \
&& apt-get clean
WORKDIR /build
FROM guestfish as builder
RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
RUN qemu-img resize image.img 5G \
&& eval "$(guestfish --listen --network)" \
&& guestfish --remote add-drive image.img \
&& guestfish --remote run \
&& guestfish --remote mount /dev/sda1 / \
&& guestfish --remote command "growpart /dev/sda 1 --verbose" \
&& guestfish --remote command "resize2fs /dev/sda1" \
# docker repo
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
# install containerd
&& guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -y containerd.io" \
# configure containerd
&& guestfish --remote command "mkdir -p /etc/containerd" \
&& guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \
&& guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \
# install kubernetes
&& guestfish --remote command "apt-get install -y kubelet kubeadm" \
# clean apt cache
&& guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \
# write system configuration
&& guestfish --remote sh 'printf "%s\n" net.bridge.bridge-nf-call-iptables=1 net.bridge.bridge-nf-call-ip6tables=1 net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1 net.ipv6.conf.all.disable_ipv6=0 net.ipv4.tcp_congestion_control=bbr vm.overcommit_memory=1 kernel.panic=10 kernel.panic_on_oops=1 fs.inotify.max_user_instances=8192 fs.inotify.max_user_watches=524288 | tee > /etc/sysctl.d/kubernetes.conf' \
&& guestfish --remote sh 'printf "%s\n" overlay br_netfilter | tee /etc/modules-load.d/kubernetes.conf' \
&& guestfish --remote sh "rm -f /etc/resolv.conf && ln -s ../run/systemd/resolve/stub-resolv.conf /etc/resolv.conf" \
# umount all and exit
&& guestfish --remote umount-all \
&& guestfish --remote exit
FROM scratch
COPY --from=builder /build/image.img /disk/image.qcow2

View File

@@ -1,3 +0,0 @@
To get kubeconfig for this cluster run:
kubectl get secret -n {{ .Release.Namespace }} {{ .Release.Name }}-admin-kubeconfig -o go-template='{{`{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}`}}'

View File

@@ -1,51 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubernetes.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubernetes.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubernetes.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubernetes.labels" -}}
helm.sh/chart: {{ include "kubernetes.chart" . }}
{{ include "kubernetes.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubernetes.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubernetes.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-cloud-config
data:
cloud-config: |
loadBalancer:
creationPollInterval: 5
creationPollTimeout: 60
namespace: {{ .Release.Namespace }}

View File

@@ -1,86 +0,0 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
labels:
app: {{ .Release.Name }}-cluster-autoscaler
spec:
selector:
matchLabels:
app: {{ .Release.Name }}-cluster-autoscaler
replicas: 1
template:
metadata:
labels:
app: {{ .Release.Name }}-cluster-autoscaler
spec:
containers:
- image: ghcr.io/kvaps/test:cluster-autoscaller
name: cluster-autoscaler
command:
- /cluster-autoscaler
args:
- --cloud-provider=clusterapi
- --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
- --clusterapi-cloud-config-authoritative
- --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }}
volumeMounts:
- mountPath: /etc/kubernetes/kubeconfig
name: kubeconfig
readOnly: true
volumes:
- configMap:
name: {{ .Release.Name }}-cloud-config
name: cloud-config
- secret:
secretName: {{ .Release.Name }}-admin-kubeconfig
name: kubeconfig
serviceAccountName: {{ .Release.Name }}-cluster-autoscaler
terminationGracePeriodSeconds: 10
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-cluster-autoscaler
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-cluster-autoscaler
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
rules:
- apiGroups:
- cluster.x-k8s.io
resources:
- machinedeployments
- machinedeployments/scale
- machines
- machinesets
- machinepools
verbs:
- get
- list
- update
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- proxmoxmachinetemplates
verbs:
- get
- list
- update
- watch

View File

@@ -1,147 +0,0 @@
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $etcd := index $myNS.metadata.annotations "namespace.cozystack.io/etcd" }}
{{- $ingress := index $myNS.metadata.annotations "namespace.cozystack.io/ingress" }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}'
spec:
clusterNetwork:
pods:
cidrBlocks:
- 10.243.0.0/16
controlPlaneRef:
namespace: {{ .Release.Namespace }}
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlane
name: {{ .Release.Name }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxCluster
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlane
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
cluster.x-k8s.io/role: control-plane
annotations:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
dataStoreName: "{{ $etcd }}"
addons:
coreDNS: {}
konnectivity: {}
kubelet:
cgroupfs: systemd
preferredAddressTypes:
- InternalIP
- ExternalIP
network:
serviceType: ClusterIP
ingress:
extraAnnotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
className: "{{ $ingress }}"
deployment:
replicas: 2
version: 1.29.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxCluster
metadata:
annotations:
cluster.x-k8s.io/managed-by: kamaji
cluster.x-k8s.io/cluster-name: {{ .Release.Name }}
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
spec:
controlPlaneEndpoint:
host: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}
port: 443
ipv4Config:
addresses: ${NODE_IP_RANGES}
prefix: ${IP_PREFIX}
gateway: ${GATEWAY}
dnsServers: ${DNS_SERVERS}
allowedNodes: ${ALLOWED_NODES:=[]}
{{- range $groupName, $group := .Values.nodeGroups }}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
spec:
template:
spec:
users:
- name: root
sshAuthorizedKeys: [${VM_SSH_KEYS}]
joinConfiguration:
nodeRegistration:
kubeletExtraArgs:
provider-id: "proxmox://'{{ ds.meta_data.instance_id }}'"
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxMachineTemplate
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
spec:
template:
spec:
sourceNode: "${PROXMOX_SOURCENODE}"
templateID: ${TEMPLATE_VMID}
format: "qcow2"
full: true
numSockets: ${NUM_SOCKETS:=2}
numCores: ${NUM_CORES:=4}
memoryMiB: ${MEMORY_MIB:=16384}
disks:
bootVolume:
disk: ${BOOT_VOLUME_DEVICE}
sizeGb: ${BOOT_VOLUME_SIZE:=100}
network:
default:
bridge: ${BRIDGE}
model: virtio
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
annotations:
cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ $group.minReplicas }}"
cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ $group.maxReplicas }}"
capacity.cluster-autoscaler.kubernetes.io/memory: "{{ $group.resources.memory }}"
capacity.cluster-autoscaler.kubernetes.io/cpu: "{{ $group.resources.cpu }}"
spec:
clusterName: {{ $.Release.Name }}
template:
spec:
clusterName: {{ $.Release.Name }}
version: v1.29.0
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: default
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ProxmoxMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: default
{{- end }}

View File

@@ -1,29 +0,0 @@
## -- Controller settings -- ##
PROXMOX_URL: "https://pve.example:8006" # The Proxmox VE host
PROXMOX_TOKEN: "root@pam!capi" # The Proxmox VE TokenID for authentication
PROXMOX_SECRET: "REDACTED" # The secret associated with the TokenID
## -- Required workload cluster default settings -- ##
PROXMOX_SOURCENODE: "pve" # The node that hosts the VM template to be used to provision VMs
TEMPLATE_VMID: "100" # The template VM ID used for cloning VMs
ALLOWED_NODES: "[pve1,pve2,pve3, ...]" # The Proxmox VE nodes used for VM deployments
VM_SSH_KEYS: "ssh-ed25519 ..., ssh-ed25519 ..." # The ssh authorized keys used to ssh to the machines.
## -- networking configuration-- ##
CONTROL_PLANE_ENDPOINT_IP: "10.10.10.4" # The IP that kube-vip is going to use as a control plane endpoint
NODE_IP_RANGES: "[10.10.10.5-10.10.10.50, ...]" # The IP ranges for Cluster nodes
GATEWAY: "10.10.10.1" # The gateway for the machines network-config.
IP_PREFIX: "25" # Subnet Mask in CIDR notation for your node IP ranges
DNS_SERVERS: "[8.8.8.8,8.8.4.4]" # The dns nameservers for the machines network-config.
BRIDGE: "vmbr1" # The network bridge device for Proxmox VE VMs
## -- xl nodes -- ##
BOOT_VOLUME_DEVICE: "scsi0" # The device used for the boot disk.
BOOT_VOLUME_SIZE: "100" # The size of the boot disk in GB.
NUM_SOCKETS: "1" # The number of sockets for the VMs.
NUM_CORES: "4" # The number of cores for the VMs.
MEMORY_MIB: "8192" # The memory size for the VMs.
EXP_CLUSTER_RESOURCE_SET: "true" # This enables the ClusterResourceSet feature that we are using to deploy CNI
CLUSTER_TOPOLOGY: "true" # This enables experimental ClusterClass templating

View File

@@ -1,126 +0,0 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Release.Name }}-kcsi-controller
labels:
app: {{ .Release.Name }}-kcsi-driver
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-kcsi-driver
template:
metadata:
labels:
app: {{ .Release.Name }}-kcsi-driver
spec:
serviceAccountName: {{ .Release.Name }}-kcsi
priorityClassName: system-cluster-critical
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: "NoSchedule"
containers:
- name: csi-driver
imagePullPolicy: Always
image: ghcr.io/kvaps/test:kubevirt-csi-driver
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)"
- "--infra-cluster-labels=$(INFRACLUSTER_LABELS)"
- "--v=5"
ports:
- name: healthz
containerPort: 10301
protocol: TCP
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: INFRACLUSTER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: INFRACLUSTER_LABELS
value: "csi-driver/cluster=test"
- name: INFRA_STORAGE_CLASS_ENFORCEMENT
valueFrom:
configMapKeyRef:
name: driver-config
key: infraStorageClassEnforcement
optional: true
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
resources:
requests:
memory: 50Mi
cpu: 10m
- name: csi-provisioner
image: quay.io/openshift/origin-csi-external-provisioner:latest
args:
- "--csi-address=$(ADDRESS)"
- "--default-fstype=ext4"
- "--kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc"
- "--v=5"
- "--timeout=3m"
- "--retry-interval-max=1m"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
- name: csi-attacher
image: quay.io/openshift/origin-csi-external-attacher:latest
args:
- "--csi-address=$(ADDRESS)"
- "--kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc"
- "--v=5"
- "--timeout=3m"
- "--retry-interval-max=1m"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
resources:
requests:
memory: 50Mi
cpu: 10m
- name: csi-liveness-probe
image: quay.io/openshift/origin-csi-livenessprobe:latest
args:
- "--csi-address=/csi/csi.sock"
- "--probe-timeout=3s"
- "--health-port=10301"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
requests:
memory: 50Mi
cpu: 10m
volumes:
- name: socket-dir
emptyDir: {}
- secret:
secretName: {{ .Release.Name }}-admin-kubeconfig
name: kubeconfig

View File

@@ -1,32 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-kcsi
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Release.Name }}-kcsi
rules:
- apiGroups: ["cdi.kubevirt.io"]
resources: ["datavolumes"]
verbs: ["get", "create", "delete"]
- apiGroups: ["kubevirt.io"]
resources: ["virtualmachineinstances"]
verbs: ["list", "get"]
- apiGroups: ["subresources.kubevirt.io"]
resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"]
verbs: ["update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Release.Name }}-kcsi
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-kcsi
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-kcsi

View File

@@ -1,46 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-cilium
labels:
cozystack.io/repository: system
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 1m
releaseName: cilium
chart:
spec:
chart: cozy-cilium
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-cilium
storageNamespace: cozy-cilium
install:
createNamespace: true
values:
cilium:
tunnel: disabled
autoDirectNodeRoutes: true
cgroup:
autoMount:
enabled: true
hostRoot: /run/cilium/cgroupv2
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
cni:
chainingMode: ~
customConf: false
configMap: ""
routingMode: native
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: "10.244.0.0/16"
dependsOn:
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,28 +0,0 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-csi
labels:
cozystack.io/repository: system
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 1m
releaseName: csi
chart:
spec:
chart: cozy-kubevirt-csi-node
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-csi
storageNamespace: cozy-csi
install:
createNamespace: true
dependsOn:
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}

View File

@@ -1,73 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "10"
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
name: {{ .Release.Name }}-flux-teardown
spec:
template:
spec:
serviceAccountName: {{ .Release.Name }}-flux-teardown
restartPolicy: Never
containers:
- name: kubectl
image: docker.io/clastix/kubectl:v1.29.1
command:
- kubectl
- --namespace={{ .Release.Namespace }}
- patch
- helmrelease
- {{ .Release.Name }}-cilium
- {{ .Release.Name }}-csi
- -p
- '{"spec": {"suspend": true}}'
- --type=merge
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-flux-teardown
annotations:
helm.sh/hook: pre-delete
helm.sh/hook-delete-policy: before-hook-creation,hook-failed
helm.sh/hook-weight: "0"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
"helm.sh/hook": pre-install,post-install,pre-delete
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
"helm.sh/hook-weight": "5"
name: {{ .Release.Name }}-flux-teardown
rules:
- apiGroups:
- "helm.toolkit.fluxcd.io"
resources:
- helmreleases
verbs:
- get
- patch
resourceNames:
- {{ .Release.Name }}-cilium
- {{ .Release.Name }}-csi
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
helm.sh/hook: pre-delete
helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation,hook-failed
helm.sh/hook-weight: "5"
name: {{ .Release.Name }}-flux-teardown
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-flux-teardown
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-flux-teardown
namespace: {{ .Release.Namespace }}

View File

@@ -1,102 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-kccm
labels:
helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
app.kubernetes.io/name: {{ .Release.Name }}-kccm
app.kubernetes.io/instance: {{ .Release.Name }}-kccm
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: {{ .Release.Name }}-kccm
app.kubernetes.io/instance: {{ .Release.Name }}-kccm
spec:
enableServiceLinks: false
priorityClassName: system-cluster-critical
serviceAccountName: {{ .Release.Name }}-pccm
securityContext:
fsGroup: 10258
fsGroupChangePolicy: OnRootMismatch
runAsGroup: 10258
runAsNonRoot: true
runAsUser: 10258
hostAliases:
[]
initContainers:
[]
containers:
- name: proxmox-cloud-controller-manager
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
image: ghcr.io/sergelogvinov/proxmox-cloud-controller-manager
imagePullPolicy: IfNotPresent
args:
- --v=4
- --cloud-provider=proxmox
- --cloud-config=/etc/cloud/cloud-config
- --controllers=cloud-node,cloud-node-lifecycle
- --leader-elect-resource-name=cloud-controller-manager-proxmox
- --use-service-account-credentials
- --secure-port=10258
- --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
livenessProbe:
httpGet:
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 30
timeoutSeconds: 5
resources:
requests:
cpu: 10m
memory: 32Mi
volumeMounts:
- mountPath: /etc/kubernetes/kubeconfig
name: kubeconfig
readOnly: true
- mountPath: /etc/proxmox
name: cloud-config
readOnly: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
topologySpreadConstraints:
- maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
labelSelector:
matchLabels:
app.kubernetes.io/name: {{ .Release.Name }}-kccm
app.kubernetes.io/instance: {{ .Release.Name }}-kccm
volumes:
- name: cloud-config
secret:
secretName: {{ .Release.Name }}-cloud-config
defaultMode: 416
- secret:
secretName: {{ .Release.Name }}-admin-kubeconfig
name: kubeconfig

View File

@@ -1,57 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
labels:
helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
app.kubernetes.io/name: {{ .Release.Name }}-kccm
app.kubernetes.io/instance: {{ .Release.Name }}-kccm
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- create
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
- delete
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create

View File

@@ -1,27 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
subjects:
- kind: ServiceAccount
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm:extension-apiserver-authentication-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
namespace: {{ .Release.Namespace }}

View File

@@ -1,11 +0,0 @@
{{- if ne (len .Values.config.clusters) 0 }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-pccm
labels:
{{- include "proxmox-cloud-controller-manager.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
data:
config.yaml: {{ toYaml .Values.config | b64enc | quote }}
{{- end }}
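The shape of `.Values.config` is not shown in this diff; judging by the upstream proxmox-cloud-controller-manager convention it is presumably a list of clusters, along these lines (field names are an assumption, verify against the CCM documentation):

```
config:
  clusters:
    - url: https://pve.example:8006/api2/json   # assumed field, matches PROXMOX_URL above
      insecure: false
      token_id: "root@pam!capi"                 # assumed field, matches PROXMOX_TOKEN
      token_secret: "REDACTED"                  # assumed field, matches PROXMOX_SECRET
      region: cluster-1
```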

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-pccm
labels:
helm.sh/chart: proxmox-cloud-controller-manager-0.2.0
app.kubernetes.io/name: {{ .Release.Name }}-pccm
app.kubernetes.io/instance: {{ .Release.Name }}-pccm
app.kubernetes.io/version: "v0.4.0"
app.kubernetes.io/managed-by: Helm

View File

@@ -1,10 +0,0 @@
host: ""
controlPlane:
replicas: 2
nodeGroups:
md0:
minReplicas: 0
maxReplicas: 10
resources:
cpu: 2
memory: 1024Mi
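Additional node groups can be declared alongside `md0`; each key becomes a MachineDeployment, and `minReplicas`/`maxReplicas` feed the cluster-autoscaler annotations seen in the templates. A sketch with a hypothetical second group:

```
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    resources:
      cpu: 2
      memory: 1024Mi
  xl:                  # hypothetical group name
    minReplicas: 1
    maxReplicas: 5
    resources:
      cpu: 8
      memory: 16384Mi
```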

View File

@@ -1,3 +1,23 @@
.helmignore
/logos
/Makefile
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,7 +1,7 @@
apiVersion: v2
name: kubernetes
description: Managed Kubernetes service
icon: /logos/kubernetes.svg
icon: https://upload.wikimedia.org/wikipedia/commons/thumb/3/39/Kubernetes_logo_without_workmark.svg/723px-Kubernetes_logo_without_workmark.svg.png
# A chart can be either an 'application' or a 'library' chart.
#
@@ -16,10 +16,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.7.0
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.30.1"
appVersion: "1.16.0"

View File

@@ -1,20 +1,19 @@
UBUNTU_CONTAINER_DISK_TAG = v1.30.1
include ../../../scripts/common-envs.mk
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
TAG := v0.2.0
UBUNTU_CONTAINER_DISK_TAG = v1.29.1
image: image-ubuntu-container-disk
image-ubuntu-container-disk:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
--tag $(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG) \
--tag $(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG) \
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))" > images/ubuntu-container-disk.tag
echo "$(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG)" > images/ubuntu-container-disk.tag

View File

@@ -26,23 +26,3 @@ How to access the deployed cluster:
```
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > test
```
## Parameters
### Common parameters
| Name | Description | Value |
| ----------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ----- |
| `host` | The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host). | `""` |
| `controlPlane.replicas` | Number of replicas for Kubernetes control-plane components | `2` |
| `nodeGroups` | nodeGroups configuration | `{}` |
### Cluster Addons
| Name | Description | Value |
| ----------------------------- | ---------------------------------------------------------------------------------- | ------- |
| `addons.certManager.enabled` | Enables the cert-manager | `false` |
| `addons.ingressNginx.enabled` | Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role) | `false` |
| `addons.ingressNginx.hosts` | List of domain names that should be passed through to the cluster by upper cluster | `[]` |
| `addons.fluxcd.enabled` | Enables Flux CD | `false` |
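Put together, enabling the addons from this table might look like the following in values (the hostnames are placeholders):

```
addons:
  certManager:
    enabled: true
  ingressNginx:
    enabled: true
    hosts:
      - example.org        # placeholder domain passed through by the upper cluster
  fluxcd:
    enabled: true
```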

View File

@@ -1,38 +1,4 @@
{
"buildx.build.provenance": {
"buildType": "https://mobyproject.org/buildkit@v1",
"materials": [
{
"uri": "pkg:docker/ubuntu@22.04?platform=linux%2Famd64",
"digest": {
"sha256": "340d9b015b194dc6e2a13938944e0d016e57b9679963fdeb9ce021daac430221"
}
}
],
"invocation": {
"configSource": {
"entryPoint": "Dockerfile"
},
"parameters": {
"frontend": "dockerfile.v0",
"args": {
"build-arg:ARCH": "amd64"
},
"locals": [
{
"name": "context"
},
{
"name": "dockerfile"
}
]
},
"environment": {
"platform": "linux/amd64"
}
}
},
"buildx.build.ref": "amd64/amd64/kk2drcq44gorgb3xwa8908pfc",
"containerimage.config.digest": "sha256:363589eb47379eb7548f047aae24045278f14db0b2026022b6bec33a04370f15",
"containerimage.digest": "sha256:f242fd77903f5f5a94ed157e98b0c4532e5ba91734d9653eaf26cfe4b23b017b"
"containerimage.config.digest": "sha256:43d0bfd01c5e364ba961f1e3dc2c7ccd7fd4ca65bd26bc8c4a5298d7ff2c9f4f",
"containerimage.digest": "sha256:908b3c186bee86f1c9476317eb6582d07f19776b291aa068e5642f8fd08fa9e7"
}

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.29.1

View File

@@ -26,8 +26,8 @@ RUN qemu-img resize image.img 5G \
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.30/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.30/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
# install containerd
&& guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -y containerd.io" \

File diff suppressed because one or more lines are too long


View File

@@ -14,14 +14,7 @@ spec:
metadata:
labels:
app: {{ .Release.Name }}-cluster-autoscaler
policy.cozystack.io/allow-to-apiserver: "true"
spec:
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: "NoSchedule"
containers:
- image: ghcr.io/kvaps/test:cluster-autoscaller
name: cluster-autoscaler

View File

@@ -2,56 +2,6 @@
{{- $etcd := index $myNS.metadata.annotations "namespace.cozystack.io/etcd" }}
{{- $ingress := index $myNS.metadata.annotations "namespace.cozystack.io/ingress" }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}
{{- $kubevirtmachinetemplateNames := list }}
{{- define "kubevirtmachinetemplate" -}}
spec:
virtualMachineBootstrapCheck:
checkStrategy: ssh
virtualMachineTemplate:
metadata:
namespace: {{ $.Release.Namespace }}
labels:
{{- range .group.roles }}
node-role.kubernetes.io/{{ . }}: ""
{{- end }}
spec:
runStrategy: Always
template:
spec:
domain:
cpu:
threads: 1
cores: {{ .group.resources.cpu }}
sockets: 1
devices:
disks:
- name: system
disk:
bus: virtio
pciAddress: 0000:07:00.0
- name: containerd
disk:
bus: virtio
pciAddress: 0000:08:00.0
- name: kubelet
disk:
bus: virtio
pciAddress: 0000:09:00.0
networkInterfaceMultiqueue: true
memory:
guest: {{ .group.resources.memory }}
evictionStrategy: External
volumes:
- name: system
containerDisk:
image: "{{ $.Files.Get "images/ubuntu-container-disk.tag" | trim }}@{{ index ($.Files.Get "images/ubuntu-container-disk.json" | fromJson) "containerimage.digest" }}"
- name: containerd
emptyDisk:
capacity: 20Gi
- name: kubelet
emptyDisk:
capacity: 20Gi
{{- end }}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
@@ -89,9 +39,7 @@ metadata:
spec:
dataStoreName: "{{ $etcd }}"
addons:
coreDNS:
dnsServiceIPs:
- 10.95.0.10
coreDNS: {}
konnectivity: {}
kubelet:
cgroupfs: systemd
@@ -106,11 +54,8 @@ spec:
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
className: "{{ $ingress }}"
deployment:
podAdditionalMetadata:
labels:
policy.cozystack.io/allow-to-etcd: "true"
replicas: 2
version: 1.30.1
version: 1.29.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtCluster
@@ -119,118 +64,87 @@ metadata:
cluster.x-k8s.io/managed-by: kamaji
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- range $groupName, $group := .Values.nodeGroups }}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
spec:
template:
spec:
diskSetup:
filesystems:
- device: /dev/vdb
filesystem: xfs
label: containerd
partition: "none"
- device: /dev/vdc
filesystem: xfs
label: kubelet
partition: "none"
mounts:
- ["LABEL=containerd", "/var/lib/containerd"]
- ["LABEL=kubelet", "/var/lib/kubelet"]
preKubeadmCommands:
- sed -i 's|root:x:|root::|' /etc/passwd
joinConfiguration:
nodeRegistration:
kubeletExtraArgs: {}
discovery:
bootstrapToken:
apiServerEndpoint: {{ $.Release.Name }}.{{ $.Release.Namespace }}.svc:6443
apiServerEndpoint: {{ .Release.Name }}.{{ .Release.Namespace }}.svc:6443
initConfiguration:
skipPhases:
- addon/kube-proxy
---
{{- $context := deepCopy $ }}
{{- $_ := set $context "group" $group }}
{{- $kubevirtmachinetemplate := include "kubevirtmachinetemplate" $context }}
{{- $kubevirtmachinetemplateHash := $kubevirtmachinetemplate | sha256sum | trunc 6 }}
{{- $kubevirtmachinetemplateName := printf "%s-%s-%s" $.Release.Name $groupName $kubevirtmachinetemplateHash }}
{{- $kubevirtmachinetemplateNames = append $kubevirtmachinetemplateNames $kubevirtmachinetemplateName }}
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
namespace: {{ $.Release.Namespace }}
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
spec:
template:
{{- $kubevirtmachinetemplate | nindent 4 }}
spec:
virtualMachineBootstrapCheck:
checkStrategy: ssh
virtualMachineTemplate:
metadata:
namespace: {{ .Release.Namespace }}
spec:
runStrategy: Always
template:
spec:
domain:
cpu:
threads: 1
cores: 2
sockets: 1
devices:
disks:
- disk:
bus: virtio
name: containervolume
networkInterfaceMultiqueue: true
memory:
guest: 1024Mi
evictionStrategy: External
volumes:
- containerDisk:
image: "{{ $.Files.Get "images/ubuntu-container-disk.tag" | trim }}@{{ index ($.Files.Get "images/ubuntu-container-disk.json" | fromJson) "containerimage.digest" }}"
name: containervolume
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
annotations:
cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "{{ $group.minReplicas }}"
cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "{{ $group.maxReplicas }}"
capacity.cluster-autoscaler.kubernetes.io/memory: "{{ $group.resources.memory }}"
capacity.cluster-autoscaler.kubernetes.io/cpu: "{{ $group.resources.cpu }}"
cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "2"
cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "0"
capacity.cluster-autoscaler.kubernetes.io/memory: "1024Mi"
capacity.cluster-autoscaler.kubernetes.io/cpu: "2"
spec:
clusterName: {{ $.Release.Name }}
clusterName: {{ .Release.Name }}
selector:
matchLabels: null
template:
metadata:
labels:
cluster.x-k8s.io/cluster-name: {{ $.Release.Name }}
cluster.x-k8s.io/deployment-name: {{ $.Release.Name }}-{{ $groupName }}
{{- range $group.roles }}
node-role.kubernetes.io/{{ . }}: ""
{{- end }}
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: {{ $.Release.Name }}-{{ $groupName }}
namespace: {{ $.Release.Namespace }}
clusterName: {{ $.Release.Name }}
name: {{ .Release.Name }}-md-0
namespace: default
clusterName: {{ .Release.Name }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
name: {{ $.Release.Name }}-{{ $groupName }}-{{ $kubevirtmachinetemplateHash }}
name: {{ .Release.Name }}-md-0
namespace: default
version: v1.30.1
{{- end }}
---
{{- /*
We must preserve all previous KubevirtMachineTemplates until a MachineSet references them.
*/ -}}
{{- $mss := (lookup "cluster.x-k8s.io/v1beta1" "MachineSet" $.Release.Namespace "").items }}
{{- $oldKubevirtmachinetemplates := dict }}
{{- range $kmt := (lookup "infrastructure.cluster.x-k8s.io/v1alpha1" "KubevirtMachineTemplate" .Release.Namespace "").items }}
{{- range $or := $kmt.metadata.ownerReferences }}
{{- if and (eq $or.kind "Cluster") (eq $or.name $.Release.Name) }}
{{- range $ms := $mss }}
{{- if and (eq $ms.spec.template.spec.infrastructureRef.kind "KubevirtMachineTemplate") (eq $ms.spec.template.spec.infrastructureRef.name $kmt.metadata.name) }}
{{- if not (has $kmt.metadata.name $kubevirtmachinetemplateNames) }}
{{- $oldKubevirtmachinetemplates = merge $oldKubevirtmachinetemplates (dict $kmt.metadata.name $kmt) }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- range $oldKubevirtmachinetemplates }}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
metadata:
name: {{ .metadata.name }}
namespace: {{ .metadata.namespace }}
spec:
{{- .spec | toYaml | nindent 2 }}
{{- end }}
version: v1.23.10
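For reference, the `nodeGroups` values consumed by this template presumably look like the sketch below; each `roles` entry becomes a `node-role.kubernetes.io/<role>` label on both the VM template and the machines, and the resources drive the autoscaler capacity annotations (group name, sizes, and role are illustrative):

```
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    roles:
      - ingress-nginx      # illustrative; the ingress-nginx addon expects nodes with this role
    resources:
      cpu: 4
      memory: 8192Mi
```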

View File

@@ -13,14 +13,15 @@ spec:
metadata:
labels:
app: {{ .Release.Name }}-kcsi-driver
policy.cozystack.io/allow-to-apiserver: "true"
spec:
serviceAccountName: {{ .Release.Name }}-kcsi
priorityClassName: system-cluster-critical
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/control-plane
- key: node-role.kubernetes.io/master
operator: Exists
effect: "NoSchedule"
containers:

View File

@@ -1,39 +0,0 @@
{{- if .Values.addons.certManager.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-cert-manager
labels:
cozystack.io/repository: system
coztstack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: cert-manager
chart:
spec:
chart: cozy-cert-manager
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-cert-manager
storageNamespace: cozy-cert-manager
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -1,4 +1,4 @@
apiVersion: helm.toolkit.fluxcd.io/v2
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-cilium
@@ -6,7 +6,7 @@ metadata:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
interval: 1m
releaseName: cilium
chart:
spec:
@@ -23,17 +23,10 @@ spec:
storageNamespace: cozy-cilium
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
values:
cilium:
tunnel: disabled
autoDirectNodeRoutes: false
bpf:
masquerade: true
autoDirectNodeRoutes: true
cgroup:
autoMount:
enabled: true
@@ -45,11 +38,9 @@ spec:
chainingMode: ~
customConf: false
configMap: ""
routingMode: tunnel
routingMode: native
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: ""
ipv4NativeRoutingCIDR: "10.244.0.0/16"
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
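One side of this hunk runs Cilium in native routing mode (routingMode: native with autoDirectNodeRoutes: true and BPF masquerading, leaving destinations inside the 10.244.0.0/16 pod CIDR unmasqueraded); the other tunnels. To check which mode a reconciled agent actually came up with, a sketch that assumes the chart's cozy-cilium target namespace and the conventional cilium-agent container name:

# Report the agent's routing configuration:
kubectl -n cozy-cilium exec daemonset/cilium -c cilium-agent -- \
  cilium status | grep -i routing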


@@ -1,4 +1,4 @@
apiVersion: helm.toolkit.fluxcd.io/v2
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-csi
@@ -6,7 +6,7 @@ metadata:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
interval: 1m
releaseName: csi
chart:
spec:
@@ -23,13 +23,6 @@ spec:
storageNamespace: cozy-csi
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -12,31 +12,19 @@ spec:
spec:
serviceAccountName: {{ .Release.Name }}-flux-teardown
restartPolicy: Never
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: "NoSchedule"
containers:
- name: kubectl
image: docker.io/clastix/kubectl:v1.30.1
image: docker.io/clastix/kubectl:v1.29.1
command:
- /bin/sh
- -c
- |
kubectl
--namespace={{ .Release.Namespace }}
patch
helmrelease
{{ .Release.Name }}-cilium
{{ .Release.Name }}-csi
{{ .Release.Name }}-cert-manager
{{ .Release.Name }}-ingress-nginx
{{ .Release.Name }}-fluxcd-operator
{{ .Release.Name }}-fluxcd
-p '{"spec": {"suspend": true}}'
--type=merge --field-manager=flux-client-side-apply || true
- kubectl
- --namespace={{ .Release.Namespace }}
- patch
- helmrelease
- {{ .Release.Name }}-cilium
- {{ .Release.Name }}-csi
- -p
- '{"spec": {"suspend": true}}'
- --type=merge
---
apiVersion: v1
kind: ServiceAccount
@@ -66,10 +54,6 @@ rules:
resourceNames:
- {{ .Release.Name }}-cilium
- {{ .Release.Name }}-csi
- {{ .Release.Name }}-cert-manager
- {{ .Release.Name }}-ingress-nginx
- {{ .Release.Name }}-fluxcd-operator
- {{ .Release.Name }}-fluxcd
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding


@@ -1,84 +0,0 @@
{{- if .Values.addons.fluxcd.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-fluxcd-operator
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: fluxcd-operator
chart:
spec:
chart: cozy-fluxcd-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-fluxcd
storageNamespace: cozy-fluxcd
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
values:
flux-operator:
fullnameOverride: flux-operator
tolerations: []
hostNetwork: false
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
---
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-fluxcd
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: fluxcd
chart:
spec:
chart: cozy-fluxcd
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-fluxcd
storageNamespace: cozy-fluxcd
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
- name: {{ .Release.Name }}-fluxcd-operator
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -1,49 +0,0 @@
{{- if .Values.addons.ingressNginx.enabled }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-ingress-nginx
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 5m
releaseName: ingress-nginx
chart:
spec:
chart: cozy-ingress-nginx
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-ingress-nginx
storageNamespace: cozy-ingress-nginx
install:
createNamespace: true
remediation:
retries: -1
upgrade:
remediation:
retries: -1
values:
ingress-nginx:
fullnameOverride: ingress-nginx
controller:
kind: DaemonSet
hostNetwork: true
service:
enabled: false
nodeSelector:
node-role.kubernetes.io/ingress-nginx: ""
dependsOn:
{{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
{{- end }}
- name: {{ .Release.Name }}-cilium
namespace: {{ .Release.Namespace }}
{{- end }}
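Because the controller is a host-network DaemonSet with its Service disabled and a nodeSelector on the node-role.kubernetes.io/ingress-nginx label, ingress traffic terminates only on nodes that carry that label. A node opts in with a single command (the node name worker-1 is a placeholder):

# Schedule the ingress controller onto this node:
kubectl label node worker-1 node-role.kubernetes.io/ingress-nginx=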

Some files were not shown because too many files have changed in this diff.