Prepare release v0.0.1

Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
This commit is contained in:
Andrei Kvapil
2023-12-10 20:09:43 +01:00
commit f642698921
1181 changed files with 381555 additions and 0 deletions

packages/apps/Makefile Normal file
View File

@@ -0,0 +1,20 @@
OUT=../../_out/repos/apps
TMP=../../_out/repos/apps/historical
repo:
rm -rf "$(OUT)"
mkdir -p "$(OUT)"
awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
cd "$(OUT)" && helm repo index .
rm -rf "$(TMP)"
fix-chartnames:
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
gen-versions-map: fix-chartnames
../../hack/gen_versions_map.sh
check-version-map: gen-versions-map
git diff --exit-code -- versions_map
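
For context, `versions_map` is expected to hold three whitespace-separated columns per line — chart name, chart version, and the git ref that version lives at (rows whose ref is `HEAD` are packaged from the working tree rather than archived). A hypothetical excerpt with illustrative values:
```
# <chart> <version> <git-ref>
http-cache 0.1.0 HEAD
http-cache 0.0.9 1a2b3c4
```
Running `make repo` then packages every chart found in the working tree and under `$(TMP)`, and regenerates the Helm repository index.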

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: http-cache
description: Layer 7 load balancer and caching service
icon: https://www.svgrepo.com/show/373924/nginx.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,35 @@
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
TAG := v0.0.1
image: image-nginx
image-nginx:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
--provenance false \
--tag $(REGISTRY)/nginx-cache:$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:$(TAG) \
--cache-to type=inline \
--metadata-file images/nginx-cache.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/nginx:$(TAG)" > images/nginx-cache.tag
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \
sed -i "/^ARG IP2LOCATION_C_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/ip2location/ip2proxy-c | awk -F'[/^]' 'END{print $$3}') && \
sed -i "/^ARG IP2PROXY_C_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/ip2location/ip2location-nginx | awk -F'[/^]' 'END{print $$3}') && \
sed -i "/^ARG IP2LOCATION_NGINX_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/ip2location/ip2proxy-nginx | awk -F'[/^]' 'END{print $$3}') && \
sed -i "/^ARG IP2PROXY_NGINX_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/nginx/nginx | awk -F'[/^]' 'END{print $$3}' | awk -F- '{print $$2}') && \
sed -i "/^ARG NGINX_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/nginx-modules/ngx_cache_purge | awk -F'[/^]' 'END{print $$3}') && \
sed -i "/^ARG NGINX_CACHE_PURGE_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/vozlt/nginx-module-vts | awk -F'[/^]' 'END{print $$3}' | sed 's/^v//') && \
sed -i "/^ARG NGINX_VTS_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/51Degrees/Device-Detection | awk -F'[/^]' 'END{print $$3}' | sed 's/^v//') && \
sed -i "/^ARG FIFTYONEDEGREES_NGINX_VERSION=/ s/=.*/=$$tag/" images/nginx/Dockerfile

View File

@@ -0,0 +1,57 @@
# Managed Nginx Caching Service
The Nginx Caching Service is designed to optimize web traffic and enhance web application performance. This service combines custom-built Nginx instances with HAproxy for efficient caching and load balancing.
## Deployment Information
The Nginx instances include the following modules and features:
- VTS module for statistics
- Integration with ip2location
- Integration with ip2proxy
- Support for 51Degrees
- Cache purge functionality
HAproxy plays a vital role in this setup by directing incoming traffic to specific Nginx instances based on a consistent hash calculated from the URL. Each Nginx instance includes a Persistent Volume Claim (PVC) for storing cached content, ensuring fast and reliable access to frequently used resources.
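Cache behavior can be verified directly against the service: the Nginx layer returns its cache status in a debug header, and cached objects can be invalidated with an HTTP `PURGE` request (both are enabled in the bundled configuration). A quick check, where `<service-ip>` is a placeholder for the service address:
```
curl -sI http://<service-ip>/some/asset | grep X-Cache-Status
curl -s -X PURGE http://<service-ip>/some/asset
```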
## Deployment Details
The deployment architecture is illustrated in the diagram below:
```
┌─────────┐
│ metallb │ arp announce
└────┬────┘
┌───────▼───────────────────────────┐
│ kubernetes service │ node
│ (externalTrafficPolicy: Local) │ level
└──────────┬────────────────────────┘
┌────▼────┐ ┌─────────┐
│ haproxy │ │ haproxy │ loadbalancer
│ (active)│ │ (backup)│ layer
└────┬────┘ └─────────┘
│ balance uri whole
│ hash-type consistent
┌──────┴──────┬──────────────┐
┌───▼───┐ ┌───▼───┐ ┌───▼───┐ caching
│ nginx │ │ nginx │ │ nginx │ layer
└───┬───┘ └───┬───┘ └───┬───┘
│ │ │
┌────┴───────┬─────┴────┬─────────┴──┐
│ │ │ │
┌───▼────┐ ┌────▼───┐ ┌───▼────┐ ┌────▼───┐
│ origin │ │ origin │ │ origin │ │ origin │
└────────┘ └────────┘ └────────┘ └────────┘
```
## Known issues
The VTS module reports incorrect upstream response times
- https://github.com/vozlt/nginx-module-vts/issues/198

View File

@@ -0,0 +1,14 @@
{
"containerimage.config.digest": "sha256:d68641167af14b246e0332c14a7a9d9f6c0a4f813881db2de5fc53816bd35786",
"containerimage.descriptor": {
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"digest": "sha256:241da53aba9b121d5d1829744a9ba31036cd5e5ffd6cf584da8113ddd79764f2",
"size": 1093,
"platform": {
"architecture": "amd64",
"os": "linux"
}
},
"containerimage.digest": "sha256:241da53aba9b121d5d1829744a9ba31036cd5e5ffd6cf584da8113ddd79764f2",
"image.name": "ghcr.io/aenix-io/cozystack/nginx-cache:v0.0.1"
}

View File

@@ -0,0 +1 @@
ghcr.io/aenix-io/cozystack/nginx-cache:v0.0.1

View File

@@ -0,0 +1,178 @@
FROM ubuntu:22.04 as stage
ARG NGINX_VERSION=1.25.3
ARG IP2LOCATION_C_VERSION=8.6.1
ARG IP2PROXY_C_VERSION=4.1.2
ARG IP2LOCATION_NGINX_VERSION=8.6.0
ARG IP2PROXY_NGINX_VERSION=8.1.1
ARG FIFTYONEDEGREES_NGINX_VERSION=3.2.21.1
ARG NGINX_CACHE_PURGE_VERSION=2.5.3
ARG NGINX_VTS_VERSION=0.2.2
# Install required packages for development
RUN apt-get update -q \
&& apt-get install -yq \
unzip \
autoconf \
build-essential \
libtool \
libpcre3 \
libpcre3-dev \
libssl-dev \
libgd-dev \
zlib1g-dev \
gcc \
make \
git \
wget \
curl \
checkinstall
# Download sources
RUN mkdir ip2location \
&& curl -sS -L https://github.com/chrislim2888/IP2Location-C-Library/archive/refs/tags/${IP2LOCATION_C_VERSION}.tar.gz \
| tar -C ip2location -xzvf- --strip=1
RUN mkdir ip2proxy \
&& curl -sS -L https://github.com/ip2location/ip2proxy-c/archive/refs/tags/${IP2PROXY_C_VERSION}.tar.gz \
| tar -C ip2proxy -xzvf- --strip=1
RUN mkdir ip2mod-location \
&& curl -sS -L https://github.com/ip2location/ip2location-nginx/archive/refs/tags/${IP2LOCATION_NGINX_VERSION}.tar.gz \
| tar -C ip2mod-location -xzvf- --strip=1
RUN mkdir ip2mod-proxy \
&& curl -sS -L https://github.com/ip2location/ip2proxy-nginx/archive/refs/tags/${IP2PROXY_NGINX_VERSION}.tar.gz \
| tar -C ip2mod-proxy -xzvf- --strip=1
RUN mkdir cache-purge-module \
&& curl -sS -L https://github.com/nginx-modules/ngx_cache_purge/archive/refs/tags/${NGINX_CACHE_PURGE_VERSION}.tar.gz \
| tar -C cache-purge-module -xzvf- --strip=1
RUN mkdir nginx-module-vts \
&& curl -sS -L https://github.com/vozlt/nginx-module-vts/archive/refs/tags/v${NGINX_VTS_VERSION}.tar.gz \
| tar -C nginx-module-vts -xzvf- --strip=1
RUN mkdir nginx \
&& curl -sS -L https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz \
| tar -C nginx -xzvf- --strip=1
# Compile C Library for IP2Location module
WORKDIR /ip2location
RUN autoreconf -i -v --force
RUN ./configure
RUN make
RUN checkinstall \
-D \
--install=no \
--default \
--pkgname=ip2location-c \
--pkgversion=${IP2LOCATION_C_VERSION} \
--pkgarch=amd64 \
--pkggroup=lib \
--pkgsource="https://github.com/chrislim2888/IP2Location-C-Library" \
--maintainer="Eduard Generalov <eduard@generalov.net>" \
--requires=librtmp1 \
--autodoinst=no \
--deldoc=yes \
--deldesc=yes \
--delspec=yes \
--backup=no \
make install
WORKDIR /ip2location/data
RUN perl ip-country.pl
WORKDIR /ip2location/test
RUN ./test-IP2Location
# Compile C Library for IP2Proxy module
WORKDIR /ip2proxy
RUN autoreconf -i -v --force
RUN ./configure
RUN make
RUN checkinstall \
-D \
--install=no \
--default \
--pkgname=ip2proxy-c \
--pkgversion=${IP2PROXY_C_VERSION} \
--pkgarch=amd64 \
--pkggroup=lib \
--pkgsource="https://github.com/ip2location/ip2proxy-c" \
--maintainer="Eduard Generalov <eduard@generalov.net>" \
--requires=librtmp1 \
--autodoinst=no \
--deldoc=yes \
--deldesc=yes \
--delspec=yes \
--backup=no \
make install
# Compile Nginx
WORKDIR /nginx
RUN ./configure \
--with-compat \
--prefix=/usr/share/nginx \
--sbin-path=/usr/bin/nginx \
--with-http_ssl_module \
--conf-path=/etc/nginx/nginx.conf \
--http-log-path=/var/log/nginx/access.log \
--error-log-path=/var/log/nginx/error.log \
--lock-path=/var/lock/nginx.lock \
--pid-path=/run/nginx.pid \
--modules-path=/usr/lib/nginx/modules \
--http-client-body-temp-path=/var/lib/nginx/body \
--http-fastcgi-temp-path=/var/lib/nginx/fastcgi \
--http-proxy-temp-path=/var/lib/nginx/proxy \
--http-scgi-temp-path=/var/lib/nginx/scgi \
--http-uwsgi-temp-path=/var/lib/nginx/uwsgi \
--with-http_realip_module \
--with-http_stub_status_module \
--with-stream \
--add-module=../nginx-module-vts \
--add-module=../cache-purge-module \
--add-dynamic-module=../ip2mod-proxy \
--add-dynamic-module=../ip2mod-location \
--with-compat
RUN make modules
RUN make -j 8
RUN checkinstall \
-D \
--install=no \
--default \
--pkgname=nginx \
--pkgversion=${NGINX_VERSION} \
--pkgarch=amd64 \
--pkggroup=web \
--provides=nginx \
--requires=ip2location-c,ip2proxy-c,libssl3,libc-bin,libc6,libzstd1,libpcre++0v5,libpcre16-3,libpcre2-8-0,libpcre3,libpcre32-3,libpcrecpp0v5,libmaxminddb0 \
--autodoinst=no \
--deldoc=yes \
--deldesc=yes \
--delspec=yes \
--backup=no \
make install
RUN mkdir /packages \
&& mv /*/*.deb /packages/
FROM ubuntu:22.04
COPY --from=stage /packages /packages
COPY nginx-reloader.sh /usr/bin/nginx-reloader.sh
RUN set -x \
&& groupadd --system --gid 101 nginx \
&& useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
&& apt update \
&& apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates inotify-tools \
&& apt -y install /packages/*.deb \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* \
&& mkdir -p /var/lib/nginx /var/log/nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log \
&& chown -R nginx: /var/lib/nginx /var/log/nginx
ENTRYPOINT ["/usr/bin/nginx", "-g", "daemon off;"]

View File

@@ -0,0 +1,13 @@
#!/bin/sh
set -e
cleanup() {
echo "Received termination signal. Exiting..."
exit 0
}
trap cleanup INT TERM
while true; do
inotifywait -s -e close_write,attrib --include 'reload' /data >/dev/null
nginx -s reload
done
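
The watcher only reacts to a file named `reload` under `/data`, so a reload can be triggered from outside the container by touching that file; for example (pod name is a placeholder):
```
kubectl exec <pod-name> -c reloader -- touch /data/reload
```
This works because the `nginx` and `reloader` containers share the process namespace and the `/run` mount, so `nginx -s reload` in the sidecar can signal the main nginx process.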

View File

@@ -0,0 +1,106 @@
{{- define "backendoptions" }}
{{- if eq . "http" }}
mode http
option forwardfor
balance uri whole
hash-type consistent
retry-on conn-failure 503
retries 2
option redispatch 1
default-server observe layer7 error-limit 10 on-error mark-down check
{{- else if eq . "tcp" }}
mode tcp
balance roundrobin
default-server observe layer4 error-limit 10 on-error mark-down check
{{- else if eq . "tcp-with-proxy" }}
mode tcp
balance roundrobin
default-server observe layer4 error-limit 10 on-error mark-down check send-proxy-v2
{{- else }}
{{- fail (printf "mode %s is not supported" .) }}
{{- end }}
{{- end }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-haproxy
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
haproxy.cfg: |
defaults
mode tcp
option dontlognull
timeout http-request 10s
timeout queue 20s
timeout connect 5s
timeout client 5m
timeout server 5m
timeout tunnel 5m
timeout http-keep-alive 10s
timeout check 10s
frontend http
bind :::8080 v4v6
mode http
{{- if $.Values.whitelistHTTP }}
{{- with $.Values.whitelist }}
acl whitelist src{{ range . }} {{ . }}{{ end }}
{{- end }}
acl all src 0.0.0.0
tcp-request content accept if whitelist
tcp-request content reject
{{- end }}
tcp-request content set-dst-port int(80)
# match real IP from cloudflare
acl from_cf src -f /usr/local/etc/haproxy/CF_ips.lst
acl cf_ip_hdr req.hdr(CF-Connecting-IP) -m found
http-request set-header X-Forwarded-For %[req.hdr(CF-Connecting-IP)] if from_cf cf_ip_hdr
# overwrite real IP header from anywhere else
http-request set-header X-Forwarded-For %[src] if !from_cf
default_backend http
backend http
mode http
balance uri whole
hash-type consistent
retry-on conn-failure 503
retries 2
option redispatch 1
default-server observe layer7 error-limit 10 on-error mark-down
{{- range $i, $e := until (int $.Values.replicas) }}
server cache{{ $i }} {{ $.Release.Name }}-nginx-cache-{{ $i }}:80 check
{{- end }}
{{- range $i, $e := $.Values.endpoints }}
server origin{{ $i }} {{ $e }} backup
{{- end }}
# https://developers.cloudflare.com/support/troubleshooting/restoring-visitor-ips/restoring-original-visitor-ips/
CF_ips.lst: |
173.245.48.0/20
103.21.244.0/22
103.22.200.0/22
103.31.4.0/22
141.101.64.0/18
108.162.192.0/18
190.93.240.0/20
188.114.96.0/20
197.234.240.0/22
198.41.128.0/17
162.158.0.0/15
104.16.0.0/13
104.24.0.0/14
172.64.0.0/13
131.0.72.0/22
2400:cb00::/32
2606:4700::/32
2803:f800::/32
2405:b500::/32
2405:8100::/32
2a06:98c0::/29
2c0f:f248::/32

View File

@@ -0,0 +1,45 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-haproxy
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 2
selector:
matchLabels:
app: {{ .Release.Name }}-haproxy
template:
metadata:
labels:
app: {{ .Release.Name }}-haproxy
annotations:
checksum/config: {{ include (print $.Template.BasePath "/haproxy/configmap.yaml") . | sha256sum }}
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ .Release.Name }}-haproxy
topologyKey: kubernetes.io/hostname
containers:
- image: haproxy:latest
name: haproxy
ports:
- containerPort: 8080
name: http
volumeMounts:
- mountPath: /usr/local/etc/haproxy
name: config
volumes:
- configMap:
name: {{ .Release.Name }}-haproxy
name: config

View File

@@ -0,0 +1,21 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-haproxy
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
{{- end }}
selector:
app: {{ .Release.Name }}-haproxy
ports:
- name: http
protocol: TCP
port: 80
targetPort: http

View File

@@ -0,0 +1,162 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ $.Release.Name }}-nginx-cache
labels:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
data:
nginx.conf: |
user nginx;
worker_processes 2;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
#load_module /usr/lib/nginx/modules/ngx_http_ip2location_module.so;
#load_module /usr/lib/nginx/modules/ngx_http_ip2proxy_module.so;
events {
use epoll;
multi_accept on;
worker_connections 10240;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
vhost_traffic_status_zone;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
keepalive_timeout 65;
gzip on;
proxy_cache_path /data/cache levels=1:2 keys_zone=mycache:400m max_size=100g
inactive=30d use_temp_path=off;
#ip2location_database /data/dbs/ip2location.bin;
#ip2location_proxy_recursive on;
#ip2location_proxy 10.0.0.0/8;
#ip2proxy_database /data/dbs/ip2proxy.bin;
#ip2proxy_proxy_recursive on;
#ip2proxy_proxy 10.0.0.0/8;
server {
listen *:10253;
server_name _;
vhost_traffic_status_bypass_limit on;
vhost_traffic_status_bypass_stats on;
location /health {
access_log off;
add_header 'Content-Type' 'text/plain';
return 200 "healthy\n";
}
location /metrics {
vhost_traffic_status_display;
vhost_traffic_status_display_format prometheus;
}
}
upstream origin_servers {
{{- range $num, $ep := $.Values.endpoints }}
server {{ $ep }};
{{- end }}
}
# URL shortener:
# Example: / --> /
# Example: /a --> /a
# Example: /a/ --> /a
# Example: /a/b --> /a/*
map $uri $shorten_url {
~^/$ /;
~^/([^/]+)$ /$1;
~^/([^/]+)/$ /$1;
~^/([^/]+)/.*$ /$1/*;
}
server {
listen *:80;
server_name _;
vhost_traffic_status_filter_by_host on;
#vhost_traffic_status_filter_by_set_key $host country::$ip2location_country_short;
vhost_traffic_status_filter_by_set_key $shorten_url url::$host;
proxy_cache mycache;
proxy_cache_revalidate on;
proxy_cache_lock on;
proxy_cache_key $scheme$http_host$request_uri;
proxy_cache_purge PURGE from all;
cache_purge_response_type json;
proxy_cache_valid 200 1h;
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
proxy_cache_background_update on;
proxy_connect_timeout 400ms;
proxy_next_upstream error timeout http_500 http_502 http_503 http_504;
location / {
proxy_set_header Host $http_host;
## debug
add_header X-Cache-Status $upstream_cache_status;
#add_header X-Cache-Node $hostname;
#add_header X-Cache-Key $scheme$http_host$request_uri;
proxy_set_header X-Real-IP $remote_addr;
real_ip_header X-Forwarded-For;
real_ip_recursive on;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#proxy_set_header X-Anonymous-Type $ip2proxy_proxy_type;
#proxy_set_header X-Country $ip2location_country_short;
#proxy_set_header X-Country-Code $ip2location_country_short;
#proxy_set_header X-Country-Name $ip2location_country_long;
#proxy_set_header X-GeoIP-Region $ip2location_region;
#proxy_set_header X-GeoIP-City $ip2location_city;
#proxy_set_header X-Geoip-Country $ip2location_country_short;
#proxy_set_header X-Geoip-Latitude $ip2location_latitude;
#proxy_set_header X-Geoip-Longitude $ip2location_longitude;
#proxy_set_header X-GeoIP-ISP $ip2location_isp;
##proxy_set_header X-GeoIP-Postal-Code $ip2location_zipcode;
##proxy_set_header X-Geoip-Timezone $ip2location_timezone;
##proxy_set_header X-Geoip-Asn $ip2location_asn;
proxy_hide_header Pragma;
proxy_hide_header Expires;
# to backends
proxy_pass http://origin_servers;
proxy_buffering on;
}
}
}

View File

@@ -0,0 +1,140 @@
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ $.Release.Name }}-nginx-cache
labels:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
spec:
maxUnavailable: 1
selector:
matchLabels:
app: {{ $.Release.Name }}-nginx-cache
{{- range $i := until (int $.Values.replicas) }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ $.Release.Name }}-nginx-cache-{{ $i }}
labels:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
spec:
selector:
matchLabels:
app: {{ $.Release.Name }}-nginx-cache
instance: "{{ $i }}"
template:
metadata:
labels:
app: {{ $.Release.Name }}-nginx-cache
instance: "{{ $i }}"
annotations:
checksum/config: {{ include (print $.Template.BasePath "/nginx/configmap.yaml") $ | sha256sum }}
spec:
imagePullSecrets:
- name: {{ $.Release.Name }}-regsecret
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ $.Release.Name }}-nginx-cache
- key: instance
operator: NotIn
values:
- "{{ $i }}"
topologyKey: kubernetes.io/hostname
shareProcessNamespace: true
containers:
- name: nginx
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}@{{ index ($.Files.Get "images/nginx-cache.json" | fromJson) "containerimage.digest" }}"
readinessProbe:
httpGet:
path: /health
port: metrics
initialDelaySeconds: 5
periodSeconds: 5
livenessProbe:
httpGet:
path: /health
port: metrics
failureThreshold: 1
periodSeconds: 10
ports:
- containerPort: 80
name: http
- containerPort: 8087
name: cache
- containerPort: 10253
name: metrics
volumeMounts:
- mountPath: /etc/nginx/nginx.conf
name: config
subPath: nginx.conf
- mountPath: /data
name: data
- mountPath: /run
name: run
- name: reloader
image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}@{{ index ($.Files.Get "images/nginx-cache.json" | fromJson) "containerimage.digest" }}"
command: ["/usr/bin/nginx-reloader.sh"]
#command: ["sleep", "infinity"]
volumeMounts:
- mountPath: /etc/nginx/nginx.conf
name: config
subPath: nginx.conf
- mountPath: /data
name: data
- mountPath: /run
name: run
volumes:
- name: config
configMap:
name: {{ $.Release.Name }}-nginx-cache
- name: data
persistentVolumeClaim:
claimName: {{ $.Release.Name }}-nginx-cache-{{ $i }}
- name: run
emptyDir: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ $.Release.Name }}-nginx-cache-{{ $i }}
labels:
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: "{{ $.Values.size }}"
---
apiVersion: v1
kind: Service
metadata:
name: {{ $.Release.Name }}-nginx-cache-{{ $i }}
labels:
app: {{ $.Release.Name }}-nginx-cache
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/managed-by: {{ $.Release.Service }}
spec:
type: ClusterIP
selector:
app: {{ $.Release.Name }}-nginx-cache
instance: "{{ $i }}"
ports:
- name: http
protocol: TCP
port: 80
targetPort: http
- name: metrics
protocol: TCP
port: 10253
targetPort: metrics
{{- end }}

View File

@@ -0,0 +1,26 @@
---
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMServiceScrape
metadata:
name: nginx-cache
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- infra-nginx-cache
endpoints:
- path: /metrics
port: metrics
honorLabels: true
relabelConfigs:
- replacement: nginx-cache
targetLabel: job
- sourceLabels: [__meta_kubernetes_service_name]
targetLabel: instance
- sourceLabels: [__meta_kubernetes_pod_node_name]
targetLabel: node
- targetLabel: tier
replacement: cluster
selector:
matchLabels:
app: {{ $.Release.Name }}-nginx-cache

View File

@@ -0,0 +1,9 @@
external: false
size: 10Gi
replicas: 3
endpoints:
- 10.100.3.1:80
- 10.100.3.11:80
- 10.100.3.2:80
- 10.100.3.12:80
- 10.100.3.3:80
- 10.100.3.13:80

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: kubernetes
description: Managed Kubernetes service
icon: https://upload.wikimedia.org/wikipedia/commons/thumb/3/39/Kubernetes_logo_without_workmark.svg/723px-Kubernetes_logo_without_workmark.svg.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,18 @@
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
TAG := v0.0.1
UBUNTU_CONTAINER_DISK_TAG := v1.29.1
image: image-ubuntu-container-disk
image-ubuntu-container-disk:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \
--tag $(REGISTRY)/ubuntu-container-disk:$(TAG)-$(UBUNTU_CONTAINER_DISK_TAG) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:$(TAG)-$(UBUNTU_CONTAINER_DISK_TAG) \
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG)" > images/ubuntu-container-disk.tag

View File

@@ -0,0 +1,28 @@
# Managed Kubernetes Service
## Overview
The Managed Kubernetes Service offers a streamlined solution for efficiently managing server workloads. Kubernetes has emerged as the industry standard, providing a unified and accessible API, primarily utilizing YAML for configuration. This means that teams can easily understand and work with Kubernetes, streamlining infrastructure management.
Kubernetes leverages robust software design patterns, enabling continuous recovery through reconciliation and seamless scaling across many servers. It avoids the complex and outdated APIs of traditional virtualization platforms, eliminating the need for custom solutions or source-code modifications and saving valuable time and effort.
## Deployment Details
The managed Kubernetes service deploys a standard Kubernetes cluster using Cluster API, Kamaji as the control-plane provider, and the KubeVirt infrastructure provider. This ensures a consistent and reliable setup for workloads.
Within this cluster, users can take advantage of LoadBalancer services and easily provision physical volumes as needed. The control-plane operates within containers, while the worker nodes are deployed as virtual machines, all seamlessly managed by the application.
- Docs: https://github.com/clastix/kamaji
- Docs: https://cluster-api.sigs.k8s.io/
- GitHub: https://github.com/clastix/kamaji
- GitHub: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt
- GitHub: https://github.com/kubevirt/csi-driver
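Once deployed, the cluster's lifecycle can be observed through the Cluster API resources the chart creates; assuming the Cluster API CRDs are installed and `<namespace>` is the release namespace, a quick status check might look like:
```
kubectl get cluster,kamajicontrolplane,machinedeployment -n <namespace>
```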
## How-Tos
How to access the deployed cluster:
```
kubectl get secret -n <namespace> kubernetes-<clusterName>-admin-kubeconfig -o go-template='{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}' > kubeconfig
```
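The resulting file can then be used as a regular kubeconfig, for example:
```
kubectl --kubeconfig kubeconfig get nodes
```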

View File

@@ -0,0 +1,4 @@
{
"containerimage.config.digest": "sha256:e982cfa2320d3139ed311ae44bcc5ea18db7e4e76d2746e0af04c516288ff0f1",
"containerimage.digest": "sha256:34f6aba5b5a2afbb46bbb891ef4ddc0855c2ffe4f9e5a99e8e553286ddd2c070"
}

View File

@@ -0,0 +1 @@
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v0.0.1-v1.29.1

View File

@@ -0,0 +1,51 @@
FROM ubuntu:22.04 as guestfish
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
&& apt-get -y install \
libguestfs-tools \
linux-image-generic \
make \
wget \
bash-completion \
&& apt-get clean
WORKDIR /build
FROM guestfish as builder
RUN wget -O image.img https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
RUN qemu-img resize image.img 5G \
&& eval "$(guestfish --listen --network)" \
&& guestfish --remote add-drive image.img \
&& guestfish --remote run \
&& guestfish --remote mount /dev/sda1 / \
&& guestfish --remote command "growpart /dev/sda 1 --verbose" \
&& guestfish --remote command "resize2fs /dev/sda1" \
# docker repo
&& guestfish --remote sh "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg" \
&& guestfish --remote sh 'echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list' \
# kubernetes repo
&& guestfish --remote sh "curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg" \
&& guestfish --remote sh "echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /' | tee /etc/apt/sources.list.d/kubernetes.list" \
# install containerd
&& guestfish --remote command "apt-get update -y" \
&& guestfish --remote command "apt-get install -y containerd.io" \
# configure containerd
&& guestfish --remote command "mkdir -p /etc/containerd" \
&& guestfish --remote sh "containerd config default | tee /etc/containerd/config.toml" \
&& guestfish --remote command "sed -i '/SystemdCgroup/ s/=.*/= true/' /etc/containerd/config.toml" \
# install kubernetes
&& guestfish --remote command "apt-get install -y kubelet kubeadm" \
# clean apt cache
&& guestfish --remote sh 'apt-get clean && rm -rf /var/lib/apt/lists/*' \
# write system configuration
&& guestfish --remote sh 'printf "%s\n" net.bridge.bridge-nf-call-iptables=1 net.bridge.bridge-nf-call-ip6tables=1 net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1 net.ipv6.conf.all.disable_ipv6=0 net.ipv4.tcp_congestion_control=bbr vm.overcommit_memory=1 kernel.panic=10 kernel.panic_on_oops=1 fs.inotify.max_user_instances=8192 fs.inotify.max_user_watches=524288 | tee /etc/sysctl.d/kubernetes.conf' \
&& guestfish --remote sh 'printf "%s\n" overlay br_netfilter | tee /etc/modules-load.d/kubernetes.conf' \
&& guestfish --remote sh "rm -f /etc/resolv.conf && ln -s ../run/systemd/resolve/stub-resolv.conf /etc/resolv.conf" \
# umount all and exit
&& guestfish --remote umount-all \
&& guestfish --remote exit
FROM scratch
COPY --from=builder /build/image.img /disk/image.qcow2

View File

@@ -0,0 +1,3 @@
To get kubeconfig for this cluster run:
kubectl get secret -n {{ .Release.Namespace }} {{ .Release.Name }}-admin-kubeconfig -o go-template='{{`{{ printf "%s\n" (index .data "super-admin.conf" | base64decode) }}`}}'

View File

@@ -0,0 +1,51 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubernetes.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubernetes.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubernetes.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubernetes.labels" -}}
helm.sh/chart: {{ include "kubernetes.chart" . }}
{{ include "kubernetes.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubernetes.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubernetes.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-cloud-config
data:
cloud-config: |
loadBalancer:
creationPollInterval: 5
creationPollTimeout: 60
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,86 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
labels:
app: {{ .Release.Name }}-cluster-autoscaler
spec:
selector:
matchLabels:
app: {{ .Release.Name }}-cluster-autoscaler
replicas: 1
template:
metadata:
labels:
app: {{ .Release.Name }}-cluster-autoscaler
spec:
containers:
- image: ghcr.io/kvaps/test:cluster-autoscaller
name: cluster-autoscaler
command:
- /cluster-autoscaler
args:
- --cloud-provider=clusterapi
- --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
- --clusterapi-cloud-config-authoritative
- --node-group-auto-discovery=clusterapi:namespace={{ .Release.Namespace }},clusterName={{ .Release.Name }}
volumeMounts:
- mountPath: /etc/kubernetes/kubeconfig
name: kubeconfig
readOnly: true
volumes:
- configMap:
name: {{ .Release.Name }}-cloud-config
name: cloud-config
- secret:
secretName: {{ .Release.Name }}-admin-kubeconfig
name: kubeconfig
serviceAccountName: {{ .Release.Name }}-cluster-autoscaler
terminationGracePeriodSeconds: 10
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-cluster-autoscaler
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-cluster-autoscaler
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ .Release.Name }}-cluster-autoscaler
rules:
- apiGroups:
- cluster.x-k8s.io
resources:
- machinedeployments
- machinedeployments/scale
- machines
- machinesets
- machinepools
verbs:
- get
- list
- update
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- kubevirtmachinetemplates
verbs:
- get
- list
- update
- watch

View File

@@ -0,0 +1,150 @@
{{- $myNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- $etcd := index $myNS.metadata.annotations "namespace.cozystack.io/etcd" }}
{{- $ingress := index $myNS.metadata.annotations "namespace.cozystack.io/ingress" }}
{{- $host := index $myNS.metadata.annotations "namespace.cozystack.io/host" }}
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
spec:
clusterNetwork:
pods:
cidrBlocks:
- 10.243.0.0/16
services:
cidrBlocks:
- 10.95.0.0/16
controlPlaneRef:
namespace: {{ .Release.Namespace }}
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlane
name: {{ .Release.Name }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtCluster
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha1
kind: KamajiControlPlane
metadata:
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
labels:
cluster.x-k8s.io/role: control-plane
annotations:
kamaji.clastix.io/kubeconfig-secret-key: "super-admin.svc"
spec:
dataStoreName: "{{ $etcd }}"
addons:
coreDNS: {}
konnectivity: {}
kubelet:
cgroupfs: systemd
preferredAddressTypes:
- InternalIP
- ExternalIP
network:
serviceType: ClusterIP
ingress:
extraAnnotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
hostname: {{ .Values.host | default (printf "%s.%s" .Release.Name $host) }}:443
className: "{{ $ingress }}"
deployment:
replicas: 2
version: 1.29.0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtCluster
metadata:
annotations:
cluster.x-k8s.io/managed-by: kamaji
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
metadata:
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
spec:
template:
spec:
joinConfiguration:
nodeRegistration:
kubeletExtraArgs: {}
discovery:
bootstrapToken:
apiServerEndpoint: {{ .Release.Name }}.{{ .Release.Namespace }}.svc:6443
initConfiguration:
skipPhases:
- addon/kube-proxy
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
metadata:
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
spec:
template:
spec:
virtualMachineBootstrapCheck:
checkStrategy: ssh
virtualMachineTemplate:
metadata:
namespace: {{ .Release.Namespace }}
spec:
runStrategy: Always
template:
spec:
domain:
cpu:
threads: 1
cores: 2
sockets: 1
devices:
disks:
- disk:
bus: virtio
name: containervolume
networkInterfaceMultiqueue: true
memory:
guest: 1024Mi
evictionStrategy: External
volumes:
- containerDisk:
image: "{{ $.Files.Get "images/ubuntu-container-disk.tag" | trim }}@{{ index ($.Files.Get "images/ubuntu-container-disk.json" | fromJson) "containerimage.digest" }}"
name: containervolume
---
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
annotations:
cluster.x-k8s.io/cluster-api-autoscaler-node-group-max-size: "2"
cluster.x-k8s.io/cluster-api-autoscaler-node-group-min-size: "0"
capacity.cluster-autoscaler.kubernetes.io/memory: "1024Mi"
capacity.cluster-autoscaler.kubernetes.io/cpu: "2"
spec:
clusterName: {{ .Release.Name }}
selector:
matchLabels: null
template:
spec:
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: KubeadmConfigTemplate
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
clusterName: {{ .Release.Name }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: KubevirtMachineTemplate
name: {{ .Release.Name }}-md-0
namespace: {{ .Release.Namespace }}
version: v1.29.0

View File

@@ -0,0 +1,126 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ .Release.Name }}-kcsi-controller
labels:
app: {{ .Release.Name }}-kcsi-driver
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}-kcsi-driver
template:
metadata:
labels:
app: {{ .Release.Name }}-kcsi-driver
spec:
serviceAccountName: {{ .Release.Name }}-kcsi
priorityClassName: system-cluster-critical
nodeSelector:
node-role.kubernetes.io/control-plane: ""
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
effect: "NoSchedule"
containers:
- name: csi-driver
imagePullPolicy: Always
image: ghcr.io/kvaps/test:kubevirt-csi-driver
args:
- "--endpoint=$(CSI_ENDPOINT)"
- "--infra-cluster-namespace=$(INFRACLUSTER_NAMESPACE)"
- "--infra-cluster-labels=$(INFRACLUSTER_LABELS)"
- "--v=5"
ports:
- name: healthz
containerPort: 10301
protocol: TCP
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: INFRACLUSTER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: INFRACLUSTER_LABELS
value: "csi-driver/cluster=test"
- name: INFRA_STORAGE_CLASS_ENFORCEMENT
valueFrom:
configMapKeyRef:
name: driver-config
key: infraStorageClassEnforcement
optional: true
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
resources:
requests:
memory: 50Mi
cpu: 10m
- name: csi-provisioner
image: quay.io/openshift/origin-csi-external-provisioner:latest
args:
- "--csi-address=$(ADDRESS)"
- "--default-fstype=ext4"
- "--kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc"
- "--v=5"
- "--timeout=3m"
- "--retry-interval-max=1m"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
- name: csi-attacher
image: quay.io/openshift/origin-csi-external-attacher:latest
args:
- "--csi-address=$(ADDRESS)"
- "--kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc"
- "--v=5"
- "--timeout=3m"
- "--retry-interval-max=1m"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: kubeconfig
mountPath: /etc/kubernetes/kubeconfig
readOnly: true
resources:
requests:
memory: 50Mi
cpu: 10m
- name: csi-liveness-probe
image: quay.io/openshift/origin-csi-livenessprobe:latest
args:
- "--csi-address=/csi/csi.sock"
- "--probe-timeout=3s"
- "--health-port=10301"
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
requests:
memory: 50Mi
cpu: 10m
volumes:
- name: socket-dir
emptyDir: {}
- secret:
secretName: {{ .Release.Name }}-admin-kubeconfig
name: kubeconfig

View File

@@ -0,0 +1,32 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-kcsi
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Release.Name }}-kcsi
rules:
- apiGroups: ["cdi.kubevirt.io"]
resources: ["datavolumes"]
verbs: ["get", "create", "delete"]
- apiGroups: ["kubevirt.io"]
resources: ["virtualmachineinstances"]
verbs: ["list", "get"]
- apiGroups: ["subresources.kubevirt.io"]
resources: ["virtualmachineinstances/addvolume", "virtualmachineinstances/removevolume"]
verbs: ["update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Release.Name }}-kcsi
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-kcsi
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-kcsi

View File

@@ -0,0 +1,46 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-cilium
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 1m
releaseName: cilium
chart:
spec:
chart: cozy-cilium
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-cilium
storageNamespace: cozy-cilium
install:
createNamespace: true
values:
cilium:
tunnel: disabled
autoDirectNodeRoutes: true
cgroup:
autoMount:
enabled: true
hostRoot: /run/cilium/cgroupv2
k8sServiceHost: {{ .Release.Name }}.{{ .Release.Namespace }}.svc
k8sServicePort: 6443
cni:
chainingMode: ~
customConf: false
configMap: ""
routingMode: native
enableIPv4Masquerade: true
ipv4NativeRoutingCIDR: "10.243.0.0/16"
dependsOn:
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,28 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ .Release.Name }}-csi
labels:
cozystack.io/repository: system
cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
interval: 1m
releaseName: csi
chart:
spec:
chart: cozy-kubevirt-csi-node
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
kubeConfig:
secretRef:
name: {{ .Release.Name }}-kubeconfig
targetNamespace: cozy-csi
storageNamespace: cozy-csi
install:
createNamespace: true
dependsOn:
- name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,73 @@
---
apiVersion: batch/v1
kind: Job
metadata:
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "10"
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
name: {{ .Release.Name }}-flux-teardown
spec:
template:
spec:
serviceAccountName: {{ .Release.Name }}-flux-teardown
restartPolicy: Never
containers:
- name: kubectl
image: docker.io/clastix/kubectl:v1.29.1
command:
- kubectl
- --namespace={{ .Release.Namespace }}
- patch
- helmrelease
- {{ .Release.Name }}-cilium
- {{ .Release.Name }}-csi
- -p
- '{"spec": {"suspend": true}}'
- --type=merge
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-flux-teardown
annotations:
helm.sh/hook: pre-delete
helm.sh/hook-delete-policy: before-hook-creation,hook-failed
helm.sh/hook-weight: "0"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
"helm.sh/hook": pre-install,post-install,pre-delete
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation,hook-failed
"helm.sh/hook-weight": "5"
name: {{ .Release.Name }}-flux-teardown
rules:
- apiGroups:
- "helm.toolkit.fluxcd.io"
resources:
- helmreleases
verbs:
- get
- patch
resourceNames:
- {{ .Release.Name }}-cilium
- {{ .Release.Name }}-csi
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
helm.sh/hook: pre-delete
helm.sh/hook-delete-policy: hook-succeeded,before-hook-creation,hook-failed
helm.sh/hook-weight: "5"
name: {{ .Release.Name }}-flux-teardown
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-flux-teardown
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-flux-teardown
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-kccm
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get

View File

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-kccm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Namespace }}-{{ .Release.Name }}-kccm
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-kccm
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,42 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ .Release.Name }}-kccm
rules:
- apiGroups:
- kubevirt.io
resources:
- virtualmachines
verbs:
- get
- watch
- list
- apiGroups:
- kubevirt.io
resources:
- virtualmachineinstances
verbs:
- get
- watch
- list
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- "*"
- apiGroups:
- ""
resources:
- nodes
verbs:
- get

View File

@@ -0,0 +1,27 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Release.Namespace }}-{{ .Release.Name }}-kccm
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-kccm
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ .Release.Name }}-kccm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ .Release.Name }}-kccm
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}-kccm
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-kccm
labels:
k8s-app: {{ .Release.Name }}-kccm
spec:
replicas: 1
selector:
matchLabels:
k8s-app: {{ .Release.Name }}-kccm
template:
metadata:
labels:
k8s-app: {{ .Release.Name }}-kccm
spec:
containers:
- name: kubevirt-cloud-controller-manager
args:
- --cloud-provider=kubevirt
- --cloud-config=/etc/cloud/cloud-config
- --kubeconfig=/etc/kubernetes/kubeconfig/super-admin.svc
- --cluster-name={{ .Release.Name }}
command:
- /bin/kubevirt-cloud-controller-manager
image: ghcr.io/kvaps/test:kubevirt-cloud-provider
imagePullPolicy: Always
#securityContext:
# privileged: true
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /etc/kubernetes/kubeconfig
name: kubeconfig
readOnly: true
- mountPath: /etc/cloud
name: cloud-config
readOnly: true
volumes:
- configMap:
name: {{ .Release.Name }}-cloud-config
name: cloud-config
- secret:
secretName: {{ .Release.Name }}-admin-kubeconfig
name: kubeconfig
tolerations:
- operator: Exists
serviceAccountName: {{ .Release.Name }}-kccm

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Release.Name }}-kccm

View File

@@ -0,0 +1,11 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"host": {
"type": "string",
"title": "Domain name for this kubernetes cluster",
"description": "This host will be used for all apps deployed in this tenant"
}
}
}

View File

@@ -0,0 +1 @@
host: ""

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: mysql
description: Managed MariaDB service
icon: https://static-00.iconduck.com/assets.00/mariadb-icon-512x340-txozryr2.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,64 @@
# Managed MariaDB Service
The Managed MariaDB Service offers a powerful and widely used relational database solution. This service allows you to create and manage a replicated MariaDB cluster seamlessly.
## Deployment Details
This managed service is controlled by mariadb-operator, ensuring efficient management and seamless operation.
- Docs: https://mariadb.com/kb/en/documentation/
- GitHub: https://github.com/mariadb-operator/mariadb-operator
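The cluster's health is surfaced through the `MariaDB` custom resource, so a quick status check looks like:
```
kubectl get mariadb -n <namespace>
```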
## HowTos
### How to switch master/slave replica
```
kubectl edit mariadb <instance>
```
update:
```
spec:
replication:
primary:
podIndex: 1
```
check status:
```
NAME READY STATUS PRIMARY POD AGE
<instance> True Running app-db1-1 41d
```
### How to restore backup:
find snapshot:
```
restic -r s3:s3.example.org/mariadb-backups/database_name snapshots
```
restore:
```
restic -r s3:s3.example.org/mariadb-backups/database_name restore latest --target /tmp/
```
more details:
- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
### Known issues
- **Replication cannot finish, failing with various errors**
- **Replication cannot finish because the binlog has been purged**
Until mariadb-operator bootstraps nodes with mariabackup (this feature is not implemented yet), follow these manual steps to fix it:
https://github.com/mariadb-operator/mariadb-operator/issues/141#issuecomment-1804760231
- **Corrupted indices**
Sometimes indices become corrupted on the master replica; you can recover them from a slave:
```
mysqldump -h <slave> -P 3306 -u<user> -p<password> --column-statistics=0 <database> <table> > ~/tmp/fix-table.sql
mysql -h <master> -P 3306 -u<user> -p<password> <database> < ~/tmp/fix-table.sql
```

View File


@@ -0,0 +1,94 @@
{{- if .Values.backup.enabled }}
{{ $image := .Files.Get "images/backup.json" | fromJson }}
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ .Release.Name }}-backup
spec:
schedule: "{{ .Values.backup.schedule }}"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 2
template:
spec:
restartPolicy: OnFailure
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
spec:
imagePullSecrets:
- name: {{ .Release.Name }}-regsecret
restartPolicy: Never
containers:
- name: mysqldump
image: "{{ index $image "image.name" }}@{{ index $image "containerimage.digest" }}"
command:
- /bin/sh
- /scripts/backup.sh
env:
- name: REPO_PREFIX
value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
- name: CLEANUP_STRATEGY
value: {{ required "cleanupPolicy is not specified!" .Values.backup.cleanupStrategy | quote }}
- name: MYSQL_USER
value: root
- name: MYSQL_PWD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}
key: root-password
- name: MYSQL_HOST
value: {{ .Release.Name }}-secondary
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3AccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3SecretKey
- name: AWS_DEFAULT_REGION
value: {{ .Values.backup.s3Region }}
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: resticPassword
volumeMounts:
- mountPath: /scripts
name: scripts
- mountPath: /tmp
name: tmp
- mountPath: /.cache
name: cache
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumes:
- name: scripts
secret:
secretName: {{ .Release.Name }}-backup-script
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
securityContext:
runAsNonRoot: true
runAsUser: 9000
runAsGroup: 9000
seccompProfile:
type: RuntimeDefault
{{- end }}

View File

@@ -0,0 +1,50 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup-script
stringData:
backup.sh: |
#!/bin/sh
set -e
set -o pipefail
JOB_ID="job-$(uuidgen|cut -f1 -d-)"
DB_LIST=$(mysql -u "$MYSQL_USER" -h "$MYSQL_HOST" -sNe 'SHOW DATABASES;' | grep -v '^\(#.*\|mysql\|sys\|information_schema\|performance_schema\)$')
DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
echo "Job ID: $JOB_ID"
echo "Target repo: $REPO_PREFIX"
echo "Cleanup strategy: $CLEANUP_STRATEGY"
echo "Start backup for:"
echo "$DB_LIST"
echo
echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
mysqldump -u "$MYSQL_USER" -h "$MYSQL_HOST" --single-transaction --databases $db | \
restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.sql
restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
)
done
echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
echo
echo "Run cleanup:"
echo
echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
restic prune -r "s3:${REPO_PREFIX}/$db"
)
done
echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
{{- end }}

View File

@@ -0,0 +1,11 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup
stringData:
s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
{{- end }}
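
Putting the backup values together, enabling backups for a release might look like the following sketch (release name, chart path, bucket, region, schedule, and credentials are all illustrative):
```
helm upgrade --install mydb ./mysql \
  --set backup.enabled=true \
  --set backup.schedule="0 2 * * *" \
  --set backup.s3Bucket=s3.example.org/mariadb-backups \
  --set backup.s3Region=us-east-1 \
  --set backup.s3AccessKey=<access-key> \
  --set backup.s3SecretKey=<secret-key> \
  --set backup.resticPassword=<restic-password> \
  --set backup.cleanupStrategy="--keep-last 3 --keep-daily 7"
```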

View File

@@ -0,0 +1,35 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-my-cnf
data:
config: |
[mysqld]
sql-mode=NO_ENGINE_SUBSTITUTION
max_connections=4096
default_authentication_plugin=mysql_native_password
#innodb_buffer_pool_dump_at_shutdown=1
innodb_buffer_pool_instances=48
innodb_buffer_pool_size=60G
innodb_fast_shutdown=0
innodb_flush_method=O_DIRECT_NO_FSYNC
innodb_flush_log_at_trx_commit=2
innodb_io_capacity=10000
innodb_io_capacity_max=50000
#innodb_log_buffer_size=128M
innodb_log_file_size=4096M
#innodb_log_files_in_group=6
innodb_thread_concurrency=24
join_buffer_size=2M
key_buffer_size=1024M
read_rnd_buffer_size=16M
#sync_binlog=0
table_open_cache=40714
table_definition_cache=4000
thread_pool_size=24
tmp_table_size=512M
master_info_repository=TABLE
relay_log_info_repository=TABLE
innodb_read_io_threads=12
innodb_write_io_threads=12

View File

@@ -0,0 +1,14 @@
{{- range $name := .Values.databases }}
{{ $dnsName := replace "_" "-" $name }}
---
apiVersion: mariadb.mmontes.io/v1alpha1
kind: Database
metadata:
name: {{ $.Release.Name }}-{{ $dnsName }}
spec:
name: {{ $name }}
mariaDbRef:
name: {{ $.Release.Name }}
characterSet: utf8
collate: utf8_general_ci
{{- end }}

View File

@@ -0,0 +1,71 @@
---
apiVersion: mariadb.mmontes.io/v1alpha1
kind: MariaDB
metadata:
name: {{ .Release.Name }}
spec:
rootPasswordSecretKeyRef:
name: {{ .Release.Name }}
key: root-password
image: "mariadb:11.0.2"
port: 3306
replicas: 2
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- mariadb
- key: app.kubernetes.io/instance
operator: In
values:
- {{ .Release.Name }}
topologyKey: "kubernetes.io/hostname"
replication:
enabled: true
#primary:
# podIndex: 0
# automaticFailover: true
metrics:
exporter:
image: prom/mysqld-exporter:v0.14.0
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 300m
memory: 512Mi
port: 9104
serviceMonitor:
interval: 10s
scrapeTimeout: 10s
myCnfConfigMapKeyRef:
name: {{ .Release.Name }}-my-cnf
key: config
volumeClaimTemplate:
resources:
requests:
storage: {{ .Values.size }}
accessModes:
- ReadWriteOnce
{{- if .Values.external }}
primaryService:
type: LoadBalancer
{{- end }}
#secondaryService:
# type: LoadBalancer

View File

@@ -0,0 +1,10 @@
{{- if .Values.registrySecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-regsecret
type: kubernetes.io/dockerconfigjson
stringData:
.dockerconfigjson: |
{{- toJson .Values.registrySecret | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,9 @@
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}
stringData:
{{- range $name, $u := .Values.users }}
  {{ $name }}-password: {{ $u.password | quote }}
{{- end }}

View File

@@ -0,0 +1,31 @@
{{- range $name, $u := .Values.users }}
{{ if not (eq $name "root") }}
{{ $dnsName := replace "_" "-" $name }}
---
apiVersion: mariadb.mmontes.io/v1alpha1
kind: User
metadata:
name: {{ $.Release.Name }}-{{ $dnsName }}
spec:
name: {{ $name }}
mariaDbRef:
name: {{ $.Release.Name }}
passwordSecretKeyRef:
name: {{ $.Release.Name }}
key: {{ $name }}-password
maxUserConnections: {{ $u.maxUserConnections }}
---
apiVersion: mariadb.mmontes.io/v1alpha1
kind: Grant
metadata:
name: {{ $.Release.Name }}-{{ $dnsName }}
spec:
mariaDbRef:
name: {{ $.Release.Name }}
privileges: {{ $u.privileges | toJson }}
database: "*"
table: "*"
username: {{ $name }}
grantOption: true
{{- end }}
{{- end }}

View File

@@ -0,0 +1,30 @@
external: false
size: 10Gi
users:
root:
password: strongpassword
user1:
privileges: ['ALL']
maxUserConnections: 1000
password: hackme
user2:
privileges: ['SELECT']
maxUserConnections: 1000
password: hackme
databases:
- wordpress1
- wordpress2
- wordpress3
- wordpress4
backup:
enabled: false
s3Region: us-east-1
  s3Bucket: s3.example.org/mysql-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: postgres
description: Managed PostgreSQL service
icon: https://cdn-icons-png.flaticon.com/512/5968/5968342.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,32 @@
# Managed PostgreSQL Service
PostgreSQL is currently the leading choice among relational databases, known for its robust features and performance. Our Managed PostgreSQL Service builds on a platform-side implementation to provide a self-healing replicated cluster.
## Deployment Details
This managed service is controlled by the CloudNativePG operator, ensuring efficient management and seamless operation.
- Docs: https://cloudnative-pg.io/documentation/
- Github: https://github.com/cloudnative-pg/cloudnative-pg
## HowTos
### How to switch master/slave replica
See:
- https://cloudnative-pg.io/documentation/1.15/rolling_update/#manual-updates-supervised
### How to restore a backup
Find a snapshot:
```
restic -r s3:s3.example.org/postgres-backups/database_name snapshots
```
Restore it:
```
restic -r s3:s3.example.org/postgres-backups/database_name restore latest --target /tmp/
```
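The restored `dump.tar` is the tar-format archive produced by `pg_dump -Ft`, so it can be loaded back with `pg_restore`; a sketch, assuming the target database already exists and `PGHOST`/`PGUSER` are set:
```
pg_restore --clean --if-exists -d database_name /tmp/dump.tar
```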
More details:
- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1

View File

@@ -0,0 +1,101 @@
{{- if .Values.backup.enabled }}
{{ $image := .Files.Get "images/backup.json" | fromJson }}
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ .Release.Name }}-backup
spec:
schedule: "{{ .Values.backup.schedule }}"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 2
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
spec:
imagePullSecrets:
- name: {{ .Release.Name }}-regsecret
restartPolicy: Never
containers:
- name: mysqldump
image: "{{ index $image "image.name" }}@{{ index $image "containerimage.digest" }}"
command:
- /bin/sh
- /scripts/backup.sh
env:
- name: REPO_PREFIX
value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
- name: CLEANUP_STRATEGY
value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3AccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3SecretKey
- name: AWS_DEFAULT_REGION
value: {{ .Values.backup.s3Region }}
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: resticPassword
volumeMounts:
- mountPath: /scripts
name: scripts
- mountPath: /tmp
name: tmp
- mountPath: /.cache
name: cache
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumes:
- name: scripts
secret:
secretName: {{ .Release.Name }}-backup-script
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
securityContext:
runAsNonRoot: true
runAsUser: 9000
runAsGroup: 9000
seccompProfile:
type: RuntimeDefault
{{- end }}

View File

@@ -0,0 +1,50 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup-script
stringData:
backup.sh: |
#!/bin/sh
set -e
set -o pipefail
JOB_ID="job-$(uuidgen|cut -f1 -d-)"
DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
    DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
echo "Job ID: $JOB_ID"
echo "Target repo: $REPO_PREFIX"
echo "Cleanup strategy: $CLEANUP_STRATEGY"
echo "Start backup for:"
echo "$DB_LIST"
echo
echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
pg_dump -Z0 -Ft -d "$db" | \
restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
)
done
echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
echo
echo "Run cleanup:"
echo
echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
restic prune -r "s3:${REPO_PREFIX}/$db"
)
done
echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
{{- end }}

View File

@@ -0,0 +1,11 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup
stringData:
  s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey | quote }}
  s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey | quote }}
  resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword | quote }}
{{- end }}

View File

@@ -0,0 +1,18 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: {{ .Release.Name }}
spec:
instances: 2
enableSuperuserAccess: true
postgresql:
parameters:
max_wal_senders: "30"
monitoring:
enablePodMonitor: true
storage:
size: {{ required ".Values.size is required" .Values.size }}

View File

@@ -0,0 +1,18 @@
{{- if .Values.external }}
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-external-write
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
{{- end }}
ports:
- name: postgres
port: 5432
selector:
cnpg.io/cluster: {{ .Release.Name }}
role: primary
{{- end }}

View File

@@ -0,0 +1,66 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-init-job
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
name: {{ .Release.Name }}-init-job
annotations:
checksum/config: {{ include (print $.Template.BasePath "/init-script.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: postgres
image: ghcr.io/cloudnative-pg/postgresql:15.3
command:
- bash
- /scripts/init.sh
env:
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/secret
name: secret
- mountPath: /scripts
name: scripts
securityContext:
fsGroup: 26
runAsGroup: 26
runAsNonRoot: true
runAsUser: 26
seccompProfile:
type: RuntimeDefault
volumes:
- name: secret
secret:
secretName: {{ .Release.Name }}-superuser
- name: scripts
secret:
secretName: {{ .Release.Name }}-init-script

View File

@@ -0,0 +1,127 @@
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-init-script
stringData:
init.sh: |
#!/bin/bash
set -e
echo "== create users"
{{- if .Values.users }}
psql -v ON_ERROR_STOP=1 <<\EOT
{{- range $user, $u := .Values.users }}
SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
ALTER ROLE {{ $user }} WITH PASSWORD '{{ $u.password }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
{{- end }}
EOT
{{- end }}
echo "== delete users"
MANAGED_USERS=$(echo '\du+' | psql | awk -F'|' '$4 == " user managed by helm" {print $1}' | awk NF=NF RS= OFS=' ')
DEFINED_USERS="{{ join " " (keys .Values.users) }}"
DELETE_USERS=$(for user in $MANAGED_USERS; do case " $DEFINED_USERS " in *" $user "*) :;; *) echo $user;; esac; done)
echo "users to delete: $DELETE_USERS"
for user in $DELETE_USERS; do
# https://stackoverflow.com/a/51257346/2931267
psql -v ON_ERROR_STOP=1 --echo-all <<EOT
REASSIGN OWNED BY $user TO postgres;
DROP OWNED BY $user;
DROP USER $user;
EOT
done
echo "== create databases and roles"
{{- if .Values.databases }}
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
{{- range $database, $d := .Values.databases }}
SELECT 'CREATE DATABASE {{ $database }}'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '{{ $database }}')\gexec
COMMENT ON DATABASE {{ $database }} IS 'database managed by helm';
SELECT 'CREATE ROLE {{ $database }}_admin NOINHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $database }}_admin')\gexec
COMMENT ON ROLE {{ $database }}_admin IS 'role managed by helm';
SELECT 'CREATE ROLE {{ $database }}_readonly NOINHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $database }}_readonly')\gexec
COMMENT ON ROLE {{ $database }}_readonly IS 'role managed by helm';
{{- end }}
EOT
{{- end }}
echo "== grant privileges on databases to roles"
{{- range $database, $d := .Values.databases }}
# admin
psql -v ON_ERROR_STOP=1 --echo-all -d "{{ $database }}" <<\EOT
DO $$DECLARE r record;
DECLARE
v_schema varchar := 'public';
v_new_owner varchar := '{{ $database }}_admin';
BEGIN
FOR r IN
select 'ALTER TABLE "' || table_schema || '"."' || table_name || '" OWNER TO ' || v_new_owner || ';' as a from information_schema.tables where table_schema = v_schema
union all
select 'ALTER TABLE "' || sequence_schema || '"."' || sequence_name || '" OWNER TO ' || v_new_owner || ';' as a from information_schema.sequences where sequence_schema = v_schema
union all
select 'ALTER TABLE "' || table_schema || '"."' || table_name || '" OWNER TO ' || v_new_owner || ';' as a from information_schema.views where table_schema = v_schema
union all
select 'ALTER FUNCTION "'||nsp.nspname||'"."'||p.proname||'"('||pg_get_function_identity_arguments(p.oid)||') OWNER TO ' || v_new_owner || ';' as a from pg_proc p join pg_namespace nsp ON p.pronamespace = nsp.oid where nsp.nspname = v_schema
LOOP
EXECUTE r.a;
END LOOP;
END$$;
ALTER DATABASE {{ $database }} OWNER TO {{ $database }}_admin;
ALTER SCHEMA public OWNER TO {{ $database }}_admin;
GRANT ALL ON SCHEMA public TO {{ $database }}_admin;
GRANT ALL ON ALL TABLES IN SCHEMA public TO {{ $database }}_admin;
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO {{ $database }}_admin;
GRANT ALL ON ALL FUNCTIONS IN SCHEMA public TO {{ $database }}_admin;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO {{ $database }}_admin;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO {{ $database }}_admin;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON FUNCTIONS TO {{ $database }}_admin;
EOT
# readonly
psql -v ON_ERROR_STOP=1 --echo-all -d "{{ $database }}" <<\EOT
GRANT CONNECT ON DATABASE {{ $database }} TO {{ $database }}_readonly;
GRANT USAGE ON SCHEMA public TO {{ $database }}_readonly;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO {{ $database }}_readonly;
GRANT USAGE ON ALL SEQUENCES IN SCHEMA public TO {{ $database }}_readonly;
GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA public TO {{ $database }}_readonly;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO {{ $database }}_readonly;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE ON SEQUENCES TO {{ $database }}_readonly;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT EXECUTE ON FUNCTIONS TO {{ $database }}_readonly;
EOT
{{- end }}
echo "== assign roles to users"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
{{- range $database, $d := .Values.databases }}
    {{- range $user, $u := $.Values.users }}
    {{- if has $user (default (list) $d.roles.admin) }}
    GRANT {{ $database }}_admin TO {{ $user }};
    {{- else }}
    REVOKE {{ $database }}_admin FROM {{ $user }};
    {{- end }}
    {{- if has $user (default (list) $d.roles.readonly) }}
GRANT {{ $database }}_readonly TO {{ $user }};
{{- else }}
REVOKE {{ $database }}_readonly FROM {{ $user }};
{{- end }}
{{- end }}
{{- end }}
EOT
echo "== create extensions"
{{- range $database, $d := .Values.databases }}
{{- if $d.extensions }}
psql -v ON_ERROR_STOP=1 --echo-all -d "{{ $database }}" <<\EOT
{{- range $extension := $d.extensions }}
CREATE EXTENSION IF NOT EXISTS {{ $extension }};
{{- end }}
EOT
{{- end }}
{{- end }}

View File

@@ -0,0 +1,10 @@
{{- if .Values.registrySecret }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-regsecret
type: kubernetes.io/dockerconfigjson
stringData:
.dockerconfigjson: |
{{- toJson .Values.registrySecret | nindent 4 }}
{{- end }}

View File

@@ -0,0 +1,39 @@
external: false
size: 10Gi
users:
user1:
password: strongpassword
user2:
password: hackme
airflow:
password: qwerty123
debezium:
replication: true
databases:
myapp:
roles:
admin:
- user1
- debezium
readonly:
- user2
airflow:
roles:
admin:
- airflow
extensions:
- hstore
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
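With the example values above, the init job creates login roles `user1`, `user2`, `airflow`, and `debezium` (the latter with `REPLICATION`), databases `myapp` and `airflow`, and grants `myapp_admin` to `user1` and `debezium`, `myapp_readonly` to `user2`, and `airflow_admin` to `airflow`. A quick way to verify, assuming superuser `psql` access to the cluster:
```
psql -c '\du'   # roles, attributes, and role membership
psql -l         # databases; managed ones carry the "database managed by helm" comment
```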

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: rabbitmq
description: Managed RabbitMQ service
icon: https://static-00.iconduck.com/assets.00/rabbitmq-icon-484x512-s9lfaapn.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,10 @@
# Managed RabbitMQ Service
RabbitMQ is a robust message broker that plays a crucial role in modern distributed systems. Our Managed RabbitMQ Service simplifies the deployment and management of RabbitMQ clusters, ensuring reliability and scalability for your messaging needs.
## Deployment Details
The service utilizes the official RabbitMQ cluster operator, ensuring the reliability and seamless operation of your RabbitMQ instances.
- Github: https://github.com/rabbitmq/cluster-operator/
- Docs: https://www.rabbitmq.com/kubernetes/operator/operator-overview.html
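To connect to the cluster, the operator generates admin credentials and stores them in a Secret; a hedged sketch, assuming the operator's default `<name>-default-user` Secret convention:
```
kubectl get secret <release-name>-default-user \
  -o jsonpath='{.data.username}' | base64 -d
kubectl get secret <release-name>-default-user \
  -o jsonpath='{.data.password}' | base64 -d
```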

View File

@@ -0,0 +1,13 @@
apiVersion: rabbitmq.com/v1beta1
kind: RabbitmqCluster
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 3
{{- if .Values.external }}
service:
type: LoadBalancer
{{- end }}

View File

@@ -0,0 +1,10 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"title": "Enable external Access"
}
}
}

View File

@@ -0,0 +1 @@
external: false

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: redis
description: Managed Redis service
icon: https://cdn4.iconfinder.com/data/icons/redis-2/1451/Untitled-2-512.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,10 @@
# Managed Redis Service
Redis is a highly versatile and blazing-fast in-memory data store and cache that can significantly boost the performance of your applications. Managed Redis Service offers a hassle-free solution for deploying and managing Redis clusters, ensuring that your data is always available and responsive.
## Deployment Details
The service utilizes the Spotahome Redis Operator for efficient management and orchestration of Redis clusters.
- Docs: https://redis.io/docs/
- GitHub: https://github.com/spotahome/redis-operator
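To reach the deployed cluster, a hedged sketch assuming the operator's default naming (an `rfs-<name>` sentinel Service and master group `mymaster`), plus the chart's `<release>-external-lb` Service when `external: true`:
```
# in-cluster: ask sentinel where the current master is
redis-cli -h rfs-<release-name> -p 26379 sentinel get-master-addr-by-name mymaster
# with external access enabled: connect through the LoadBalancer
redis-cli -h <lb-address> -p 6379 ping
```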

View File

@@ -0,0 +1,52 @@
apiVersion: databases.spotahome.com/v1
kind: RedisFailover
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
sentinel:
replicas: 3
resources:
requests:
cpu: 100m
limits:
memory: 100Mi
redis:
replicas: 3
resources:
requests:
cpu: 150m
memory: 400Mi
limits:
cpu: 2
memory: 1000Mi
{{- with .Values.size }}
storage:
persistentVolumeClaim:
metadata:
name: redisfailover-persistent-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ . }}
{{- end }}
exporter:
enabled: true
image: oliver006/redis_exporter:v1.55.0-alpine
args:
- --web.telemetry-path
- /metrics
env:
- name: REDIS_EXPORTER_LOG_FORMAT
value: txt
customConfig:
- tcp-keepalive 0
- loglevel notice
{{- if not .Values.size }}
- appendonly no
- save ""
{{- end }}

View File

@@ -0,0 +1,25 @@
{{- if .Values.external }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-external-lb
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
{{- end }}
selector:
app.kubernetes.io/component: redis
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/part-of: redis-failover
redisfailovers-role: master
ports:
- name: redis
port: 6379
targetPort: redis
{{- end }}

View File

@@ -0,0 +1,46 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMServiceScrape
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
  - relabelConfigs:
    - replacement: redis
      targetLabel: job
    - sourceLabels: [__meta_kubernetes_pod_node_name]
      targetLabel: node
    - replacement: cluster
      targetLabel: tier
    - replacement: {{ .Release.Name }}
      targetLabel: service
    port: metrics
selector:
matchLabels:
app: {{ .Release.Name }}-metrics
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-metrics
labels:
app: {{ .Release.Name }}-metrics
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
selector:
app.kubernetes.io/component: redis
app.kubernetes.io/name: {{ .Release.Name }}
app.kubernetes.io/part-of: redis-failover
ports:
- name: metrics
port: 9121
targetPort: metrics

View File

@@ -0,0 +1,14 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"title": "Enable external Access"
},
"size": {
"type": "string",
"title": "Disk Size"
}
}
}

View File

@@ -0,0 +1,2 @@
external: false
size: 5Gi

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: tcp-balancer
description: Layer4 load balancer service
icon: https://cdn.icon-icons.com/icons2/2699/PNG/512/haproxy_logo_icon_171017.png
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,9 @@
# Managed TCP Load Balancer Service
The Managed TCP Load Balancer Service simplifies the deployment and management of load balancers. It efficiently distributes incoming TCP traffic across multiple backend servers, ensuring high availability and optimal resource utilization.
## Deployment Details
Managed TCP Load Balancer Service uses HAProxy, a well-established and reliable solution for distributing incoming TCP traffic, which guarantees seamless and dependable operation of your load-balancing infrastructure.
- Docs: https://www.haproxy.com/documentation/
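Once deployed with `external: true`, the chart exposes a `<release>-haproxy` Service of type LoadBalancer. A quick way to fetch its address (the release name is a placeholder):
```
kubectl get svc <release>-haproxy \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```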

View File

@@ -0,0 +1,122 @@
{{- define "backendoptions" }}
{{- if eq . "tcp" }}
mode tcp
balance leastconn
default-server observe layer4 error-limit 10 on-error mark-down check
{{- else if eq . "tcp-with-proxy" }}
mode tcp
balance leastconn
default-server observe layer4 error-limit 10 on-error mark-down check send-proxy-v2
{{- else }}
{{- fail (printf "mode %s is not supported" .) }}
{{- end }}
{{- end }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ .Release.Name }}-haproxy
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
data:
haproxy.cfg: |
defaults
mode tcp
option dontlognull
timeout http-request 10s
timeout queue 20s
timeout connect 5s
timeout client 5m
timeout server 5m
timeout tunnel 5m
timeout http-keep-alive 10s
timeout check 10s
{{- with .Values.httpAndHttps }}
{{- if .targetPorts.http }}
frontend http
bind :::8080 v4v6
mode tcp
default_backend http
{{- if $.Values.whitelistHTTP }}
{{- with $.Values.whitelist }}
acl whitelist src{{ range . }} {{ . }}{{ end }}
{{- end }}
acl all src 0.0.0.0
tcp-request content accept if whitelist
tcp-request content reject
{{- end }}
tcp-request content set-dst-port int(80)
backend http
{{- include "backendoptions" (default "tcp" .mode) | nindent 8 }}
{{- range $num, $ep := .endpoints }}
server srv{{ $num }} {{ $ep }}:{{ $.Values.httpAndHttps.targetPorts.http }} check
{{- end }}
{{- end }}
{{- if .targetPorts.https }}
frontend https
bind :::8443 v4v6
{{- if eq (default "tcp" .mode) "http" }}
mode http
{{- else }}
mode tcp
{{- end }}
default_backend https
{{- if $.Values.whitelistHTTP }}
{{- with $.Values.whitelist }}
acl whitelist src{{ range . }} {{ . }}{{ end }}
{{- end }}
acl all src 0.0.0.0
tcp-request content accept if whitelist
tcp-request content reject
{{- end }}
tcp-request content set-dst-port int(443)
backend https
{{- include "backendoptions" (default "tcp" .mode) | nindent 8 }}
{{- range $num, $ep := .endpoints }}
server srv{{ $num }} {{ $ep }}:{{ $.Values.httpAndHttps.targetPorts.https }} check
{{- end }}
{{- end }}
{{- end }}
{{- with .Values.kubernetesAndTalos }}
frontend kubernetes
bind :::6443 v4v6
mode tcp
default_backend kubernetes
{{- with $.Values.whitelist }}
acl whitelist src{{ range . }} {{ . }}{{ end }}
{{- end }}
acl all src 0.0.0.0
tcp-request content accept if whitelist
tcp-request content reject
frontend talos
bind :::50000 v4v6
mode tcp
default_backend talos
{{- with $.Values.whitelist }}
acl whitelist src{{ range . }} {{ . }}{{ end }}
{{- end }}
acl all src 0.0.0.0
tcp-request content accept if whitelist
tcp-request content reject
backend kubernetes
{{- include "backendoptions" (default "tcp" .mode) | nindent 8 }}
{{- range $num, $ep := .endpoints }}
server srv{{ $num }} {{ $ep }}:6443 check
{{- end }}
backend talos
{{- include "backendoptions" (default "tcp" .mode) | nindent 8 }}
{{- range $num, $ep := .endpoints }}
server srv{{ $num }} {{ $ep }}:50000 check
{{- end }}
{{- end }}

View File

@@ -0,0 +1,55 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}-haproxy
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 2
selector:
matchLabels:
app: {{ .Release.Name }}-haproxy
template:
metadata:
labels:
app: {{ .Release.Name }}-haproxy
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- {{ .Release.Name }}-haproxy
topologyKey: kubernetes.io/hostname
containers:
- image: haproxy:latest
name: haproxy
ports:
{{- with .Values.httpAndHttps }}
- containerPort: 8080
name: http
- containerPort: 8443
name: https
{{- end }}
{{- with .Values.kubernetesAndTalos }}
- containerPort: 6443
name: kubernetes
- containerPort: 50000
name: talos
{{- end }}
volumeMounts:
- mountPath: /usr/local/etc/haproxy
name: config
volumes:
- configMap:
name: {{ .Release.Name }}-haproxy
name: config

View File

@@ -0,0 +1,37 @@
---
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-haproxy
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
{{- end }}
selector:
app: {{ .Release.Name }}-haproxy
ports:
{{- with .Values.httpAndHttps }}
- name: http
protocol: TCP
port: 80
targetPort: http
- name: https
protocol: TCP
port: 443
targetPort: https
{{- end }}
{{- with .Values.kubernetesAndTalos }}
- name: kubernetes
protocol: TCP
port: 6443
targetPort: kubernetes
- name: talos
protocol: TCP
port: 50000
targetPort: talos
{{- end }}

View File

@@ -0,0 +1,18 @@
external: false
httpAndHttps:
mode: tcp
targetPorts:
http: 80
https: 443
endpoints:
- 10.100.3.1
- 10.100.3.11
- 10.100.3.2
- 10.100.3.12
- 10.100.3.3
- 10.100.3.13
whitelistHTTP: false
whitelist:
- "1.2.3.4"
- "10.100.0.0/16"

View File

@@ -0,0 +1,7 @@
apiVersion: v2
name: tenant
description: Additional tenant namespace
icon: https://upload.wikimedia.org/wikipedia/commons/0/04/User_icon_1.svg
type: application
version: 1.0.0

View File

@@ -0,0 +1,47 @@
# Tenant
A tenant is the main unit of security on the platform. The closest analogy would be Linux kernel namespaces.
Tenants can be created recursively and are subject to the following rules:
### Higher-level tenants can access lower-level ones
Higher-level tenants can view and manage the applications of all their children.
### Each tenant has its own domain
By default (unless otherwise specified), a tenant inherits the domain of its parent, prefixed with its own name. For example, if the parent has the domain `example.org`, then `tenant-foo` gets the domain `foo.example.org` by default.
Kubernetes clusters created in this tenant namespace get domains like `kubernetes-cluster.foo.example.org`.
Example:
```
tenant-root (example.org)
└── tenant-foo (foo.example.org)
└── kubernetes-cluster1 (kubernetes-cluster1.foo.example.org)
```
### Lower-level tenants can access the cluster services of their parent (provided they do not run their own)
For example, you can create `tenant-u1` with a set of cluster services such as `etcd`, `ingress`, and `monitoring`, and then create another tenant namespace `tenant-u2` inside it.
Let's see what happens when you run Kubernetes and Postgres in the `tenant-u2` namespace.
Since `tenant-u2` does not have its own cluster services like `etcd`, `ingress`, and `monitoring`, the applications will use the cluster services of the parent tenant.
This in turn means:
- The Kubernetes cluster data will be stored in etcd for `tenant-u1`.
- Access to the cluster will be through the common ingress of `tenant-u1`.
- All metrics will be collected in the monitoring stack of `tenant-u1`, and only it will have access to them.
Example:
```
tenant-u1
├── etcd
├── ingress
├── monitoring
└── tenant-u2
├── kubernetes-cluster1
└── postgres-db1
```
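As a sketch, the nested tenant from the example above could be created with plain helm (the chart path here is hypothetical). Per the naming helper, installing release `tenant-u2` into namespace `tenant-u1` produces the child namespace `tenant-u1-u2`:
```
helm install tenant-u2 ./tenant -n tenant-u1 \
  --set etcd=false --set monitoring=false --set ingress=false
```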

View File

@@ -0,0 +1,15 @@
{{- define "tenant.name" -}}
{{- $parts := splitList "-" .Release.Name }}
{{- if or (ne ($parts|first) "tenant") (ne (len $parts) 2) }}
{{- fail (printf "The release name should start with \"tenant-\" and should not contain any other dashes: %s" .Release.Name) }}
{{- end }}
{{- if not (hasPrefix "tenant-" .Release.Namespace) }}
{{- fail (printf "The release namespace should start with \"tenant-\": %s" .Release.Namespace) }}
{{- end }}
{{- $tenantName := ($parts|last) }}
{{- if ne .Release.Namespace "tenant-root" }}
{{- printf "%s-%s" .Release.Namespace $tenantName }}
{{- else }}
{{- printf "tenant-%s" $tenantName }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,25 @@
{{- if .Values.etcd }}
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: etcd
namespace: {{ include "tenant.name" . }}
annotations:
helm.sh/resource-policy: keep
labels:
cozystack.io/ui: "true"
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
chart:
spec:
chart: etcd
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
version: "*"
interval: 1m0s
timeout: 5m0s
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.ingress }}
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: ingress
namespace: {{ include "tenant.name" . }}
annotations:
helm.sh/resource-policy: keep
labels:
cozystack.io/ui: "true"
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
chart:
spec:
chart: ingress
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
version: "*"
interval: 1m0s
timeout: 5m0s
values: {}
{{- end }}

View File

@@ -0,0 +1,40 @@
{{- if .Values.monitoring }}
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: monitoring
namespace: {{ include "tenant.name" . }}
annotations:
helm.sh/resource-policy: keep
labels:
cozystack.io/ui: "true"
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
chart:
spec:
chart: monitoring
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-extra
namespace: cozy-public
version: "*"
interval: 1m0s
timeout: 5m0s
values:
{{- with .Values.host }}
host: grafana.{{ . }}
{{- end }}
metricsStorages:
- name: shortterm
retentionPeriod: "3d"
deduplicationInterval: "5m"
storage: 10Gi
- name: longterm
retentionPeriod: "14d"
deduplicationInterval: "15s"
storage: 10Gi
oncall:
enabled: false
{{- end }}

View File

@@ -0,0 +1,35 @@
{{- if ne (include "tenant.name" .) "tenant-root" }}
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ include "tenant.name" . }}
{{- if hasPrefix "tenant-" .Release.Namespace }}
{{- $existingNS := lookup "v1" "Namespace" "" .Release.Namespace }}
{{- if $existingNS }}
annotations:
{{- if .Values.host }}
namespace.cozystack.io/host: "{{ .Values.host }}"
{{- else }}
{{ $parentHost := index $existingNS.metadata.annotations "namespace.cozystack.io/host" | required (printf "namespace %s has no namespace.cozystack.io/host annotation" $.Release.Namespace) }}
namespace.cozystack.io/host: "{{ splitList "-" (include "tenant.name" .) | last }}.{{ $parentHost }}"
{{- end }}
{{- range $x := list "etcd" "monitoring" "ingress" }}
{{- if (index $.Values $x) }}
namespace.cozystack.io/{{ $x }}: "{{ include "tenant.name" $ }}"
{{- else }}
namespace.cozystack.io/{{ $x }}: "{{ index $existingNS.metadata.annotations (printf "namespace.cozystack.io/%s" $x) | required (printf "namespace %s has no namespace.cozystack.io/%s annotation" $.Release.Namespace $x) }}"
{{- end }}
{{- end }}
ownerReferences:
- apiVersion: v1
blockOwnerDeletion: true
controller: true
kind: Namespace
name: {{ .Release.Namespace }}
uid: {{ $existingNS.metadata.uid }}
{{- else }}
{{- fail (printf "error lookup exiting namespace: %s" .Release.Namespace) }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,84 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "tenant.name" . }}
namespace: {{ include "tenant.name" . }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "tenant.name" . }}
namespace: {{ include "tenant.name" . }}
annotations:
kubernetes.io/service-account.name: {{ include "tenant.name" . }}
type: kubernetes.io/service-account-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "tenant.name" . }}
namespace: {{ include "tenant.name" . }}
rules:
- apiGroups: [""]
resources: ["*"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["helm.toolkit.fluxcd.io"]
resources: ["helmreleases"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "tenant.name" . }}
namespace: {{ include "tenant.name" . }}
subjects:
{{- if ne .Release.Namespace "tenant-root" }}
- kind: ServiceAccount
name: tenant-root
namespace: tenant-root
{{- end }}
{{- if hasPrefix "tenant-" .Release.Namespace }}
{{- $parts := splitList "-" .Release.Namespace }}
{{- range $i, $v := $parts }}
{{- if ne $i 0 }}
- kind: ServiceAccount
name: {{ join "-" (slice $parts 0 (add $i 1)) }}
namespace: {{ join "-" (slice $parts 0 (add $i 1)) }}
{{- end }}
{{- end }}
{{- end }}
- kind: ServiceAccount
name: {{ include "tenant.name" . }}
namespace: {{ include "tenant.name" . }}
roleRef:
kind: Role
name: {{ include "tenant.name" . }}
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "tenant.name" . }}
namespace: cozy-public
rules:
- apiGroups: ["source.toolkit.fluxcd.io"]
resources: ["helmrepositories"]
verbs: ["get", "list"]
- apiGroups: ["source.toolkit.fluxcd.io"]
resources: ["helmcharts"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "tenant.name" . }}
namespace: cozy-public
subjects:
- kind: ServiceAccount
name: {{ include "tenant.name" . }}
namespace: {{ include "tenant.name" . }}
roleRef:
kind: Role
name: {{ include "tenant.name" . }}
apiGroup: rbac.authorization.k8s.io

View File

@@ -0,0 +1,27 @@
{
"$schema": "http://json-schema.org/schema#",
"type": "object",
"properties": {
"host": {
"type": "string",
"form": true,
"title": "Domain name for this tenant",
"description": "This host will be used for all apps deployed in this tenant"
},
"etcd": {
"type": "boolean",
"title": "Deploy own Etcd cluster",
"form": true
},
"monitoring": {
"type": "boolean",
"title": "Deploy own Monitoring Stack",
"form": true
},
"ingress": {
"type": "boolean",
"title": "Deploy own Ingress Controller",
"form": true
}
}
}

View File

@@ -0,0 +1,4 @@
host: ""
etcd: false
monitoring: false
ingress: false

View File

@@ -0,0 +1,14 @@
http-cache 0.1.0 HEAD
kubernetes 0.1.0 HEAD
mysql 0.1.0 HEAD
postgres 0.1.0 HEAD
rabbitmq 0.1.0 HEAD
redis 0.1.1 HEAD
tcp-balancer 0.1.0 HEAD
tenant 0.1.3 3d1b86c
tenant 0.1.4 d200480
tenant 0.1.5 e3ab858
tenant 1.0.0 HEAD
virtual-machine 0.1.4 f2015d6
virtual-machine 0.1.5 HEAD
vpn 0.1.0 HEAD

Some files were not shown because too many files have changed in this diff