Compare commits

...

4 Commits

Author SHA1 Message Date
Andrei Kvapil
575d096671 Add nats-operator 2024-07-18 14:22:29 +02:00
Marian Koreniuk
5261145b2d Merge pull request #217 from aenix-io/ferretdb
FerretDB
2024-07-16 12:52:37 +02:00
Andrei Kvapil
4ffa861534 add ferretdb
Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
2024-07-16 10:23:27 +02:00
Andrei Kvapil
07d666c0be fix: scraping ingress-nginx metrics (#212)
Now the Grafana dashboards for the ingress-nginx controller work completely!

![pic](https://github.com/user-attachments/assets/c2414cc7-9e0c-441e-9668-bf78ea3ef0c6)

![pic](https://github.com/user-attachments/assets/8ebe2488-0c53-4fc8-9e26-fc37e0047ebe)

![pic](https://github.com/user-attachments/assets/675a47b8-0304-4c58-9379-75e23c2db90f)
2024-07-16 08:06:16 +02:00
34 changed files with 1651 additions and 6 deletions

View File

@@ -0,0 +1,25 @@
apiVersion: v2
name: ferretdb
description: Managed FerretDB service
icon: ferretdb.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.22.0"

View File

@@ -0,0 +1,2 @@
generate:
readme-generator -v values.yaml -s values.schema.json -r README.md
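
This `generate` target assumes Bitnami's readme-generator-for-helm is available on the PATH; a plausible way to run it locally (the npm package name is an assumption based on that project, not something this Makefile pins) is:

```sh
# Install the generator once, then rebuild README.md from values.yaml
# and values.schema.json via the Makefile target above.
npm install -g @bitnami/readme-generator-for-helm
make generate
```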

View File

@@ -0,0 +1,34 @@
# Managed FerretDB Service
## Parameters
### Common parameters
| Name | Description | Value |
| ------------------------ | ----------------------------------------------------------------------------------------------------------------------- | ------- |
| `external` | Enable external access from outside the cluster | `false` |
| `size` | Persistent Volume size | `10Gi` |
| `replicas` | Number of Postgres replicas | `2` |
| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed. | `0` |
| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances). | `0` |
### Configuration parameters
| Name | Description | Value |
| ------- | ------------------- | ----- |
| `users` | Users configuration | `{}` |
### Backup parameters
| Name | Description | Value |
| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
| `backup.enabled` | Enable periodic backups | `false` |
| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
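
For reference, the parameters above map directly onto `helm install` overrides. The release name and chart path below are illustrative, and the credentials are placeholders:

```sh
# Hypothetical install of this FerretDB chart with backups enabled.
helm install ferretdb ./ferretdb \
  --set replicas=2 \
  --set size=20Gi \
  --set backup.enabled=true \
  --set backup.s3Bucket=s3.example.org/postgres-backups \
  --set backup.s3AccessKey='<access-key>' \
  --set backup.s3SecretKey='<secret-key>' \
  --set backup.resticPassword='<restic-password>'
```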

View File

@@ -0,0 +1,54 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
width="200mm"
height="195.323mm"
viewBox="0 0 200 195.323"
version="1.1"
id="svg948"
inkscape:version="1.1.1 (c3084ef, 2021-09-22)"
sodipodi:docname="ferretdb.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview950"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:document-units="mm"
showgrid="false"
inkscape:zoom="0.64052329"
inkscape:cx="-69.474445"
inkscape:cy="579.99452"
inkscape:window-width="3440"
inkscape:window-height="1387"
inkscape:window-x="0"
inkscape:window-y="25"
inkscape:window-maximized="1"
inkscape:current-layer="layer1" />
<defs
id="defs945" />
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
<path
d="M 95.871302,0.25836635 C 73.52529,3.312081 51.107429,17.502874 38.138123,36.831094 c -2.083712,3.125567 -5.676318,9.628178 -5.676318,10.274847 0,0.0719 1.724451,-0.970003 3.808162,-2.335187 25.651206,-16.921175 56.260205,-20.046742 81.156963,-8.298921 5.42484,2.550751 8.83781,5.029648 13.68783,9.879665 8.15521,8.191137 14.11894,19.148592 18.25044,33.554942 2.15556,7.400765 3.95187,17.495992 4.4189,24.35786 0.10778,1.86816 0.39518,3.52075 0.57482,3.62853 1.00593,0.61075 5.53261,-5.96372 8.73003,-12.645965 5.06558,-10.634111 7.43669,-21.0886 7.40077,-32.692714 -0.036,-16.418213 -5.71224,-30.213814 -17.13674,-41.710153 C 143.22184,10.640997 130.43216,3.6354156 117.03174,0.90503536 113.90617,0.29429263 111.6069,0.11466224 105.75097,0.00688441 101.69132,-0.02904391 97.272414,0.07873086 95.871302,0.25836635 Z"
id="path824"
style="fill:#216778;stroke-width:0.0359261" />
<path
d="m 48.377049,48.219658 c -2.335194,1.149625 -6.251134,4.742233 -9.700036,8.873735 -1.54482,1.832222 -3.880014,4.095564 -5.604464,5.388902 -4.02372,3.017795 -10.885597,9.735963 -14.370424,14.083015 -18.1785821,22.525641 -23.2441594,48.21277 -14.585984,74.00768 7.113359,21.12453 23.567499,35.13569 48.859444,41.4946 9.843739,2.51482 24.60935,3.91593 30.788632,2.94593 l 1.580747,-0.25148 -2.442972,-1.43704 C 69.42972,185.49312 60.017093,172.27233 57.39449,157.57857 c -0.790373,-4.45483 -0.826299,-12.35856 -0.03593,-16.70562 1.760377,-9.77189 6.682247,-18.7534 13.364494,-24.35786 3.125567,-2.6226 8.586328,-5.31706 12.933381,-6.35891 6.538543,-1.58075 10.526335,-3.37705 14.657827,-6.64633 2.658538,-2.0837 4.993728,-5.2452 6.933738,-9.340763 1.65259,-3.484834 5.17335,-14.550063 5.17335,-16.310439 0,-1.221482 -1.25742,-2.874082 -3.05372,-3.987789 -0.93408,-0.574812 -2.40705,-0.898147 -6.17927,-1.293338 C 84.949773,70.888992 76.866409,67.943063 67.094521,60.218953 65.693406,59.105246 64.00488,57.847837 63.322285,57.416727 62.639691,57.021536 61.2745,55.512639 60.340423,54.111526 c -2.838159,-4.131492 -6.358912,-6.790025 -9.053367,-6.825953 -0.574817,0 -1.904081,0.431119 -2.910011,0.934085 z m 17.639695,16.633763 c 1.221486,0.610741 2.55075,1.401113 2.981863,1.724447 l 0.790373,0.646669 -1.257411,5.029649 c -1.077783,4.38298 -1.257413,5.496687 -1.149634,8.622257 0.107777,3.089642 0.215555,3.77223 0.934077,4.778161 1.18556,1.616673 3.233345,2.586676 5.532613,2.586676 3.269271,0 5.820021,-1.86815 10.059296,-7.436693 1.221486,-1.580744 2.19149,-2.442973 3.628532,-3.125571 2.227415,-1.113706 3.808162,-1.221481 8.765958,-0.790372 l 3.305202,0.323335 v 1.940007 c 0,3.053724 1.616677,4.814099 4.921857,5.317065 l 1.58075,0.21555 -0.57481,1.329266 c -2.51483,6.071499 -8.981521,12.93338 -15.05302,15.987093 -0.970004,0.46703 -3.161494,1.32926 -4.850018,1.90408 -2.766306,0.89815 -3.520754,1.00593 -8.262994,1.00593 -4.706313,0 -5.496687,-0.10778 -8.083363,-0.97001 -7.795954,-2.58667 -13.58005,-8.334832 -16.202652,-16.058942 -0.934077,-2.73038 -0.970004,-10.670039 -0.03593,-13.975231 1.257413,-4.562611 3.484828,-8.33485 5.820023,-9.80782 1.508893,-0.970003 4.311126,-0.646669 7.149285,0.754454 z"
id="path826"
style="fill:#216778;stroke-width:0.0359261" />
<path
d="m 181.55494,78.397542 c 0,1.616673 -1.7963,9.089295 -3.30519,13.759681 -5.67632,17.495987 -15.95117,33.195677 -29.35159,44.656087 -9.41263,8.08336 -16.09488,11.64004 -26.69306,14.26265 -6.82596,1.68852 -11.28078,2.22741 -19.93897,2.44297 -10.813737,0.2874 -21.483776,-0.6826 -31.040108,-2.76631 -1.832229,-0.39519 -3.377049,-0.64667 -3.484828,-0.53889 -0.431112,0.39519 1.221487,5.89187 2.658529,8.80189 2.622602,5.38891 5.604466,9.41262 10.921522,14.72968 5.604465,5.60446 9.771888,8.6941 16.238576,12.03522 16.023019,8.263 34.417169,9.37671 53.278339,3.1615 19.90304,-6.50262 34.52495,-18.25043 42.39275,-34.05791 5.24521,-10.4904 7.40077,-21.69934 6.6104,-34.489 -0.97001,-15.77155 -6.79003,-31.219754 -15.23265,-40.344967 -1.32926,-1.437041 -2.55075,-2.586676 -2.73038,-2.586676 -0.17963,0 -0.32334,0.431109 -0.32334,0.934075 z"
id="path828"
style="fill:#216778;stroke-width:0.0359261" />
</g>
</svg>

View File

@@ -0,0 +1,99 @@
{{- if .Values.backup.enabled }}
{{ $image := .Files.Get "images/backup.json" | fromJson }}
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ .Release.Name }}-backup
spec:
schedule: "{{ .Values.backup.schedule }}"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 3
jobTemplate:
spec:
backoffLimit: 2
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: pgdump
image: "{{ index $image "image.name" }}@{{ index $image "containerimage.digest" }}"
command:
- /bin/sh
- /scripts/backup.sh
env:
- name: REPO_PREFIX
value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
- name: CLEANUP_STRATEGY
value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3AccessKey
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: s3SecretKey
- name: AWS_DEFAULT_REGION
value: {{ .Values.backup.s3Region }}
- name: RESTIC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-backup
key: resticPassword
volumeMounts:
- mountPath: /scripts
name: scripts
- mountPath: /tmp
name: tmp
- mountPath: /.cache
name: cache
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumes:
- name: scripts
secret:
secretName: {{ .Release.Name }}-backup-script
- name: tmp
emptyDir: {}
- name: cache
emptyDir: {}
securityContext:
runAsNonRoot: true
runAsUser: 9000
runAsGroup: 9000
seccompProfile:
type: RuntimeDefault
{{- end }}
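
To smoke-test the backup path without waiting for the schedule, the CronJob can be triggered by hand; the release name `myrelease` below is a placeholder:

```sh
# Create a one-off Job from the backup CronJob and follow its logs.
kubectl create job --from=cronjob/myrelease-backup myrelease-backup-manual
kubectl logs -f job/myrelease-backup-manual
```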

View File

@@ -0,0 +1,50 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup-script
stringData:
backup.sh: |
#!/bin/sh
set -e
set -o pipefail
JOB_ID="job-$(uuidgen|cut -f1 -d-)"
DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
echo "Job ID: $JOB_ID"
echo "Target repo: $REPO_PREFIX"
echo "Cleanup strategy: $CLEANUP_STRATEGY"
echo "Start backup for:"
echo "$DB_LIST"
echo
echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
pg_dump -Z0 -Ft -d "$db" | \
restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
)
done
echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
echo
echo "Run cleanup:"
echo
echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
for db in $DB_LIST; do
(
set -x
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
restic prune -r "s3:${REPO_PREFIX}/$db"
)
done
echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
{{- end }}
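
The chart only automates backups; as a rough restore sketch (assuming a shell with the same environment variables the backup job injects — REPO_PREFIX, RESTIC_PASSWORD, AWS_* and PG* — and `mydb` as a placeholder database name):

```sh
# List snapshots, pull the latest dump out of restic, and restore it.
db=mydb
restic -r "s3:${REPO_PREFIX}/$db" snapshots
restic -r "s3:${REPO_PREFIX}/$db" dump latest dump.tar > /tmp/dump.tar
pg_restore --clean --if-exists -d "$db" /tmp/dump.tar
```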

View File

@@ -0,0 +1,11 @@
{{- if .Values.backup.enabled }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-backup
stringData:
s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}
spec:
type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
{{- if .Values.external }}
externalTrafficPolicy: Local
allocateLoadBalancerNodePorts: false
{{- end }}
ports:
- name: ferretdb
port: 27017
selector:
app: {{ .Release.Name }}
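
Clients reach this Service over the regular MongoDB wire protocol on port 27017. A hedged connection example with `mongosh`, assuming a release named `ferretdb`, a user from the `users:` values example, and FerretDB's PLAIN authentication against the backing PostgreSQL:

```sh
# Placeholder release name, namespace, user and password; adjust to your install.
mongosh "mongodb://user1:strongpassword@ferretdb.<namespace>.svc:27017/app?authMechanism=PLAIN"
```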

View File

@@ -0,0 +1,26 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicas }}
selector:
matchLabels:
app: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ .Release.Name }}
spec:
containers:
- name: ferretdb
image: ghcr.io/ferretdb/ferretdb:1.22.0
ports:
- containerPort: 27017
env:
- name: FERRETDB_POSTGRESQL_URL
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-app
key: uri

View File

@@ -0,0 +1,66 @@
apiVersion: batch/v1
kind: Job
metadata:
name: {{ .Release.Name }}-init-job
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
template:
metadata:
name: {{ .Release.Name }}-init-job
annotations:
checksum/config: {{ include (print $.Template.BasePath "/init-script.yaml") . | sha256sum }}
spec:
restartPolicy: Never
containers:
- name: postgres
image: ghcr.io/cloudnative-pg/postgresql:15.3
command:
- bash
- /scripts/init.sh
env:
- name: PGUSER
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: username
- name: PGPASSWORD
valueFrom:
secretKeyRef:
name: {{ .Release.Name }}-postgres-superuser
key: password
- name: PGHOST
value: {{ .Release.Name }}-postgres-rw
- name: PGPORT
value: "5432"
- name: PGDATABASE
value: postgres
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/secret
name: secret
- mountPath: /scripts
name: scripts
securityContext:
fsGroup: 26
runAsGroup: 26
runAsNonRoot: true
runAsUser: 26
seccompProfile:
type: RuntimeDefault
volumes:
- name: secret
secret:
secretName: {{ .Release.Name }}-postgres-superuser
- name: scripts
secret:
secretName: {{ .Release.Name }}-init-script

View File

@@ -0,0 +1,104 @@
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-init-script
stringData:
init.sh: |
#!/bin/bash
set -e
echo "== create users"
{{- if .Values.users }}
psql -v ON_ERROR_STOP=1 <<\EOT
{{- range $user, $u := .Values.users }}
SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
ALTER ROLE {{ $user }} WITH PASSWORD '{{ $u.password }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
{{- end }}
EOT
{{- end }}
echo "== delete users"
MANAGED_USERS=$(echo '\du+' | psql | awk -F'|' '$4 == " user managed by helm" {print $1}' | awk NF=NF RS= OFS=' ')
DEFINED_USERS="{{ join " " (keys .Values.users) }}"
DELETE_USERS=$(for user in $MANAGED_USERS; do case " $DEFINED_USERS " in *" $user "*) :;; *) echo $user;; esac; done)
echo "users to delete: $DELETE_USERS"
for user in $DELETE_USERS; do
# https://stackoverflow.com/a/51257346/2931267
psql -v ON_ERROR_STOP=1 --echo-all <<EOT
REASSIGN OWNED BY $user TO postgres;
DROP OWNED BY $user;
DROP USER $user;
EOT
done
echo "== create roles"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
SELECT 'CREATE ROLE app_admin NOINHERIT;'
WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'app_admin')\gexec
COMMENT ON ROLE app_admin IS 'role managed by helm';
EOT
echo "== grant privileges on databases to roles"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
ALTER DATABASE app OWNER TO app_admin;
DO $$
DECLARE
schema_record record;
BEGIN
-- Loop over all schemas
FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
-- Changing Schema Ownership
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');
-- Add rights for the admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL SEQUENCES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('GRANT ALL ON ALL FUNCTIONS IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', schema_record.schema_name, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', schema_record.schema_name, 'app_admin');
END LOOP;
END$$;
EOT
echo "== setup event trigger for schema creation"
psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
CREATE OR REPLACE FUNCTION auto_grant_schema_privileges()
RETURNS event_trigger LANGUAGE plpgsql AS $$
DECLARE
obj record;
BEGIN
FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE command_tag = 'CREATE SCHEMA' LOOP
-- Set owner for schema
EXECUTE format('ALTER SCHEMA %I OWNER TO %I', obj.object_identity, 'app_admin');
-- Set privileges for admin role
EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', obj.object_identity, 'app_admin');
EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', obj.object_identity, 'app_admin');
END LOOP;
END;
$$;
DROP EVENT TRIGGER IF EXISTS trigger_auto_grant;
CREATE EVENT TRIGGER trigger_auto_grant ON ddl_command_end
WHEN TAG IN ('CREATE SCHEMA')
EXECUTE PROCEDURE auto_grant_schema_privileges();
EOT
echo "== assign roles to users"
psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
GRANT app_admin TO app;
{{- range $user, $u := $.Values.users }}
GRANT app_admin TO {{ $user }};
{{- end }}
EOT

View File

@@ -0,0 +1,45 @@
---
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
name: {{ .Release.Name }}-postgres
spec:
instances: {{ .Values.replicas }}
enableSuperuserAccess: true
minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
monitoring:
enablePodMonitor: true
storage:
size: {{ required ".Values.size is required" .Values.size }}
{{- if .Values.users }}
managed:
roles:
{{- range $user, $config := .Values.users }}
- name: {{ $user }}
ensure: present
passwordSecret:
name: {{ printf "%s-user-%s" $.Release.Name $user }}
login: true
inRoles:
- app
{{- end }}
{{- end }}
{{- range $user, $config := .Values.users }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-user-%s" $.Release.Name $user }}
labels:
cnpg.io/reload: "true"
type: kubernetes.io/basic-auth
data:
username: {{ $user | b64enc }}
password: {{ $config.password | b64enc }}
{{- end }}

View File

@@ -0,0 +1,81 @@
{
"title": "Chart Values",
"type": "object",
"properties": {
"external": {
"type": "boolean",
"description": "Enable external access from outside the cluster",
"default": false
},
"size": {
"type": "string",
"description": "Persistent Volume size",
"default": "10Gi"
},
"replicas": {
"type": "number",
"description": "Number of Postgres replicas",
"default": 2
},
"quorum": {
"type": "object",
"properties": {
"minSyncReplicas": {
"type": "number",
"description": "Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.",
"default": 0
},
"maxSyncReplicas": {
"type": "number",
"description": "Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).",
"default": 0
}
}
},
"backup": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable pereiodic backups",
"default": false
},
"s3Region": {
"type": "string",
"description": "The AWS S3 region where backups are stored",
"default": "us-east-1"
},
"s3Bucket": {
"type": "string",
"description": "The S3 bucket used for storing backups",
"default": "s3.example.org/postgres-backups"
},
"schedule": {
"type": "string",
"description": "Cron schedule for automated backups",
"default": "0 2 * * *"
},
"cleanupStrategy": {
"type": "string",
"description": "The strategy for cleaning up old backups",
"default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
},
"s3AccessKey": {
"type": "string",
"description": "The access key for S3, used for authentication",
"default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
},
"s3SecretKey": {
"type": "string",
"description": "The secret key for S3, used for authentication",
"default": "ju3eum4dekeich9ahM1te8waeGai0oog"
},
"resticPassword": {
"type": "string",
"description": "The password for Restic backup encryption",
"default": "ChaXoveekoh6eigh4siesheeda2quai0"
}
}
}
}
}

View File

@@ -0,0 +1,48 @@
## @section Common parameters
## @param external Enable external access from outside the cluster
## @param size Persistent Volume size
## @param replicas Number of Postgres replicas
##
external: false
size: 10Gi
replicas: 2
## Configuration for the quorum-based synchronous replication
## @param quorum.minSyncReplicas Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed.
## @param quorum.maxSyncReplicas Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances).
quorum:
minSyncReplicas: 0
maxSyncReplicas: 0
## @section Configuration parameters
## @param users [object] Users configuration
## Example:
## users:
## user1:
## password: strongpassword
## user2:
## password: hackme
##
users: {}
## @section Backup parameters
## @param backup.enabled Enable periodic backups
## @param backup.s3Region The AWS S3 region where backups are stored
## @param backup.s3Bucket The S3 bucket used for storing backups
## @param backup.schedule Cron schedule for automated backups
## @param backup.cleanupStrategy The strategy for cleaning up old backups
## @param backup.s3AccessKey The access key for S3, used for authentication
## @param backup.s3SecretKey The secret key for S3, used for authentication
## @param backup.resticPassword The password for Restic backup encryption
backup:
enabled: false
s3Region: us-east-1
s3Bucket: s3.example.org/postgres-backups
schedule: "0 2 * * *"
cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
resticPassword: ChaXoveekoh6eigh4siesheeda2quai0

View File

@@ -1,6 +1,7 @@
clickhouse 0.1.0 ca79f72
clickhouse 0.2.0 7cd7de73
clickhouse 0.2.1 HEAD
ferretdb 0.1.0 HEAD
http-cache 0.1.0 a956713
http-cache 0.2.0 HEAD
kafka 0.1.0 760f86d2

View File

@@ -3,4 +3,4 @@ name: ingress
description: NGINX Ingress Controller
icon: https://docs.nginx.com/nginx-ingress-controller/images/icons/NGINX-Ingress-Controller-product-icon.svg
type: application
version: 1.1.0
version: 1.2.0

View File

@@ -3,12 +3,11 @@ apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
name: nginx-ingress-controller
namespace: cozy-monitoring
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- cozy-ingress-nginx
- {{ .Release.Namespace }}
podMetricsEndpoints:
- port: metrics
honorLabels: true
@@ -29,12 +28,11 @@ apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
name: nginx-ingress-controller-detailed
namespace: cozy-monitoring
spec:
jobLabel: jobLabel
namespaceSelector:
matchNames:
- cozy-ingress-nginx
- {{ .Release.Namespace }}
podMetricsEndpoints:
- port: metrics2
honorLabels: true

View File

@@ -3,6 +3,7 @@ etcd 2.0.0 a6d0f7cf
etcd 2.0.1 6fc1cc7d
etcd 2.1.0 HEAD
ingress 1.0.0 f642698
ingress 1.1.0 HEAD
ingress 1.1.0 838bee5d
ingress 1.2.0 HEAD
monitoring 1.0.0 f642698
monitoring 1.1.0 HEAD

View File

@@ -0,0 +1,3 @@
apiVersion: v2
name: cozy-nats-operator
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process

View File

@@ -0,0 +1,10 @@
export NAME=nats-operator
export NAMESPACE=cozy-$(NAME)
include ../../../scripts/package-system.mk
update:
rm -rf charts
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm repo update nats
helm pull nats/nats-operator --untar --untardir charts

View File

@@ -0,0 +1,24 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# Chart specific files
README.md

View File

@@ -0,0 +1,22 @@
apiVersion: v2
appVersion: 0.8.3
description: NATS operator creates/configures/manages nats clusters atop Kubernetes
home: https://github.com/nats-io/nats-operator
icon: https://nats.io/img/nats-icon-color.png
keywords:
- addressing
- discovery
- messaging
- nats
- operator
- pubsub
maintainers:
- email: richerlariviere@gmail.com
name: richerlariviere
- email: wally@nats.io
name: Waldemar Quevedo
url: https://github.com/wallyqs
name: nats-operator
sources:
- https://github.com/nats-io/nats-operator
version: 0.8.3

View File

@@ -0,0 +1,25 @@
{
"users": [
{{- if and (.Values.cluster.auth.username) (not .Values.cluster.auth.users) }}
{
"username": "{{ .Values.cluster.auth.username }}",
"password": "{{ .Values.cluster.auth.password }}"
}
{{- end }}
{{- if .Values.cluster.auth.users }}
{{ $length := len .Values.cluster.auth.users }}
{{- range $index, $user := .Values.cluster.auth.users }}
{
"username": "{{ $user.username }}",
"password": "{{ $user.password }}"
{{- if $user.permissions }},
"permissions": {{ toJson $user.permissions | replace "\\u003e" ">"}}
{{- end}}
}{{- if lt (add1 $index) $length }},{{ end }}
{{- end}}
{{- end }}
]{{- if .Values.cluster.auth.defaultPermissions }},
"default_permissions": {{ toJson .Values.cluster.auth.defaultPermissions | replace "\\u003e" ">" }}
{{- end}}
}

View File

@@ -0,0 +1,305 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: natsclusters.nats.io
annotations:
"helm.sh/hook": "crd-install"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
group: nats.io
scope: Namespaced
names:
kind: NatsCluster
listKind: NatsClusterList
plural: natsclusters
singular: natscluster
shortNames:
- nats
versions:
- name: v1alpha2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
size:
type: integer
version:
type: string
serverImage:
type: string
natsConfig:
type: object
properties:
debug:
type: boolean
trace:
type: boolean
write_deadline:
type: string
maxConnections:
type: integer
maxPayload:
type: integer
maxPending:
type: integer
maxSubscriptions:
type: integer
maxControlLine:
type: integer
disableLogtime:
type: boolean
useServerName:
type: boolean
paused:
type: boolean
pod:
type: object
properties:
labels:
x-kubernetes-preserve-unknown-fields: true
type: object
annotations:
x-kubernetes-preserve-unknown-fields: true
type: object
nodeSelector:
x-kubernetes-preserve-unknown-fields: true
type: object
antiAffinity:
type: boolean
resources:
x-kubernetes-preserve-unknown-fields: true
type: object
tolerations:
type: array
items:
x-kubernetes-preserve-unknown-fields: true
type: object
natsEnv:
type: array
items:
x-kubernetes-preserve-unknown-fields: true
type: object
enableConfigReload:
type: boolean
reloaderImage:
type: string
reloaderImageTag:
type: string
reloaderImagePullPolicy:
type: string
reloaderResources:
x-kubernetes-preserve-unknown-fields: true
type: object
enableMetrics:
type: boolean
metricsImage:
type: string
metricsImageTag:
type: string
metricsImagePullPolicy:
type: string
enableClientsHostPort:
type: boolean
advertiseExternalIP:
type: boolean
bootconfigImage:
type: string
bootconfigImageTag:
type: string
volumeMounts:
type: array
items:
x-kubernetes-preserve-unknown-fields: true
type: object
tls:
type: object
properties:
serverSecret:
type: string
serverSecretCAFileName:
type: string
serverSecretKeyFileName:
type: string
serverSecretCertFileName:
type: string
routesSecret:
type: string
routesSecretCAFileName:
type: string
routesSecretKeyFileName:
type: string
routesSecretCertFileName:
type: string
gatewaySecret:
type: string
gatewaySecretCAFileName:
type: string
gatewaySecretKeyFileName:
type: string
gatewaySecretCertFileName:
type: string
leafnodeSecret:
type: string
leafnodeSecretCAFileName:
type: string
leafnodeSecretKeyFileName:
type: string
leafnodeSecretCertFileName:
type: string
websocketSecret:
type: string
websocketSecretCAFileName:
type: string
websocketSecretKeyFileName:
type: string
websocketSecretCertFileName:
type: string
websocketTLSTimeout:
type: number
enableHttps:
type: boolean
clientsTLSTimeout:
type: number
routesTLSTimeout:
type: number
gatewaysTLSTimeout:
type: number
leafnodesTLSTimeout:
type: number
verify:
type: boolean
cipherSuites:
type: array
items:
type: string
curvePreferences:
type: array
items:
type: string
auth:
type: object
properties:
enableServiceAccounts:
type: boolean
clientsAuthSecret:
type: string
clientsAuthFile:
type: string
clientsAuthTimeout:
type: integer
tlsVerifyAndMap:
type: boolean
lameDuckDurationSeconds:
type: integer
noAdvertise:
type: boolean
template:
x-kubernetes-preserve-unknown-fields: true
type: object
extraRoutes:
type: array
items:
type: object
properties:
cluster:
type: string
route:
type: string
gatewayConfig:
type: object
properties:
name:
type: string
hostPort:
type: integer
rejectUnknown:
type: boolean
gateways:
type: array
items:
type: object
properties:
name:
type: string
url:
type: string
leafnodeConfig:
type: object
properties:
port:
type: integer
remotes:
type: array
items:
type: object
properties:
url:
type: string
urls:
type: array
items:
type: string
credentials:
type: string
operatorConfig:
type: object
properties:
secret:
type: string
systemAccount:
type: string
resolver:
type: string
websocketConfig:
type: object
properties:
port:
type: integer
handshakeTimeout:
type: integer
compression:
type: boolean
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: natsserviceroles.nats.io
annotations:
"helm.sh/hook": "crd-install"
"helm.sh/hook-delete-policy": "before-hook-creation"
spec:
group: nats.io
scope: Namespaced
names:
kind: NatsServiceRole
listKind: NatsServiceRoleList
plural: natsserviceroles
singular: natsservicerole
versions:
- name: v1alpha2
served: true
storage: true
schema:
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
permissions:
type: object
properties:
publish:
type: array
items:
type: string
subscribe:
type: array
items:
type: string

View File

@@ -0,0 +1,26 @@
** Please be patient while the chart is being deployed **
{{- if .Values.clusterScoped }}
** WARNING ! **: You've installed a cluster-scoped NATS Operator. Make sure that there are no other deployments of NATS Operator in the Kubernetes cluster.
{{- if not (eq .Release.Namespace "nats-io") }}
** WARNING ! **: The namespace must be "nats-io", but you used "{{ .Release.Namespace }}"!
{{- end }}
{{- end}}
NATS can be accessed via port 4222 on the following DNS name from within your cluster:
nats-cluster.{{ .Release.Namespace }}.svc.cluster.local
NATS monitoring service can be accessed via port 8222 on the following DNS name from within your cluster:
nats-cluster-mgmt.{{ .Release.Namespace }}.svc.cluster.local
To access the Monitoring svc from outside the cluster, follow the steps below:
1. Get the name of a pod from the cluster that was deployed, then use port-forward to connect to it. For example:
kubectl get pods -l nats_cluster=nats-cluster
kubectl port-forward nats-cluster-1 8222
2. Open a browser and access the NATS monitoring dashboard by browsing to the monitoring URL

View File

@@ -0,0 +1,44 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "nats.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "nats.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "nats.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "nats.labels" -}}
app.kubernetes.io/name: {{ template "nats.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "operator"
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
helm.sh/chart: {{ include "nats.chart" . }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "nats.selectorLabels" -}}
app.kubernetes.io/name: {{ include "nats.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "operator"
{{- end -}}

View File

@@ -0,0 +1,130 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "nats.fullname" . }}
{{- if and .Values.clusterScoped .Values.cluster.namespace }}
namespace: {{ .Values.cluster.namespace }}
{{- end }}
labels:
{{- include "nats.labels" . | nindent 4 }}
app: {{ template "nats.name" . }}
chart: {{ template "nats.chart" . }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
type: {{ .Values.updateStrategy }}
{{- if eq .Values.updateStrategy "RollingUpdate" }}
rollingUpdate:
maxSurge: {{ .Values.rollingUpdateMaxSurge }}
maxUnavailable: {{ .Values.rollingUpdateMaxUnavailable }}
{{- end }}
selector:
matchLabels:
app: {{ template "nats.name" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
{{- include "nats.selectorLabels" . | nindent 8 }}
app: {{ template "nats.name" . }}
release: {{ .Release.Name }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
{{- if .Values.podAnnotations }}
annotations:
{{- toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
{{- if .Values.rbacEnabled }}
serviceAccountName: nats-operator
{{- end }}
containers:
- name: nats-operator
image: {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.clusterScoped }}
args:
- nats-operator
- --feature-gates=ClusterScoped=true
{{- end }}
env:
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
ports:
- name: readyz
containerPort: 8080
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: /readyz
port: readyz
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /readyz
port: readyz
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 10}}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName}}"
{{- end }}
{{- if eq .Values.antiAffinity "hard" }}
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
labelSelector:
matchLabels:
app: "{{ template "nats.name" . }}"
release: {{ .Release.Name | quote }}
{{- else if eq .Values.antiAffinity "soft" }}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
podAffinityTerm:
topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
app: "{{ template "nats.name" . }}"
release: "{{ .Release.Name }}"
{{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{ .Values.image.pullSecrets}}
{{- end }}

View File

@@ -0,0 +1,70 @@
---
{{- if .Values.cluster.create }}
apiVersion: "nats.io/v1alpha2"
kind: "NatsCluster"
metadata:
name: {{ .Values.cluster.name }}
{{- if and .Values.clusterScoped .Values.cluster.namespace }}
namespace: {{ .Values.cluster.namespace }}
{{- end }}
spec:
size: {{ .Values.cluster.size }}
version: {{ .Values.cluster.version }}
pod:
{{- if .Values.cluster.annotations }}
annotations: {{ toYaml .Values.cluster.annotations | nindent 6 }}
{{- end }}
{{- if .Values.cluster.resources }}
resources: {{ toYaml .Values.cluster.resources | nindent 6 }}
{{- end }}
enableConfigReload: {{ .Values.cluster.configReload.enabled }}
reloaderImage: {{ .Values.cluster.configReload.repository }}
reloaderImageTag: {{ .Values.cluster.configReload.tag }}
reloaderImagePullPolicy: {{ .Values.cluster.configReload.pullPolicy }}
{{- if .Values.cluster.configReload.resources }}
reloaderResources: {{ toYaml .Values.cluster.configReload.resources | nindent 6 }}
{{- end }}
enableMetrics: {{ .Values.cluster.metrics.enabled }}
metricsImage: {{ .Values.cluster.metrics.repository }}
metricsImageTag: {{ .Values.cluster.metrics.tag }}
metricsImagePullPolicy: {{ .Values.cluster.metrics.pullPolicy }}
{{- if .Values.cluster.auth.enabled }}
auth:
enableServiceAccounts: {{ .Values.cluster.auth.enableServiceAccounts }}
clientsAuthSecret: {{ .Values.cluster.name }}-clients-auth
clientsAuthTimeout: 5
{{- end }}
{{- if .Values.cluster.tls.enabled }}
tls:
# Certificates to secure the NATS client connections:
serverSecret: {{ .Values.cluster.tls.serverSecret }}
# Certificates to secure the routes.
routesSecret: {{ .Values.cluster.tls.routesSecret }}
{{- end }}
---
{{- if and .Values.cluster.metrics.enabled .Values.cluster.metrics.servicemonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ .Values.cluster.name }}
{{- if and .Values.clusterScoped .Values.cluster.namespace }}
namespace: {{ .Values.cluster.namespace }}
{{- end }}
labels:
app: nats
nats_cluster: {{ .Values.cluster.name }}
prometheus: {{ .Values.cluster.metrics.servicemonitor.prometheusInstance }}
spec:
jobLabel: nats-{{ .Values.cluster.name }}
selector:
matchLabels:
app: nats
nats_cluster: {{ .Values.cluster.name }}
endpoints:
- port: metrics
interval: 60s
{{- end }}
{{- end }}

View File

@@ -0,0 +1,108 @@
{{- if .Values.rbacEnabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nats-io-nats-operator-crd
rules:
# Allow creating CRDs
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs: ["get", "list", "create", "update", "watch"]
# Allow all actions on NatsClusters
- apiGroups:
- nats.io
resources:
- natsclusters
- natsserviceroles
verbs: ["*"]
# Allowed actions on Pods
- apiGroups: [""]
resources:
- pods
verbs: ["create", "watch", "get", "patch", "update", "delete", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nats-io-nats-operator-crd-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nats-io-nats-operator-crd
subjects:
- kind: ServiceAccount
name: nats-operator
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if .Values.clusterScoped }}
kind: ClusterRole
{{- else }}
kind: Role
{{- end }}
metadata:
name: nats-io-nats-operator
rules:
# Allowed actions on Pods
- apiGroups: [""]
resources:
- pods
verbs: ["create", "watch", "get", "patch", "update", "delete", "list"]
# Allowed actions on Services
- apiGroups: [""]
resources:
- services
verbs: ["create", "watch", "get", "patch", "update", "delete", "list"]
# Allowed actions on Secrets
- apiGroups: [""]
resources:
- secrets
verbs: ["create", "watch", "get", "update", "delete", "list"]
# Allow all actions on some special subresources
- apiGroups: [""]
resources:
- pods/exec
- pods/log
- serviceaccounts/token
- events
verbs: ["*"]
# Allow listing Namespaces and ServiceAccounts
- apiGroups: [""]
resources:
- namespaces
- serviceaccounts
verbs: ["list", "get", "watch"]
# Allow actions on Endpoints
- apiGroups: [""]
resources:
- endpoints
verbs: ["create", "watch", "get", "update", "delete", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
{{- if .Values.clusterScoped }}
kind: ClusterRoleBinding
{{- else }}
kind: RoleBinding
{{- end }}
metadata:
name: nats-io-nats-operator-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
{{- if .Values.clusterScoped }}
kind: ClusterRole
{{- else }}
kind: Role
{{- end }}
name: nats-io-nats-operator
subjects:
- kind: ServiceAccount
name: nats-operator
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,12 @@
{{- if and .Values.cluster.create .Values.cluster.auth.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.cluster.name }}-clients-auth
{{- if and .Values.clusterScoped .Values.cluster.namespace }}
namespace: {{ .Values.cluster.namespace }}
{{- end }}
type: Opaque
data:
clients-auth.json: {{ (tpl (.Files.Get "config/client-auth.json") . ) | b64enc }}
{{- end }}

View File

@@ -0,0 +1,9 @@
{{- if .Values.rbacEnabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: nats-operator
{{- if and .Values.clusterScoped .Values.cluster.namespace }}
namespace: {{ .Values.cluster.namespace }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,191 @@
## Specify if RBAC authorization is enabled.
## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
##
rbacEnabled: true
## Operator scope
## NOTE: If true
## * Make sure that no other NATS Operator is running in the cluster
## * The Release namespace must be "nats-io"
clusterScoped: false
## Set the default replica count for the Operator
replicaCount: 1
image:
# natsio/nats-operator:0.8.3
registry: docker.io
repository: natsio/nats-operator
tag: 0.8.3
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: Always
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
# pullSecrets:
# - myRegistryKeySecretName
## NATS Pod Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
enabled: true
fsGroup: 1001
runAsUser: 1001
## NATS Node selector and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations
##
# nodeSelector: {}
# tolerations: []
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Pods anti-affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
## Possible values: soft, hard
antiAffinity: soft
## Pod annotations
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
podAnnotations: {}
## Additional pod labels
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
##
podLabels: {}
## Update strategy, can be "Recreate" or "RollingUpdate". Default is RollingUpdate.
updateStrategy: RollingUpdate
# rollingUpdateMaxSurge: 25%
# rollingUpdateMaxUnavailable: "25%"
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources: {}
# limits:
# cpu: 100m
# memory: 64Mi
# requests:
# cpu: 10m
# memory: 64Mi
## Configure extra options for liveness and readiness probes
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
livenessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
cluster:
## Create a NATS Cluster when installing the operator
create: true
name: nats-cluster
## Choose namespace for cluster deployment if clusterScoped is set to true
namespace: "nats-io"
## Nats version
## Image tags are listed here: https://hub.docker.com/_/nats?tab=tags
version: 1.4.1
## Cluster Size
size: 3
## Optional custom annotations to add to Pods in the cluster
annotations: {}
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 100m
# memory: 256Mi
## Client Authentication
## ref: https://github.com/nats-io/gnatsd#authentication
## note: token auth is not supported; only user/password works with this chart version
##
auth:
enabled: true
# NOTE: Only supported in Kubernetes v1.12+ clusters having the "TokenRequest" API enabled.
enableServiceAccounts: false
## This is where you enter a username/password for 1 user
username: "my-user"
password: "T0pS3cr3t"
## This is where you can specify two or more users
users: []
# - username: "another-user-1"
# password: "another-password-1"
# - username: "another-user-2"
# password: "another-password-2"
# permissions:
# publish: ["hello.*"]
# subscribe: ["hello.world"]
defaultPermissions: {}
# publish: ["SANDBOX.*"]
# subscribe: ["PUBLIC.>"]
tls:
enabled: false
# serverSecret:
# routesSecret:
## Configuration Reload
## NOTE: Only supported in Kubernetes v1.12+.
configReload:
enabled: false
registry: "docker.io"
repository: "connecteverything/nats-server-config-reloader"
tag: "0.2.2-v1alpha2"
pullPolicy: "IfNotPresent"
resources: {}
# limits:
# cpu: 50m
# memory: 32Mi
# requests:
# cpu: 10m
# memory: 32Mi
## Prometheus Metrics Exporter
##
metrics:
enabled: false
registry: "docker.io"
repository: "synadia/prometheus-nats-exporter"
tag: "0.6.2"
pullPolicy: "IfNotPresent"
# Prometheus Operator ServiceMonitor config
##
servicemonitor:
enabled: false
prometheusInstance: default

View File

@@ -0,0 +1,6 @@
nats-operator:
clusterScoped: true
cluster:
create: true
metrics:
enabled: true