mirror of
https://github.com/outbackdingo/cozystack.git
synced 2026-04-05 02:06:24 +00:00
Compare commits
1 Commits
clickhouse
...
framework
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa4d3a5c77 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,3 +1 @@
|
|||||||
_out
|
_out
|
||||||
.git
|
|
||||||
.idea
|
|
||||||
@@ -33,7 +33,7 @@ You can use Cozystack as Kubernetes distribution for Bare Metal
|
|||||||
|
|
||||||
## Documentation
|
## Documentation
|
||||||
|
|
||||||
The documentation is located on official [cozystack.io](https://cozystack.io) website.
|
The documentation is located on official [cozystack.io](cozystack.io) website.
|
||||||
|
|
||||||
Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
|
Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
|
||||||
|
|
||||||
@@ -44,8 +44,6 @@ If you encounter any difficulties, start with the [troubleshooting guide](https:
|
|||||||
Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
|
Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
|
||||||
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.
|
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.
|
||||||
|
|
||||||
- [Roadmap](https://github.com/orgs/aenix-io/projects/2)
|
|
||||||
|
|
||||||
## Contributions
|
## Contributions
|
||||||
|
|
||||||
Contributions are highly appreciated and very welcomed!
|
Contributions are highly appreciated and very welcomed!
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ set -e
|
|||||||
|
|
||||||
if [ -e $1 ]; then
|
if [ -e $1 ]; then
|
||||||
echo "Please pass version in the first argument"
|
echo "Please pass version in the first argument"
|
||||||
echo "Example: $0 0.2.0"
|
echo "Example: $0 v0.0.2"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -12,14 +12,8 @@ talos_version=$(awk '/^version:/ {print $2}' packages/core/installer/images/talo
|
|||||||
|
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
sed -i "/^TAG / s|=.*|= v${version}|" \
|
sed -i "/^TAG / s|=.*|= ${version}|" \
|
||||||
packages/apps/http-cache/Makefile \
|
packages/apps/http-cache/Makefile \
|
||||||
packages/apps/kubernetes/Makefile \
|
packages/apps/kubernetes/Makefile \
|
||||||
packages/core/installer/Makefile \
|
packages/core/installer/Makefile \
|
||||||
packages/system/dashboard/Makefile
|
packages/system/dashboard/Makefile
|
||||||
|
|
||||||
sed -i "/^VERSION / s|=.*|= ${version}|" \
|
|
||||||
packages/core/Makefile \
|
|
||||||
packages/system/Makefile
|
|
||||||
make -C packages/core fix-chartnames
|
|
||||||
make -C packages/system fix-chartnames
|
|
||||||
|
|||||||
@@ -70,7 +70,7 @@ spec:
|
|||||||
serviceAccountName: cozystack
|
serviceAccountName: cozystack
|
||||||
containers:
|
containers:
|
||||||
- name: cozystack
|
- name: cozystack
|
||||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.2.0"
|
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.1.0"
|
||||||
env:
|
env:
|
||||||
- name: KUBERNETES_SERVICE_HOST
|
- name: KUBERNETES_SERVICE_HOST
|
||||||
value: localhost
|
value: localhost
|
||||||
@@ -89,7 +89,7 @@ spec:
|
|||||||
fieldRef:
|
fieldRef:
|
||||||
fieldPath: metadata.name
|
fieldPath: metadata.name
|
||||||
- name: darkhttpd
|
- name: darkhttpd
|
||||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.2.0"
|
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.1.0"
|
||||||
command:
|
command:
|
||||||
- /usr/bin/darkhttpd
|
- /usr/bin/darkhttpd
|
||||||
- /cozystack/assets
|
- /cozystack/assets
|
||||||
@@ -102,6 +102,3 @@ spec:
|
|||||||
- key: "node.kubernetes.io/not-ready"
|
- key: "node.kubernetes.io/not-ready"
|
||||||
operator: "Exists"
|
operator: "Exists"
|
||||||
effect: "NoSchedule"
|
effect: "NoSchedule"
|
||||||
- key: "node.cilium.io/agent-not-ready"
|
|
||||||
operator: "Exists"
|
|
||||||
effect: "NoSchedule"
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ PUSH := 1
|
|||||||
LOAD := 0
|
LOAD := 0
|
||||||
REGISTRY := ghcr.io/aenix-io/cozystack
|
REGISTRY := ghcr.io/aenix-io/cozystack
|
||||||
NGINX_CACHE_TAG = v0.1.0
|
NGINX_CACHE_TAG = v0.1.0
|
||||||
TAG := v0.2.0
|
TAG := v0.1.0
|
||||||
|
|
||||||
image: image-nginx
|
image: image-nginx
|
||||||
|
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
{
|
{
|
||||||
"containerimage.config.digest": "sha256:0487fc50bb5f870720b05e947185424a400fad38b682af8f1ca4b418ed3c5b4b",
|
"containerimage.config.digest": "sha256:318fd8d0d6f6127387042f6ad150e87023d1961c7c5059dd5324188a54b0ab4e",
|
||||||
"containerimage.digest": "sha256:be12f3834be0e2f129685f682fab83c871610985fc43668ce6a294c9de603798"
|
"containerimage.digest": "sha256:e3cf145238e6e45f7f13b9acaea445c94ff29f76a34ba9fa50828401a5a3cc68"
|
||||||
}
|
}
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
PUSH := 1
|
PUSH := 1
|
||||||
LOAD := 0
|
LOAD := 0
|
||||||
REGISTRY := ghcr.io/aenix-io/cozystack
|
REGISTRY := ghcr.io/aenix-io/cozystack
|
||||||
TAG := v0.2.0
|
TAG := v0.1.0
|
||||||
UBUNTU_CONTAINER_DISK_TAG = v1.29.1
|
UBUNTU_CONTAINER_DISK_TAG = v1.29.1
|
||||||
|
|
||||||
image: image-ubuntu-container-disk
|
image: image-ubuntu-container-disk
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
{
|
{
|
||||||
"containerimage.config.digest": "sha256:43d0bfd01c5e364ba961f1e3dc2c7ccd7fd4ca65bd26bc8c4a5298d7ff2c9f4f",
|
"containerimage.config.digest": "sha256:ee8968be63c7c45621ec45f3687211e0875acb24e8d9784e8d2ebcbf46a3538c",
|
||||||
"containerimage.digest": "sha256:908b3c186bee86f1c9476317eb6582d07f19776b291aa068e5642f8fd08fa9e7"
|
"containerimage.digest": "sha256:16c3c07e74212585786dc1f1ae31d3ab90a575014806193e8e37d1d7751cb084"
|
||||||
}
|
}
|
||||||
@@ -16,7 +16,7 @@ type: application
|
|||||||
# This is the chart version. This version number should be incremented each time you make changes
|
# This is the chart version. This version number should be incremented each time you make changes
|
||||||
# to the chart and its templates, including the app version.
|
# to the chart and its templates, including the app version.
|
||||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||||
version: 0.2.0
|
version: 0.1.0
|
||||||
|
|
||||||
# This is the version number of the application being deployed. This version number should be
|
# This is the version number of the application being deployed. This version number should be
|
||||||
# incremented each time you make changes to the application. Versions are not expected to
|
# incremented each time you make changes to the application. Versions are not expected to
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{{- range $name := .Values.databases }}
|
{{- range $name := .Values.databases }}
|
||||||
{{ $dnsName := replace "_" "-" $name }}
|
{{ $dnsName := replace "_" "-" $name }}
|
||||||
---
|
---
|
||||||
apiVersion: k8s.mariadb.com/v1alpha1
|
apiVersion: mariadb.mmontes.io/v1alpha1
|
||||||
kind: Database
|
kind: Database
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ $.Release.Name }}-{{ $dnsName }}
|
name: {{ $.Release.Name }}-{{ $dnsName }}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
---
|
---
|
||||||
apiVersion: k8s.mariadb.com/v1alpha1
|
apiVersion: mariadb.mmontes.io/v1alpha1
|
||||||
kind: MariaDB
|
kind: MariaDB
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ .Release.Name }}
|
name: {{ .Release.Name }}
|
||||||
@@ -35,9 +35,8 @@ spec:
|
|||||||
# automaticFailover: true
|
# automaticFailover: true
|
||||||
|
|
||||||
metrics:
|
metrics:
|
||||||
enabled: true
|
|
||||||
exporter:
|
exporter:
|
||||||
image: prom/mysqld-exporter:v0.15.1
|
image: prom/mysqld-exporter:v0.14.0
|
||||||
resources:
|
resources:
|
||||||
requests:
|
requests:
|
||||||
cpu: 50m
|
cpu: 50m
|
||||||
@@ -54,10 +53,14 @@ spec:
|
|||||||
name: {{ .Release.Name }}-my-cnf
|
name: {{ .Release.Name }}-my-cnf
|
||||||
key: config
|
key: config
|
||||||
|
|
||||||
storage:
|
volumeClaimTemplate:
|
||||||
size: {{ .Values.size }}
|
resources:
|
||||||
resizeInUseVolumes: true
|
requests:
|
||||||
waitForVolumeResize: true
|
storage: {{ .Values.size }}
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteOnce
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
{{- if .Values.external }}
|
{{- if .Values.external }}
|
||||||
primaryService:
|
primaryService:
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
{{ if not (eq $name "root") }}
|
{{ if not (eq $name "root") }}
|
||||||
{{ $dnsName := replace "_" "-" $name }}
|
{{ $dnsName := replace "_" "-" $name }}
|
||||||
---
|
---
|
||||||
apiVersion: k8s.mariadb.com/v1alpha1
|
apiVersion: mariadb.mmontes.io/v1alpha1
|
||||||
kind: User
|
kind: User
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ $.Release.Name }}-{{ $dnsName }}
|
name: {{ $.Release.Name }}-{{ $dnsName }}
|
||||||
@@ -15,7 +15,7 @@ spec:
|
|||||||
key: {{ $name }}-password
|
key: {{ $name }}-password
|
||||||
maxUserConnections: {{ $u.maxUserConnections }}
|
maxUserConnections: {{ $u.maxUserConnections }}
|
||||||
---
|
---
|
||||||
apiVersion: k8s.mariadb.com/v1alpha1
|
apiVersion: mariadb.mmontes.io/v1alpha1
|
||||||
kind: Grant
|
kind: Grant
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ $.Release.Name }}-{{ $dnsName }}
|
name: {{ $.Release.Name }}-{{ $dnsName }}
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
http-cache 0.1.0 HEAD
|
http-cache 0.1.0 HEAD
|
||||||
kubernetes 0.1.0 HEAD
|
kubernetes 0.1.0 HEAD
|
||||||
mysql 0.1.0 f642698
|
mysql 0.1.0 HEAD
|
||||||
mysql 0.2.0 HEAD
|
|
||||||
postgres 0.1.0 HEAD
|
postgres 0.1.0 HEAD
|
||||||
rabbitmq 0.1.0 HEAD
|
rabbitmq 0.1.0 HEAD
|
||||||
redis 0.1.1 HEAD
|
redis 0.1.1 HEAD
|
||||||
|
|||||||
@@ -1,6 +1,4 @@
|
|||||||
VERSION := 0.2.0
|
|
||||||
|
|
||||||
gen: fix-chartnames
|
gen: fix-chartnames
|
||||||
|
|
||||||
fix-chartnames:
|
fix-chartnames:
|
||||||
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do printf "name: cozy-%s\nversion: $(VERSION)\n" "$$i" > "$$i/Chart.yaml"; done
|
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do printf "name: cozy-%s\nversion: 1.0.0\n" "$$i" > "$$i/Chart.yaml"; done
|
||||||
|
|||||||
@@ -1,13 +0,0 @@
|
|||||||
NAMESPACE=cozy-fluxcd
|
|
||||||
NAME=fluxcd
|
|
||||||
|
|
||||||
API_VERSIONS_FLAGS=$(addprefix -a ,$(shell kubectl api-versions))
|
|
||||||
|
|
||||||
show:
|
|
||||||
helm template -n $(NAMESPACE) $(NAME) . --no-hooks --dry-run=server $(API_VERSIONS_FLAGS)
|
|
||||||
|
|
||||||
apply:
|
|
||||||
helm template -n $(NAMESPACE) $(NAME) . --no-hooks --dry-run=server $(API_VERSIONS_FLAGS) | kubectl apply -n $(NAMESPACE) -f-
|
|
||||||
|
|
||||||
diff:
|
|
||||||
helm template -n $(NAMESPACE) $(NAME) . --no-hooks --dry-run=server $(API_VERSIONS_FLAGS) | kubectl diff -n $(NAMESPACE) -f-
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-installer
|
name: cozy-installer
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
NAMESPACE=cozy-system
|
NAMESPACE=cozy-installer
|
||||||
NAME=installer
|
NAME=installer
|
||||||
PUSH := 1
|
PUSH := 1
|
||||||
LOAD := 0
|
LOAD := 0
|
||||||
REGISTRY := ghcr.io/aenix-io/cozystack
|
REGISTRY := ghcr.io/aenix-io/cozystack
|
||||||
TAG := v0.2.0
|
TAG := v0.1.0
|
||||||
TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' images/talos/profiles/installer.yaml)
|
TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' images/talos/profiles/installer.yaml)
|
||||||
|
|
||||||
show:
|
show:
|
||||||
@@ -21,7 +21,6 @@ update:
|
|||||||
image: image-cozystack image-talos image-matchbox
|
image: image-cozystack image-talos image-matchbox
|
||||||
|
|
||||||
image-cozystack:
|
image-cozystack:
|
||||||
make -C ../../.. repos
|
|
||||||
docker buildx build -f images/cozystack/Dockerfile ../../.. \
|
docker buildx build -f images/cozystack/Dockerfile ../../.. \
|
||||||
--provenance false \
|
--provenance false \
|
||||||
--tag $(REGISTRY)/cozystack:$(TAG) \
|
--tag $(REGISTRY)/cozystack:$(TAG) \
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
{
|
{
|
||||||
"containerimage.config.digest": "sha256:326a169fb5d4277a5c3b0359e0c885b31d1360b58475bbc316be1971c710cd8d",
|
"containerimage.config.digest": "sha256:ec8a4983a663f06a1503507482667a206e83e0d8d3663dff60ced9221855d6b0",
|
||||||
"containerimage.digest": "sha256:a608bdb75b3e06f6365f5f0b3fea82ac93c564d11f316f17e3d46e8a497a321d"
|
"containerimage.digest": "sha256:abb7b2fbc1f143c922f2a35afc4423a74b2b63c0bddfe620750613ed835aa861"
|
||||||
}
|
}
|
||||||
@@ -1 +1 @@
|
|||||||
ghcr.io/aenix-io/cozystack/cozystack:v0.2.0
|
ghcr.io/aenix-io/cozystack/cozystack:v0.1.0
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
{
|
{
|
||||||
"containerimage.config.digest": "sha256:dc584f743bb73e04dcbebca7ab4f602f2c067190fd9609c3fd84412e83c20445",
|
"containerimage.config.digest": "sha256:b869a6324f9c0e6d1dd48eee67cbe3842ee14efd59bdde477736ad2f90568ff7",
|
||||||
"containerimage.digest": "sha256:39ab0bf769b269a8082eeb31a9672e39caa61dd342ba2157b954c642f54a32ff"
|
"containerimage.digest": "sha256:c30b237c5fa4fbbe47e1aba56e8f99569fe865620aa1953f31fc373794123cd7"
|
||||||
}
|
}
|
||||||
@@ -82,9 +82,6 @@ spec:
|
|||||||
- key: "node.kubernetes.io/not-ready"
|
- key: "node.kubernetes.io/not-ready"
|
||||||
operator: "Exists"
|
operator: "Exists"
|
||||||
effect: "NoSchedule"
|
effect: "NoSchedule"
|
||||||
- key: "node.cilium.io/agent-not-ready"
|
|
||||||
operator: "Exists"
|
|
||||||
effect: "NoSchedule"
|
|
||||||
---
|
---
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Service
|
kind: Service
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-platform
|
name: cozy-platform
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ namespaces-show:
|
|||||||
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml
|
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml
|
||||||
|
|
||||||
namespaces-apply:
|
namespaces-apply:
|
||||||
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl apply -n $(NAMESPACE) -f-
|
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl apply -f-
|
||||||
|
|
||||||
diff:
|
diff:
|
||||||
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) | kubectl diff -f-
|
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl diff -f-
|
||||||
|
|||||||
@@ -1,114 +0,0 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
|
||||||
|
|
||||||
releases:
|
|
||||||
- name: cilium
|
|
||||||
releaseName: cilium
|
|
||||||
chart: cozy-cilium
|
|
||||||
namespace: cozy-cilium
|
|
||||||
privileged: true
|
|
||||||
dependsOn: []
|
|
||||||
values:
|
|
||||||
cilium:
|
|
||||||
bpf:
|
|
||||||
masquerade: true
|
|
||||||
cni:
|
|
||||||
chainingMode: ~
|
|
||||||
customConf: false
|
|
||||||
configMap: ""
|
|
||||||
enableIPv4Masquerade: true
|
|
||||||
enableIdentityMark: true
|
|
||||||
ipv4NativeRoutingCIDR: "{{ index $cozyConfig.data "ipv4-pod-cidr" }}"
|
|
||||||
autoDirectNodeRoutes: true
|
|
||||||
|
|
||||||
- name: cert-manager
|
|
||||||
releaseName: cert-manager
|
|
||||||
chart: cozy-cert-manager
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: [cilium]
|
|
||||||
|
|
||||||
- name: cert-manager-issuers
|
|
||||||
releaseName: cert-manager-issuers
|
|
||||||
chart: cozy-cert-manager-issuers
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: [cilium,cert-manager]
|
|
||||||
|
|
||||||
- name: victoria-metrics-operator
|
|
||||||
releaseName: victoria-metrics-operator
|
|
||||||
chart: cozy-victoria-metrics-operator
|
|
||||||
namespace: cozy-victoria-metrics-operator
|
|
||||||
dependsOn: [cilium,cert-manager]
|
|
||||||
|
|
||||||
- name: monitoring
|
|
||||||
releaseName: monitoring
|
|
||||||
chart: cozy-monitoring
|
|
||||||
namespace: cozy-monitoring
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium,victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: metallb
|
|
||||||
releaseName: metallb
|
|
||||||
chart: cozy-metallb
|
|
||||||
namespace: cozy-metallb
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium]
|
|
||||||
|
|
||||||
- name: grafana-operator
|
|
||||||
releaseName: grafana-operator
|
|
||||||
chart: cozy-grafana-operator
|
|
||||||
namespace: cozy-grafana-operator
|
|
||||||
dependsOn: [cilium]
|
|
||||||
|
|
||||||
- name: mariadb-operator
|
|
||||||
releaseName: mariadb-operator
|
|
||||||
chart: cozy-mariadb-operator
|
|
||||||
namespace: cozy-mariadb-operator
|
|
||||||
dependsOn: [cilium,cert-manager,victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: postgres-operator
|
|
||||||
releaseName: postgres-operator
|
|
||||||
chart: cozy-postgres-operator
|
|
||||||
namespace: cozy-postgres-operator
|
|
||||||
dependsOn: [cilium,cert-manager]
|
|
||||||
|
|
||||||
- name: kafka-operator
|
|
||||||
releaseName: kafka-operator
|
|
||||||
chart: cozy-kafka-operator
|
|
||||||
namespace: cozy-kafka-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: clickhouse-operator
|
|
||||||
releaseName: clickhouse-operator
|
|
||||||
chart: cozy-clickhouse-operator
|
|
||||||
namespace: cozy-clickhouse-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: rabbitmq-operator
|
|
||||||
releaseName: rabbitmq-operator
|
|
||||||
chart: cozy-rabbitmq-operator
|
|
||||||
namespace: cozy-rabbitmq-operator
|
|
||||||
dependsOn: [cilium]
|
|
||||||
|
|
||||||
- name: redis-operator
|
|
||||||
releaseName: redis-operator
|
|
||||||
chart: cozy-redis-operator
|
|
||||||
namespace: cozy-redis-operator
|
|
||||||
dependsOn: [cilium]
|
|
||||||
|
|
||||||
- name: piraeus-operator
|
|
||||||
releaseName: piraeus-operator
|
|
||||||
chart: cozy-piraeus-operator
|
|
||||||
namespace: cozy-linstor
|
|
||||||
dependsOn: [cilium,cert-manager]
|
|
||||||
|
|
||||||
- name: linstor
|
|
||||||
releaseName: linstor
|
|
||||||
chart: cozy-linstor
|
|
||||||
namespace: cozy-linstor
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [piraeus-operator,cilium,cert-manager]
|
|
||||||
|
|
||||||
- name: telepresence
|
|
||||||
releaseName: traffic-manager
|
|
||||||
chart: cozy-telepresence
|
|
||||||
namespace: cozy-telepresence
|
|
||||||
dependsOn: []
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
|
||||||
|
|
||||||
releases:
|
|
||||||
- name: cert-manager
|
|
||||||
releaseName: cert-manager
|
|
||||||
chart: cozy-cert-manager
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: cert-manager-issuers
|
|
||||||
releaseName: cert-manager-issuers
|
|
||||||
chart: cozy-cert-manager-issuers
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: victoria-metrics-operator
|
|
||||||
releaseName: victoria-metrics-operator
|
|
||||||
chart: cozy-victoria-metrics-operator
|
|
||||||
namespace: cozy-victoria-metrics-operator
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: monitoring
|
|
||||||
releaseName: monitoring
|
|
||||||
chart: cozy-monitoring
|
|
||||||
namespace: cozy-monitoring
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: grafana-operator
|
|
||||||
releaseName: grafana-operator
|
|
||||||
chart: cozy-grafana-operator
|
|
||||||
namespace: cozy-grafana-operator
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: mariadb-operator
|
|
||||||
releaseName: mariadb-operator
|
|
||||||
chart: cozy-mariadb-operator
|
|
||||||
namespace: cozy-mariadb-operator
|
|
||||||
dependsOn: [victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: postgres-operator
|
|
||||||
releaseName: postgres-operator
|
|
||||||
chart: cozy-postgres-operator
|
|
||||||
namespace: cozy-postgres-operator
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: kafka-operator
|
|
||||||
releaseName: kafka-operator
|
|
||||||
chart: cozy-kafka-operator
|
|
||||||
namespace: cozy-kafka-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: clickhouse-operator
|
|
||||||
releaseName: clickhouse-operator
|
|
||||||
chart: cozy-clickhouse-operator
|
|
||||||
namespace: cozy-clickhouse-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: rabbitmq-operator
|
|
||||||
releaseName: rabbitmq-operator
|
|
||||||
chart: cozy-rabbitmq-operator
|
|
||||||
namespace: cozy-rabbitmq-operator
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: redis-operator
|
|
||||||
releaseName: redis-operator
|
|
||||||
chart: cozy-redis-operator
|
|
||||||
namespace: cozy-redis-operator
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: telepresence
|
|
||||||
releaseName: traffic-manager
|
|
||||||
chart: cozy-telepresence
|
|
||||||
namespace: cozy-telepresence
|
|
||||||
dependsOn: []
|
|
||||||
@@ -1,183 +0,0 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
|
||||||
|
|
||||||
releases:
|
|
||||||
- name: cilium
|
|
||||||
releaseName: cilium
|
|
||||||
chart: cozy-cilium
|
|
||||||
namespace: cozy-cilium
|
|
||||||
privileged: true
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: kubeovn
|
|
||||||
releaseName: kubeovn
|
|
||||||
chart: cozy-kubeovn
|
|
||||||
namespace: cozy-kubeovn
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium]
|
|
||||||
values:
|
|
||||||
cozystack:
|
|
||||||
nodesHash: {{ include "cozystack.master-node-ips" . | sha256sum }}
|
|
||||||
kube-ovn:
|
|
||||||
ipv4:
|
|
||||||
POD_CIDR: "{{ index $cozyConfig.data "ipv4-pod-cidr" }}"
|
|
||||||
POD_GATEWAY: "{{ index $cozyConfig.data "ipv4-pod-gateway" }}"
|
|
||||||
SVC_CIDR: "{{ index $cozyConfig.data "ipv4-svc-cidr" }}"
|
|
||||||
JOIN_CIDR: "{{ index $cozyConfig.data "ipv4-join-cidr" }}"
|
|
||||||
|
|
||||||
- name: cert-manager
|
|
||||||
releaseName: cert-manager
|
|
||||||
chart: cozy-cert-manager
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: cert-manager-issuers
|
|
||||||
releaseName: cert-manager-issuers
|
|
||||||
chart: cozy-cert-manager-issuers
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: victoria-metrics-operator
|
|
||||||
releaseName: victoria-metrics-operator
|
|
||||||
chart: cozy-victoria-metrics-operator
|
|
||||||
namespace: cozy-victoria-metrics-operator
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: monitoring
|
|
||||||
releaseName: monitoring
|
|
||||||
chart: cozy-monitoring
|
|
||||||
namespace: cozy-monitoring
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: kubevirt-operator
|
|
||||||
releaseName: kubevirt-operator
|
|
||||||
chart: cozy-kubevirt-operator
|
|
||||||
namespace: cozy-kubevirt
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: kubevirt
|
|
||||||
releaseName: kubevirt
|
|
||||||
chart: cozy-kubevirt
|
|
||||||
namespace: cozy-kubevirt
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium,kubeovn,kubevirt-operator]
|
|
||||||
|
|
||||||
- name: kubevirt-cdi-operator
|
|
||||||
releaseName: kubevirt-cdi-operator
|
|
||||||
chart: cozy-kubevirt-cdi-operator
|
|
||||||
namespace: cozy-kubevirt-cdi
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: kubevirt-cdi
|
|
||||||
releaseName: kubevirt-cdi
|
|
||||||
chart: cozy-kubevirt-cdi
|
|
||||||
namespace: cozy-kubevirt-cdi
|
|
||||||
dependsOn: [cilium,kubeovn,kubevirt-cdi-operator]
|
|
||||||
|
|
||||||
- name: metallb
|
|
||||||
releaseName: metallb
|
|
||||||
chart: cozy-metallb
|
|
||||||
namespace: cozy-metallb
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: grafana-operator
|
|
||||||
releaseName: grafana-operator
|
|
||||||
chart: cozy-grafana-operator
|
|
||||||
namespace: cozy-grafana-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: mariadb-operator
|
|
||||||
releaseName: mariadb-operator
|
|
||||||
chart: cozy-mariadb-operator
|
|
||||||
namespace: cozy-mariadb-operator
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager,victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: postgres-operator
|
|
||||||
releaseName: postgres-operator
|
|
||||||
chart: cozy-postgres-operator
|
|
||||||
namespace: cozy-postgres-operator
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: kafka-operator
|
|
||||||
releaseName: kafka-operator
|
|
||||||
chart: cozy-kafka-operator
|
|
||||||
namespace: cozy-kafka-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: clickhouse-operator
|
|
||||||
releaseName: clickhouse-operator
|
|
||||||
chart: cozy-clickhouse-operator
|
|
||||||
namespace: cozy-clickhouse-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: rabbitmq-operator
|
|
||||||
releaseName: rabbitmq-operator
|
|
||||||
chart: cozy-rabbitmq-operator
|
|
||||||
namespace: cozy-rabbitmq-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: redis-operator
|
|
||||||
releaseName: redis-operator
|
|
||||||
chart: cozy-redis-operator
|
|
||||||
namespace: cozy-redis-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: piraeus-operator
|
|
||||||
releaseName: piraeus-operator
|
|
||||||
chart: cozy-piraeus-operator
|
|
||||||
namespace: cozy-linstor
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: linstor
|
|
||||||
releaseName: linstor
|
|
||||||
chart: cozy-linstor
|
|
||||||
namespace: cozy-linstor
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [piraeus-operator,cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: telepresence
|
|
||||||
releaseName: traffic-manager
|
|
||||||
chart: cozy-telepresence
|
|
||||||
namespace: cozy-telepresence
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: dashboard
|
|
||||||
releaseName: dashboard
|
|
||||||
chart: cozy-dashboard
|
|
||||||
namespace: cozy-dashboard
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
|
|
||||||
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
|
|
||||||
values:
|
|
||||||
kubeapps:
|
|
||||||
redis:
|
|
||||||
master:
|
|
||||||
podAnnotations:
|
|
||||||
{{- range $index, $repo := . }}
|
|
||||||
{{- with (($repo.status).artifact).revision }}
|
|
||||||
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
- name: kamaji
|
|
||||||
releaseName: kamaji
|
|
||||||
chart: cozy-kamaji
|
|
||||||
namespace: cozy-kamaji
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: capi-operator
|
|
||||||
releaseName: capi-operator
|
|
||||||
chart: cozy-capi-operator
|
|
||||||
namespace: cozy-cluster-api
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium,kubeovn,cert-manager]
|
|
||||||
|
|
||||||
- name: capi-providers
|
|
||||||
releaseName: capi-providers
|
|
||||||
chart: cozy-capi-providers
|
|
||||||
namespace: cozy-cluster-api
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [cilium,kubeovn,capi-operator]
|
|
||||||
@@ -1,101 +0,0 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
|
||||||
|
|
||||||
releases:
|
|
||||||
- name: cert-manager
|
|
||||||
releaseName: cert-manager
|
|
||||||
chart: cozy-cert-manager
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: cert-manager-issuers
|
|
||||||
releaseName: cert-manager-issuers
|
|
||||||
chart: cozy-cert-manager-issuers
|
|
||||||
namespace: cozy-cert-manager
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: victoria-metrics-operator
|
|
||||||
releaseName: victoria-metrics-operator
|
|
||||||
chart: cozy-victoria-metrics-operator
|
|
||||||
namespace: cozy-victoria-metrics-operator
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: monitoring
|
|
||||||
releaseName: monitoring
|
|
||||||
chart: cozy-monitoring
|
|
||||||
namespace: cozy-monitoring
|
|
||||||
privileged: true
|
|
||||||
dependsOn: [victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: grafana-operator
|
|
||||||
releaseName: grafana-operator
|
|
||||||
chart: cozy-grafana-operator
|
|
||||||
namespace: cozy-grafana-operator
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: mariadb-operator
|
|
||||||
releaseName: mariadb-operator
|
|
||||||
chart: cozy-mariadb-operator
|
|
||||||
namespace: cozy-mariadb-operator
|
|
||||||
dependsOn: [cert-manager,victoria-metrics-operator]
|
|
||||||
|
|
||||||
- name: postgres-operator
|
|
||||||
releaseName: postgres-operator
|
|
||||||
chart: cozy-postgres-operator
|
|
||||||
namespace: cozy-postgres-operator
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: kafka-operator
|
|
||||||
releaseName: kafka-operator
|
|
||||||
chart: cozy-kafka-operator
|
|
||||||
namespace: cozy-kafka-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: clickhouse-operator
|
|
||||||
releaseName: clickhouse-operator
|
|
||||||
chart: cozy-clickhouse-operator
|
|
||||||
namespace: cozy-clickhouse-operator
|
|
||||||
dependsOn: [cilium,kubeovn]
|
|
||||||
|
|
||||||
- name: rabbitmq-operator
|
|
||||||
releaseName: rabbitmq-operator
|
|
||||||
chart: cozy-rabbitmq-operator
|
|
||||||
namespace: cozy-rabbitmq-operator
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: redis-operator
|
|
||||||
releaseName: redis-operator
|
|
||||||
chart: cozy-redis-operator
|
|
||||||
namespace: cozy-redis-operator
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: piraeus-operator
|
|
||||||
releaseName: piraeus-operator
|
|
||||||
chart: cozy-piraeus-operator
|
|
||||||
namespace: cozy-linstor
|
|
||||||
dependsOn: [cert-manager]
|
|
||||||
|
|
||||||
- name: telepresence
|
|
||||||
releaseName: traffic-manager
|
|
||||||
chart: cozy-telepresence
|
|
||||||
namespace: cozy-telepresence
|
|
||||||
dependsOn: []
|
|
||||||
|
|
||||||
- name: dashboard
|
|
||||||
releaseName: dashboard
|
|
||||||
chart: cozy-dashboard
|
|
||||||
namespace: cozy-dashboard
|
|
||||||
dependsOn: []
|
|
||||||
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
|
|
||||||
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
|
|
||||||
values:
|
|
||||||
kubeapps:
|
|
||||||
redis:
|
|
||||||
master:
|
|
||||||
podAnnotations:
|
|
||||||
{{- range $index, $repo := . }}
|
|
||||||
{{- with (($repo.status).artifact).revision }}
|
|
||||||
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
{{/*
|
{{/*
|
||||||
Get IP-addresses of master nodes
|
Get IP-addresses of master nodes
|
||||||
*/}}
|
*/}}
|
||||||
{{- define "cozystack.master-node-ips" -}}
|
{{- define "master.nodeIPs" -}}
|
||||||
{{- $nodes := lookup "v1" "Node" "" "" -}}
|
{{- $nodes := lookup "v1" "Node" "" "" -}}
|
||||||
{{- $ips := list -}}
|
{{- $ips := list -}}
|
||||||
{{- range $node := $nodes.items -}}
|
{{- range $node := $nodes.items -}}
|
||||||
|
|||||||
@@ -1,10 +1,7 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
|
||||||
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
|
|
||||||
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
|
|
||||||
{{- $host := "example.org" }}
|
{{- $host := "example.org" }}
|
||||||
{{- $tenantRoot := list }}
|
{{- $tenantRoot := list }}
|
||||||
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2beta2" }}
|
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2beta1" }}
|
||||||
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2beta2" "HelmRelease" "tenant-root" "tenant-root" }}
|
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2beta1" "HelmRelease" "tenant-root" "tenant-root" }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
|
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
|
||||||
{{- $host = $tenantRoot.spec.values.host }}
|
{{- $host = $tenantRoot.spec.values.host }}
|
||||||
@@ -22,7 +19,7 @@ metadata:
|
|||||||
namespace.cozystack.io/host: "{{ $host }}"
|
namespace.cozystack.io/host: "{{ $host }}"
|
||||||
name: tenant-root
|
name: tenant-root
|
||||||
---
|
---
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2beta2
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
kind: HelmRelease
|
kind: HelmRelease
|
||||||
metadata:
|
metadata:
|
||||||
name: tenant-root
|
name: tenant-root
|
||||||
@@ -48,9 +45,7 @@ spec:
|
|||||||
values:
|
values:
|
||||||
host: "{{ $host }}"
|
host: "{{ $host }}"
|
||||||
dependsOn:
|
dependsOn:
|
||||||
{{- range $x := $bundle.releases }}
|
- name: cilium
|
||||||
{{- if has $x.name (list "cilium" "kubeovn") }}
|
namespace: cozy-cilium
|
||||||
- name: {{ $x.name }}
|
- name: kubeovn
|
||||||
namespace: {{ $x.namespace }}
|
namespace: cozy-kubeovn
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
|
|||||||
@@ -1,27 +1,13 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
|
|
||||||
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
|
|
||||||
{{- $dependencyNamespaces := dict }}
|
|
||||||
{{- $disabledComponents := splitList "," ((index $cozyConfig.data "bundle-disable") | default "") }}
|
|
||||||
|
|
||||||
{{/* collect dependency namespaces from releases */}}
|
|
||||||
{{- range $x := $bundle.releases }}
|
|
||||||
{{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{- range $x := $bundle.releases }}
|
|
||||||
{{- if not (has $x.name $disabledComponents) }}
|
|
||||||
---
|
|
||||||
apiVersion: helm.toolkit.fluxcd.io/v2beta2
|
|
||||||
kind: HelmRelease
|
kind: HelmRelease
|
||||||
metadata:
|
metadata:
|
||||||
name: {{ $x.name }}
|
name: cilium
|
||||||
namespace: {{ $x.namespace }}
|
namespace: cozy-cilium
|
||||||
labels:
|
labels:
|
||||||
cozystack.io/repository: system
|
cozystack.io/repository: system
|
||||||
spec:
|
spec:
|
||||||
interval: 1m
|
interval: 1m
|
||||||
releaseName: {{ $x.releaseName | default $x.name }}
|
releaseName: cilium
|
||||||
install:
|
install:
|
||||||
remediation:
|
remediation:
|
||||||
retries: -1
|
retries: -1
|
||||||
@@ -30,31 +16,743 @@ spec:
|
|||||||
retries: -1
|
retries: -1
|
||||||
chart:
|
chart:
|
||||||
spec:
|
spec:
|
||||||
chart: {{ $x.chart }}
|
chart: cozy-cilium
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: kubeovn
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-kubeovn
|
||||||
reconcileStrategy: Revision
|
reconcileStrategy: Revision
|
||||||
sourceRef:
|
sourceRef:
|
||||||
kind: HelmRepository
|
kind: HelmRepository
|
||||||
name: cozystack-system
|
name: cozystack-system
|
||||||
namespace: cozy-system
|
namespace: cozy-system
|
||||||
{{- $values := dict }}
|
|
||||||
{{- with $x.values }}
|
|
||||||
{{- $values = merge . $values }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with index $cozyConfig.data (printf "values-%s" $x.name) }}
|
|
||||||
{{- $values = merge (fromYaml .) $values }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with $values }}
|
|
||||||
values:
|
values:
|
||||||
{{- toYaml . | nindent 4}}
|
cozystack:
|
||||||
{{- end }}
|
configHash: {{ index (lookup "v1" "ConfigMap" "cozy-system" "cozystack") "data" | toJson | sha256sum }}
|
||||||
{{- with $x.dependsOn }}
|
nodesHash: {{ include "master.nodeIPs" . | sha256sum }}
|
||||||
dependsOn:
|
dependsOn:
|
||||||
{{- range $dep := . }}
|
- name: cilium
|
||||||
{{- if not (has $dep $disabledComponents) }}
|
namespace: cozy-cilium
|
||||||
- name: {{ $dep }}
|
---
|
||||||
namespace: {{ index $dependencyNamespaces $dep }}
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: cozy-fluxcd
|
||||||
|
namespace: cozy-fluxcd
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: fluxcd
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-fluxcd
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: cert-manager
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-cert-manager
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: cert-manager-issuers
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: cert-manager-issuers
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-cert-manager-issuers
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: victoria-metrics-operator
|
||||||
|
namespace: cozy-victoria-metrics-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: victoria-metrics-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-victoria-metrics-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: monitoring
|
||||||
|
namespace: cozy-monitoring
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: monitoring
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-monitoring
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: victoria-metrics-operator
|
||||||
|
namespace: cozy-victoria-metrics-operator
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: kubevirt-operator
|
||||||
|
namespace: cozy-kubevirt
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: kubevirt-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-kubevirt-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: kubevirt
|
||||||
|
namespace: cozy-kubevirt
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: kubevirt
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-kubevirt
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: kubevirt-operator
|
||||||
|
namespace: cozy-kubevirt
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: kubevirt-cdi-operator
|
||||||
|
namespace: cozy-kubevirt-cdi
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: kubevirt-cdi-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-kubevirt-cdi-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: kubevirt-cdi
|
||||||
|
namespace: cozy-kubevirt-cdi
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: kubevirt-cdi
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-kubevirt-cdi
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: kubevirt-cdi-operator
|
||||||
|
namespace: cozy-kubevirt-cdi
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: metallb
|
||||||
|
namespace: cozy-metallb
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: metallb
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-metallb
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: grafana-operator
|
||||||
|
namespace: cozy-grafana-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: grafana-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-grafana-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: mariadb-operator
|
||||||
|
namespace: cozy-mariadb-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: mariadb-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-mariadb-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
- name: victoria-metrics-operator
|
||||||
|
namespace: cozy-victoria-metrics-operator
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: postgres-operator
|
||||||
|
namespace: cozy-postgres-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: postgres-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-postgres-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: rabbitmq-operator
|
||||||
|
namespace: cozy-rabbitmq-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: rabbitmq-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-rabbitmq-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: redis-operator
|
||||||
|
namespace: cozy-redis-operator
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: redis-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-redis-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: piraeus-operator
|
||||||
|
namespace: cozy-linstor
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: piraeus-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-piraeus-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: linstor
|
||||||
|
namespace: cozy-linstor
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: linstor
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-linstor
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: piraeus-operator
|
||||||
|
namespace: cozy-linstor
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: telepresence
|
||||||
|
namespace: cozy-telepresence
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: traffic-manager
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-telepresence
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: dashboard
|
||||||
|
namespace: cozy-dashboard
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: dashboard
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-dashboard
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
|
||||||
|
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
|
||||||
|
values:
|
||||||
|
kubeapps:
|
||||||
|
redis:
|
||||||
|
master:
|
||||||
|
podAnnotations:
|
||||||
|
{{- range $index, $repo := . }}
|
||||||
|
{{- with (($repo.status).artifact).revision }}
|
||||||
|
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end }}
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: kamaji
|
||||||
|
namespace: cozy-kamaji
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: kamaji
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-kamaji
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: capi-operator
|
||||||
|
namespace: cozy-cluster-api
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: capi-operator
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-capi-operator
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
- name: cert-manager
|
||||||
|
namespace: cozy-cert-manager
|
||||||
|
---
|
||||||
|
apiVersion: helm.toolkit.fluxcd.io/v2beta1
|
||||||
|
kind: HelmRelease
|
||||||
|
metadata:
|
||||||
|
name: capi-providers
|
||||||
|
namespace: cozy-cluster-api
|
||||||
|
labels:
|
||||||
|
cozystack.io/repository: system
|
||||||
|
spec:
|
||||||
|
interval: 1m
|
||||||
|
releaseName: capi-providers
|
||||||
|
install:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
upgrade:
|
||||||
|
remediation:
|
||||||
|
retries: -1
|
||||||
|
chart:
|
||||||
|
spec:
|
||||||
|
chart: cozy-capi-providers
|
||||||
|
reconcileStrategy: Revision
|
||||||
|
sourceRef:
|
||||||
|
kind: HelmRepository
|
||||||
|
name: cozystack-system
|
||||||
|
namespace: cozy-system
|
||||||
|
dependsOn:
|
||||||
|
- name: capi-operator
|
||||||
|
namespace: cozy-cluster-api
|
||||||
|
- name: cilium
|
||||||
|
namespace: cozy-cilium
|
||||||
|
- name: kubeovn
|
||||||
|
namespace: cozy-kubeovn
|
||||||
|
|||||||
@@ -1,33 +1,13 @@
|
|||||||
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
|
{{- range $ns := .Values.namespaces }}
|
||||||
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
|
|
||||||
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
|
|
||||||
{{- $namespaces := dict }}
|
|
||||||
|
|
||||||
{{/* collect namespaces from releases */}}
|
|
||||||
{{- range $x := $bundle.releases }}
|
|
||||||
{{- if not (hasKey $namespaces $x.namespace) }}
|
|
||||||
{{- $_ := set $namespaces $x.namespace false }}
|
|
||||||
{{- end }}
|
|
||||||
{{/* if at least one release requires a privileged namespace, then it should be privileged */}}
|
|
||||||
{{- if or $x.privileged (index $namespaces $x.namespace) }}
|
|
||||||
{{- $_ := set $namespaces $x.namespace true }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{/* Add extra namespaces */}}
|
|
||||||
{{- $_ := set $namespaces "cozy-public" false }}
|
|
||||||
{{- $_ := set $namespaces "cozy-fluxcd" false }}
|
|
||||||
|
|
||||||
{{- range $namespace, $privileged := $namespaces }}
|
|
||||||
---
|
---
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
kind: Namespace
|
kind: Namespace
|
||||||
metadata:
|
metadata:
|
||||||
annotations:
|
annotations:
|
||||||
"helm.sh/resource-policy": keep
|
"helm.sh/resource-policy": keep
|
||||||
{{- if $privileged }}
|
{{- if $ns.privileged }}
|
||||||
labels:
|
labels:
|
||||||
pod-security.kubernetes.io/enforce: privileged
|
pod-security.kubernetes.io/enforce: privileged
|
||||||
{{- end }}
|
{{- end }}
|
||||||
name: {{ $namespace }}
|
name: {{ $ns.name }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
30
packages/core/platform/values.yaml
Normal file
30
packages/core/platform/values.yaml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
namespaces:
|
||||||
|
- name: cozy-public
|
||||||
|
- name: cozy-system
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-cert-manager
|
||||||
|
- name: cozy-cilium
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-fluxcd
|
||||||
|
- name: cozy-grafana-operator
|
||||||
|
- name: cozy-kamaji
|
||||||
|
- name: cozy-cluster-api
|
||||||
|
privileged: true # for capk only
|
||||||
|
- name: cozy-dashboard
|
||||||
|
- name: cozy-kubeovn
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-kubevirt
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-kubevirt-cdi
|
||||||
|
- name: cozy-linstor
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-mariadb-operator
|
||||||
|
- name: cozy-metallb
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-monitoring
|
||||||
|
privileged: true
|
||||||
|
- name: cozy-postgres-operator
|
||||||
|
- name: cozy-rabbitmq-operator
|
||||||
|
- name: cozy-redis-operator
|
||||||
|
- name: cozy-telepresence
|
||||||
|
- name: cozy-victoria-metrics-operator
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
OUT=../../_out/repos/system
|
OUT=../../_out/repos/system
|
||||||
VERSION := 0.2.0
|
|
||||||
|
|
||||||
gen: fix-chartnames
|
gen: fix-chartnames
|
||||||
|
|
||||||
@@ -10,4 +9,4 @@ repo: fix-chartnames
|
|||||||
cd "$(OUT)" && helm repo index .
|
cd "$(OUT)" && helm repo index .
|
||||||
|
|
||||||
fix-chartnames:
|
fix-chartnames:
|
||||||
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do printf "name: cozy-%s\nversion: $(VERSION)\n" "$$i" > "$$i/Chart.yaml"; done
|
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do printf "name: cozy-%s\nversion: 1.0.0\n" "$$i" > "$$i/Chart.yaml"; done
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-capi-operator
|
name: cozy-capi-operator
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-capi-providers
|
name: cozy-capi-providers
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-cert-manager-issuers
|
name: cozy-cert-manager-issuers
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-cert-manager
|
name: cozy-cert-manager
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-cilium
|
name: cozy-cilium
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -2,18 +2,18 @@ NAMESPACE=cozy-cilium
|
|||||||
NAME=cilium
|
NAME=cilium
|
||||||
|
|
||||||
show:
|
show:
|
||||||
kubectl get hr -n cozy-cilium cilium -o jsonpath='{.spec.values}' | helm template --dry-run=server -n $(NAMESPACE) $(NAME) . -f -
|
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
|
||||||
|
|
||||||
apply:
|
apply:
|
||||||
kubectl get hr -n cozy-cilium cilium -o jsonpath='{.spec.values}' | helm upgrade -i -n $(NAMESPACE) $(NAME) . -f -
|
helm upgrade -i -n $(NAMESPACE) $(NAME) .
|
||||||
|
|
||||||
diff:
|
diff:
|
||||||
kubectl get hr -n cozy-cilium cilium -o jsonpath='{.spec.values}' | helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) . -f -
|
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .
|
||||||
|
|
||||||
update:
|
update:
|
||||||
rm -rf charts
|
rm -rf charts
|
||||||
helm repo add cilium https://helm.cilium.io/
|
helm repo add cilium https://helm.cilium.io/
|
||||||
helm repo update cilium
|
helm repo update cilium
|
||||||
helm pull cilium/cilium --untar --untardir charts --version 1.14
|
helm pull cilium/cilium --untar --untardir charts
|
||||||
sed -i -e '/Used in iptables/d' -e '/SYS_MODULE/d' charts/cilium/values.yaml
|
sed -i -e '/Used in iptables/d' -e '/SYS_MODULE/d' charts/cilium/values.yaml
|
||||||
patch -p3 --no-backup-if-mismatch < patches/fix-cgroups.patch
|
patch -p3 < patches/fix-cgroups.patch
|
||||||
|
|||||||
@@ -122,7 +122,7 @@ annotations:
|
|||||||
description: |
|
description: |
|
||||||
CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. the multi-pool IPAM mode).
|
CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. the multi-pool IPAM mode).
|
||||||
apiVersion: v2
|
apiVersion: v2
|
||||||
appVersion: 1.14.9
|
appVersion: 1.14.5
|
||||||
description: eBPF-based Networking, Security, and Observability
|
description: eBPF-based Networking, Security, and Observability
|
||||||
home: https://cilium.io/
|
home: https://cilium.io/
|
||||||
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.14/Documentation/images/logo-solo.svg
|
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.14/Documentation/images/logo-solo.svg
|
||||||
@@ -138,4 +138,4 @@ kubeVersion: '>= 1.16.0-0'
|
|||||||
name: cilium
|
name: cilium
|
||||||
sources:
|
sources:
|
||||||
- https://github.com/cilium/cilium
|
- https://github.com/cilium/cilium
|
||||||
version: 1.14.9
|
version: 1.14.5
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# cilium
|
# cilium
|
||||||
|
|
||||||
 
|
 
|
||||||
|
|
||||||
Cilium is open source software for providing and transparently securing
|
Cilium is open source software for providing and transparently securing
|
||||||
network connectivity and loadbalancing between application workloads such as
|
network connectivity and loadbalancing between application workloads such as
|
||||||
@@ -76,7 +76,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| authentication.mutual.spire.install.agent.securityContext | object | `{}` | Security context to be added to spire agent containers. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container |
|
| authentication.mutual.spire.install.agent.securityContext | object | `{}` | Security context to be added to spire agent containers. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container |
|
||||||
| authentication.mutual.spire.install.agent.serviceAccount | object | `{"create":true,"name":"spire-agent"}` | SPIRE agent service account |
|
| authentication.mutual.spire.install.agent.serviceAccount | object | `{"create":true,"name":"spire-agent"}` | SPIRE agent service account |
|
||||||
| authentication.mutual.spire.install.agent.skipKubeletVerification | bool | `true` | SPIRE Workload Attestor kubelet verification. |
|
| authentication.mutual.spire.install.agent.skipKubeletVerification | bool | `true` | SPIRE Workload Attestor kubelet verification. |
|
||||||
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
|
| authentication.mutual.spire.install.agent.tolerations | list | `[]` | SPIRE agent tolerations configuration ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
|
||||||
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
|
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
|
||||||
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
|
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
|
||||||
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
|
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
|
||||||
@@ -155,12 +155,12 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. |
|
| clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. |
|
||||||
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
|
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
|
||||||
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
|
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
|
||||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:5c16f8b8e22ce41e11998e70846fbcecea3a6b683a38253809ead8d871f6d8a3","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.14.9","useDigest":true}` | Clustermesh API server image. |
|
| clustermesh.apiserver.image | object | `{"digest":"sha256:7eaa35cf5452c43b1f7d0cde0d707823ae7e49965bcb54c053e31ea4e04c3d96","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.14.5","useDigest":true}` | Clustermesh API server image. |
|
||||||
| clustermesh.apiserver.kvstoremesh.enabled | bool | `false` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
|
| clustermesh.apiserver.kvstoremesh.enabled | bool | `false` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
|
||||||
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
|
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
|
||||||
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
|
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
|
||||||
| clustermesh.apiserver.kvstoremesh.extraVolumeMounts | list | `[]` | Additional KVStoreMesh volumeMounts. |
|
| clustermesh.apiserver.kvstoremesh.extraVolumeMounts | list | `[]` | Additional KVStoreMesh volumeMounts. |
|
||||||
| clustermesh.apiserver.kvstoremesh.image | object | `{"digest":"sha256:9d9efb25806660f3663b9cd803fb8679f2b115763470002a9770e2c1eb1e5b22","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/kvstoremesh","tag":"v1.14.9","useDigest":true}` | KVStoreMesh image. |
|
| clustermesh.apiserver.kvstoremesh.image | object | `{"digest":"sha256:d7137edd0efa2b1407b20088af3980a9993bb616d85bf9b55ea2891d1b99023a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/kvstoremesh","tag":"v1.14.5","useDigest":true}` | KVStoreMesh image. |
|
||||||
| clustermesh.apiserver.kvstoremesh.resources | object | `{}` | Resource requests and limits for the KVStoreMesh container |
|
| clustermesh.apiserver.kvstoremesh.resources | object | `{}` | Resource requests and limits for the KVStoreMesh container |
|
||||||
| clustermesh.apiserver.kvstoremesh.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | KVStoreMesh Security context |
|
| clustermesh.apiserver.kvstoremesh.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | KVStoreMesh Security context |
|
||||||
| clustermesh.apiserver.metrics.enabled | bool | `true` | Enables exporting apiserver metrics in OpenMetrics format. |
|
| clustermesh.apiserver.metrics.enabled | bool | `true` | Enables exporting apiserver metrics in OpenMetrics format. |
|
||||||
@@ -300,7 +300,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| eni.subnetIDsFilter | list | `[]` | Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. |
|
| eni.subnetIDsFilter | list | `[]` | Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. |
|
||||||
| eni.subnetTagsFilter | list | `[]` | Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. |
|
| eni.subnetTagsFilter | list | `[]` | Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. |
|
||||||
| eni.updateEC2AdapterLimitViaAPI | bool | `true` | Update ENI Adapter limits from the EC2 API |
|
| eni.updateEC2AdapterLimitViaAPI | bool | `true` | Update ENI Adapter limits from the EC2 API |
|
||||||
| envoy.affinity | object | `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"cilium.io/no-schedule","operator":"NotIn","values":["true"]}]}]}},"podAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]},"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. |
|
| envoy.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. |
|
||||||
| envoy.connectTimeoutSeconds | int | `2` | Time in seconds after which a TCP connection attempt times out |
|
| envoy.connectTimeoutSeconds | int | `2` | Time in seconds after which a TCP connection attempt times out |
|
||||||
| envoy.dnsPolicy | string | `nil` | DNS policy for Cilium envoy pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy |
|
| envoy.dnsPolicy | string | `nil` | DNS policy for Cilium envoy pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy |
|
||||||
| envoy.enabled | bool | `false` | Enable Envoy Proxy in standalone DaemonSet. |
|
| envoy.enabled | bool | `false` | Enable Envoy Proxy in standalone DaemonSet. |
|
||||||
@@ -312,7 +312,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| envoy.extraVolumes | list | `[]` | Additional envoy volumes. |
|
| envoy.extraVolumes | list | `[]` | Additional envoy volumes. |
|
||||||
| envoy.healthPort | int | `9878` | TCP port for the health API. |
|
| envoy.healthPort | int | `9878` | TCP port for the health API. |
|
||||||
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
|
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
|
||||||
| envoy.image | object | `{"digest":"sha256:39b75548447978230dedcf25da8940e4d3540c741045ef391a8e74dbb9661a86","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.26.7-bbde4095997ea57ead209f56158790d47224a0f5","useDigest":true}` | Envoy container image. |
|
| envoy.image | object | `{"digest":"sha256:992998398dadfff7117bfa9fdb7c9474fefab7f0237263f7c8114e106c67baca","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.26.6-ad82c7c56e88989992fd25d8d67747de865c823b","useDigest":true}` | Envoy container image. |
|
||||||
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
|
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
|
||||||
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
|
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
|
||||||
| envoy.log.format | string | `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` | The format string to use for laying out the log message metadata of Envoy. |
|
| envoy.log.format | string | `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` | The format string to use for laying out the log message metadata of Envoy. |
|
||||||
@@ -324,15 +324,14 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
|
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
|
||||||
| envoy.podSecurityContext | object | `{}` | Security Context for cilium-envoy pods. |
|
| envoy.podSecurityContext | object | `{}` | Security Context for cilium-envoy pods. |
|
||||||
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
|
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
|
||||||
| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. |
|
|
||||||
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
|
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
|
||||||
| envoy.prometheus.port | string | `"9964"` | Serve prometheus metrics for cilium-envoy on the configured port |
|
| envoy.prometheus.port | string | `"9964"` | Serve prometheus metrics for cilium-envoy on the configured port |
|
||||||
| envoy.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-envoy |
|
| envoy.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-envoy |
|
||||||
| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) Note that this setting applies to both cilium-envoy _and_ cilium-agent with Envoy enabled. |
|
| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) |
|
||||||
| envoy.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. |
|
| envoy.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. |
|
||||||
| envoy.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-envoy |
|
| envoy.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-envoy |
|
||||||
| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. |
|
| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy |
|
||||||
| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. |
|
| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy |
|
||||||
| envoy.readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe |
|
| envoy.readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe |
|
||||||
| envoy.readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe |
|
| envoy.readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe |
|
||||||
| envoy.resources | object | `{}` | Envoy resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ |
|
| envoy.resources | object | `{}` | Envoy resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ |
|
||||||
@@ -419,7 +418,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
|
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
|
||||||
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
|
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
|
||||||
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
|
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
|
||||||
| hubble.relay.image | object | `{"digest":"sha256:f506f3c6e0a979437cde79eb781654fda4f10ddb5642cebc4dc81254cfb7eeaa","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.14.9","useDigest":true}` | Hubble-relay container image. |
|
| hubble.relay.image | object | `{"digest":"sha256:dbef89f924a927043d02b40c18e417c1ea0e8f58b44523b80fef7e3652db24d4","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.14.5","useDigest":true}` | Hubble-relay container image. |
|
||||||
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
|
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
|
||||||
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
|
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
|
||||||
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||||
@@ -476,7 +475,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
|
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
|
||||||
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
|
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
|
||||||
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
|
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
|
||||||
| hubble.ui.backend.image | object | `{"digest":"sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.0","useDigest":true}` | Hubble-ui backend image. |
|
| hubble.ui.backend.image | object | `{"digest":"sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.12.1","useDigest":true}` | Hubble-ui backend image. |
|
||||||
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
|
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
|
||||||
| hubble.ui.backend.securityContext | object | `{}` | Hubble-ui backend security context. |
|
| hubble.ui.backend.securityContext | object | `{}` | Hubble-ui backend security context. |
|
||||||
| hubble.ui.baseUrl | string | `"/"` | Defines base url prefix for all hubble-ui http requests. It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. Trailing `/` is required for custom path, ex. `/service-map/` |
|
| hubble.ui.baseUrl | string | `"/"` | Defines base url prefix for all hubble-ui http requests. It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. Trailing `/` is required for custom path, ex. `/service-map/` |
|
||||||
@@ -484,7 +483,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
|
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
|
||||||
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
|
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
|
||||||
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
|
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
|
||||||
| hubble.ui.frontend.image | object | `{"digest":"sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.0","useDigest":true}` | Hubble-ui frontend image. |
|
| hubble.ui.frontend.image | object | `{"digest":"sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.12.1","useDigest":true}` | Hubble-ui frontend image. |
|
||||||
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
|
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
|
||||||
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
|
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
|
||||||
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
|
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
|
||||||
@@ -511,7 +510,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
|
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
|
||||||
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). |
|
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). |
|
||||||
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
|
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
|
||||||
| image | object | `{"digest":"sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.9","useDigest":true}` | Agent container image. |
|
| image | object | `{"digest":"sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.5","useDigest":true}` | Agent container image. |
|
||||||
| imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images |
|
| imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images |
|
||||||
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
|
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
|
||||||
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
|
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
|
||||||
@@ -619,7 +618,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. |
|
| operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. |
|
||||||
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
|
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
|
||||||
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
|
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
|
||||||
| operator.image | object | `{"alibabacloudDigest":"sha256:765314779093b54750f83280f009229f20fe1f28466a633d9bb4143d2ad669c5","awsDigest":"sha256:041ad5b49ae63ba0f1974e1a1d9ebf9f52541cd2813088fa687f9d544125a1ec","azureDigest":"sha256:2d3b9d868eb03fa9256d34192a734a2abab283f527a9c97b7cefcd3401649d17","genericDigest":"sha256:1552d653870dd8ebbd16ee985a5497dd78a2097370978b0cfbd2da2072f30712","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.14.9","useDigest":true}` | cilium-operator image. |
|
| operator.image | object | `{"alibabacloudDigest":"sha256:e0152c498ba73c56a82eee2a706c8f400e9a6999c665af31a935bdf08e659bc3","awsDigest":"sha256:785ccf1267d0ed3ba9e4bd8166577cb4f9e4ce996af26b27c9d5c554a0d5b09a","azureDigest":"sha256:9203f5583aa34e716d7a6588ebd144e43ce3b77873f578fc12b2679e33591353","genericDigest":"sha256:303f9076bdc73b3fc32aaedee64a14f6f44c8bb08ee9e3956d443021103ebe7a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.14.5","useDigest":true}` | cilium-operator image. |
|
||||||
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
|
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
|
||||||
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||||
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
|
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
|
||||||
@@ -666,7 +665,7 @@ contributors across the globe, there is almost always someone available to help.
|
|||||||
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
|
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
|
||||||
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
|
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
|
||||||
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
|
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
|
||||||
| preflight.image | object | `{"digest":"sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.9","useDigest":true}` | Cilium pre-flight image. |
|
| preflight.image | object | `{"digest":"sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.5","useDigest":true}` | Cilium pre-flight image. |
|
||||||
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||||
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
|
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
|
||||||
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
|
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
|
||||||
|
|||||||
@@ -11,9 +11,9 @@ set -o nounset
|
|||||||
# dependencies on anything that is part of the startup script
|
# dependencies on anything that is part of the startup script
|
||||||
# itself, and can be safely run multiple times per node (e.g. in
|
# itself, and can be safely run multiple times per node (e.g. in
|
||||||
# case of a restart).
|
# case of a restart).
|
||||||
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
|
if [[ "$(iptables-save | grep -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
|
||||||
then
|
then
|
||||||
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
|
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
|
||||||
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
|
iptables-save | grep -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
|
||||||
fi
|
fi
|
||||||
echo 'Done!'
|
echo 'Done!'
|
||||||
|
|||||||
@@ -100,7 +100,7 @@ then
|
|||||||
# Since that version containerd no longer allows missing configuration for the CNI,
|
# Since that version containerd no longer allows missing configuration for the CNI,
|
||||||
# not even for pods with hostNetwork set to true. Thus, we add a temporary one.
|
# not even for pods with hostNetwork set to true. Thus, we add a temporary one.
|
||||||
# This will be replaced with the real config by the agent pod.
|
# This will be replaced with the real config by the agent pod.
|
||||||
echo -e '{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}' > /etc/cni/net.d/05-cilium.conf
|
echo -e "{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}" > /etc/cni/net.d/05-cilium.conf
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Start containerd. It won't create it's CNI configuration file anymore.
|
# Start containerd. It won't create it's CNI configuration file anymore.
|
||||||
|
|||||||
@@ -447,9 +447,6 @@ spec:
|
|||||||
volumeMounts:
|
volumeMounts:
|
||||||
- name: tmp
|
- name: tmp
|
||||||
mountPath: /tmp
|
mountPath: /tmp
|
||||||
{{- with .Values.extraVolumeMounts }}
|
|
||||||
{{- toYaml . | nindent 8 }}
|
|
||||||
{{- end }}
|
|
||||||
terminationMessagePolicy: FallbackToLogsOnError
|
terminationMessagePolicy: FallbackToLogsOnError
|
||||||
{{- if .Values.cgroup.autoMount.enabled }}
|
{{- if .Values.cgroup.autoMount.enabled }}
|
||||||
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
|
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
|
||||||
|
|||||||
@@ -34,20 +34,6 @@ spec:
|
|||||||
metricRelabelings:
|
metricRelabelings:
|
||||||
{{- toYaml . | nindent 4 }}
|
{{- toYaml . | nindent 4 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- if .Values.envoy.prometheus.serviceMonitor.enabled }}
|
|
||||||
- port: envoy-metrics
|
|
||||||
interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }}
|
|
||||||
honorLabels: true
|
|
||||||
path: /metrics
|
|
||||||
{{- with .Values.envoy.prometheus.serviceMonitor.relabelings }}
|
|
||||||
relabelings:
|
|
||||||
{{- toYaml . | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.envoy.prometheus.serviceMonitor.metricRelabelings }}
|
|
||||||
metricRelabelings:
|
|
||||||
{{- toYaml . | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- end }}
|
|
||||||
targetLabels:
|
targetLabels:
|
||||||
- k8s-app
|
- k8s-app
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -13,7 +13,6 @@
|
|||||||
{{- $fragmentTracking := "true" -}}
|
{{- $fragmentTracking := "true" -}}
|
||||||
{{- $defaultKubeProxyReplacement := "false" -}}
|
{{- $defaultKubeProxyReplacement := "false" -}}
|
||||||
{{- $azureUsePrimaryAddress := "true" -}}
|
{{- $azureUsePrimaryAddress := "true" -}}
|
||||||
{{- $defaultDNSProxyEnableTransparentMode := "false" -}}
|
|
||||||
|
|
||||||
{{- /* Default values when 1.8 was initially deployed */ -}}
|
{{- /* Default values when 1.8 was initially deployed */ -}}
|
||||||
{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}}
|
{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}}
|
||||||
@@ -49,7 +48,6 @@
|
|||||||
{{- $azureUsePrimaryAddress = "false" -}}
|
{{- $azureUsePrimaryAddress = "false" -}}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- $defaultKubeProxyReplacement = "disabled" -}}
|
{{- $defaultKubeProxyReplacement = "disabled" -}}
|
||||||
{{- $defaultDNSProxyEnableTransparentMode = "true" -}}
|
|
||||||
{{- end -}}
|
{{- end -}}
|
||||||
|
|
||||||
{{- /* Default values when 1.14 was initially deployed */ -}}
|
{{- /* Default values when 1.14 was initially deployed */ -}}
|
||||||
@@ -432,16 +430,10 @@ data:
|
|||||||
# - vxlan (default)
|
# - vxlan (default)
|
||||||
# - geneve
|
# - geneve
|
||||||
{{- if .Values.gke.enabled }}
|
{{- if .Values.gke.enabled }}
|
||||||
{{- if ne (.Values.routingMode | default "native") "native" }}
|
|
||||||
{{- fail (printf "RoutingMode must be set to native when gke.enabled=true" )}}
|
|
||||||
{{- end }}
|
|
||||||
routing-mode: "native"
|
routing-mode: "native"
|
||||||
enable-endpoint-routes: "true"
|
enable-endpoint-routes: "true"
|
||||||
enable-local-node-route: "false"
|
enable-local-node-route: "false"
|
||||||
{{- else if .Values.aksbyocni.enabled }}
|
{{- else if .Values.aksbyocni.enabled }}
|
||||||
{{- if ne (.Values.routingMode | default "tunnel") "tunnel" }}
|
|
||||||
{{- fail (printf "RoutingMode must be set to tunnel when aksbyocni.enabled=true" )}}
|
|
||||||
{{- end }}
|
|
||||||
routing-mode: "tunnel"
|
routing-mode: "tunnel"
|
||||||
tunnel-protocol: "vxlan"
|
tunnel-protocol: "vxlan"
|
||||||
{{- else if .Values.routingMode }}
|
{{- else if .Values.routingMode }}
|
||||||
@@ -1100,13 +1092,6 @@ data:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
|
|
||||||
{{- if .Values.dnsProxy }}
|
{{- if .Values.dnsProxy }}
|
||||||
{{- if hasKey .Values.dnsProxy "enableTransparentMode" }}
|
|
||||||
# explicit setting gets precedence
|
|
||||||
dnsproxy-enable-transparent-mode: {{ .Values.dnsProxy.enableTransparentMode | quote }}
|
|
||||||
{{- else if eq $cniChainingMode "none" }}
|
|
||||||
# default DNS proxy to transparent mode in non-chaining modes
|
|
||||||
dnsproxy-enable-transparent-mode: {{ $defaultDNSProxyEnableTransparentMode | quote }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.dnsProxy.dnsRejectResponseCode }}
|
{{- if .Values.dnsProxy.dnsRejectResponseCode }}
|
||||||
tofqdns-dns-reject-response-code: {{ .Values.dnsProxy.dnsRejectResponseCode | quote }}
|
tofqdns-dns-reject-response-code: {{ .Values.dnsProxy.dnsRejectResponseCode | quote }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
|
|||||||
@@ -82,7 +82,7 @@ spec:
|
|||||||
{{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }}
|
{{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }}
|
||||||
startupProbe:
|
startupProbe:
|
||||||
httpGet:
|
httpGet:
|
||||||
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
|
host: "localhost"
|
||||||
path: /healthz
|
path: /healthz
|
||||||
port: {{ .Values.envoy.healthPort }}
|
port: {{ .Values.envoy.healthPort }}
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
@@ -92,7 +92,7 @@ spec:
|
|||||||
{{- end }}
|
{{- end }}
|
||||||
livenessProbe:
|
livenessProbe:
|
||||||
httpGet:
|
httpGet:
|
||||||
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
|
host: "localhost"
|
||||||
path: /healthz
|
path: /healthz
|
||||||
port: {{ .Values.envoy.healthPort }}
|
port: {{ .Values.envoy.healthPort }}
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
@@ -110,7 +110,7 @@ spec:
|
|||||||
timeoutSeconds: 5
|
timeoutSeconds: 5
|
||||||
readinessProbe:
|
readinessProbe:
|
||||||
httpGet:
|
httpGet:
|
||||||
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
|
host: "localhost"
|
||||||
path: /healthz
|
path: /healthz
|
||||||
port: {{ .Values.envoy.healthPort }}
|
port: {{ .Values.envoy.healthPort }}
|
||||||
scheme: HTTP
|
scheme: HTTP
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ metadata:
|
|||||||
namespace: {{ .Values.envoy.prometheus.serviceMonitor.namespace | default .Release.Namespace }}
|
namespace: {{ .Values.envoy.prometheus.serviceMonitor.namespace | default .Release.Namespace }}
|
||||||
labels:
|
labels:
|
||||||
app.kubernetes.io/part-of: cilium
|
app.kubernetes.io/part-of: cilium
|
||||||
app.kubernetes.io/name: cilium-envoy
|
|
||||||
{{- with .Values.envoy.prometheus.serviceMonitor.labels }}
|
{{- with .Values.envoy.prometheus.serviceMonitor.labels }}
|
||||||
{{- toYaml . | nindent 4 }}
|
{{- toYaml . | nindent 4 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
@@ -23,7 +22,7 @@ spec:
|
|||||||
matchNames:
|
matchNames:
|
||||||
- {{ .Release.Namespace }}
|
- {{ .Release.Namespace }}
|
||||||
endpoints:
|
endpoints:
|
||||||
- port: envoy-metrics
|
- port: metrics
|
||||||
interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }}
|
interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }}
|
||||||
honorLabels: true
|
honorLabels: true
|
||||||
path: /metrics
|
path: /metrics
|
||||||
|
|||||||
@@ -66,13 +66,8 @@ spec:
|
|||||||
- /tmp/ready
|
- /tmp/ready
|
||||||
initialDelaySeconds: 5
|
initialDelaySeconds: 5
|
||||||
periodSeconds: 5
|
periodSeconds: 5
|
||||||
env:
|
|
||||||
- name: K8S_NODE_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
apiVersion: v1
|
|
||||||
fieldPath: spec.nodeName
|
|
||||||
{{- with .Values.preflight.extraEnv }}
|
{{- with .Values.preflight.extraEnv }}
|
||||||
|
env:
|
||||||
{{- toYaml . | trim | nindent 12 }}
|
{{- toYaml . | trim | nindent 12 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
|
|||||||
@@ -88,12 +88,10 @@ spec:
|
|||||||
nodeSelector:
|
nodeSelector:
|
||||||
{{- toYaml . | nindent 8 }}
|
{{- toYaml . | nindent 8 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
tolerations:
|
|
||||||
{{- with .Values.authentication.mutual.spire.install.agent.tolerations }}
|
{{- with .Values.authentication.mutual.spire.install.agent.tolerations }}
|
||||||
|
tolerations:
|
||||||
{{- toYaml . | trim | nindent 8 }}
|
{{- toYaml . | trim | nindent 8 }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
- key: {{ .Values.agentNotReadyTaintKey | default "node.cilium.io/agent-not-ready" }}
|
|
||||||
effect: NoSchedule
|
|
||||||
volumes:
|
volumes:
|
||||||
- name: spire-config
|
- name: spire-config
|
||||||
configMap:
|
configMap:
|
||||||
|
|||||||
@@ -143,10 +143,10 @@ rollOutCiliumPods: false
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/cilium"
|
repository: "quay.io/cilium/cilium"
|
||||||
tag: "v1.14.9"
|
tag: "v1.14.5"
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
# cilium-digest
|
# cilium-digest
|
||||||
digest: "sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301"
|
digest: "sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
|
|
||||||
# -- Affinity for cilium-agent.
|
# -- Affinity for cilium-agent.
|
||||||
@@ -1109,9 +1109,9 @@ hubble:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/hubble-relay"
|
repository: "quay.io/cilium/hubble-relay"
|
||||||
tag: "v1.14.9"
|
tag: "v1.14.5"
|
||||||
# hubble-relay-digest
|
# hubble-relay-digest
|
||||||
digest: "sha256:f506f3c6e0a979437cde79eb781654fda4f10ddb5642cebc4dc81254cfb7eeaa"
|
digest: "sha256:dbef89f924a927043d02b40c18e417c1ea0e8f58b44523b80fef7e3652db24d4"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
|
|
||||||
@@ -1337,8 +1337,8 @@ hubble:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/hubble-ui-backend"
|
repository: "quay.io/cilium/hubble-ui-backend"
|
||||||
tag: "v0.13.0"
|
tag: "v0.12.1"
|
||||||
digest: "sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803"
|
digest: "sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
|
|
||||||
@@ -1368,8 +1368,8 @@ hubble:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/hubble-ui"
|
repository: "quay.io/cilium/hubble-ui"
|
||||||
tag: "v0.13.0"
|
tag: "v0.12.1"
|
||||||
digest: "sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666"
|
digest: "sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
|
|
||||||
@@ -1853,9 +1853,9 @@ envoy:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/cilium-envoy"
|
repository: "quay.io/cilium/cilium-envoy"
|
||||||
tag: "v1.26.7-bbde4095997ea57ead209f56158790d47224a0f5"
|
tag: "v1.26.6-ad82c7c56e88989992fd25d8d67747de865c823b"
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
digest: "sha256:39b75548447978230dedcf25da8940e4d3540c741045ef391a8e74dbb9661a86"
|
digest: "sha256:992998398dadfff7117bfa9fdb7c9474fefab7f0237263f7c8114e106c67baca"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
|
|
||||||
# -- Additional containers added to the cilium Envoy DaemonSet.
|
# -- Additional containers added to the cilium Envoy DaemonSet.
|
||||||
@@ -1968,20 +1968,7 @@ envoy:
|
|||||||
labelSelector:
|
labelSelector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
k8s-app: cilium-envoy
|
k8s-app: cilium-envoy
|
||||||
podAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- topologyKey: kubernetes.io/hostname
|
|
||||||
labelSelector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: cilium
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: cilium.io/no-schedule
|
|
||||||
operator: NotIn
|
|
||||||
values:
|
|
||||||
- "true"
|
|
||||||
# -- Node selector for cilium-envoy.
|
# -- Node selector for cilium-envoy.
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
kubernetes.io/os: linux
|
kubernetes.io/os: linux
|
||||||
@@ -2002,16 +1989,12 @@ envoy:
|
|||||||
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
||||||
dnsPolicy: ~
|
dnsPolicy: ~
|
||||||
|
|
||||||
# -- Configure Cilium Envoy Prometheus options.
|
|
||||||
# Note that some of these apply to either cilium-agent or cilium-envoy.
|
|
||||||
prometheus:
|
prometheus:
|
||||||
# -- Enable prometheus metrics for cilium-envoy
|
# -- Enable prometheus metrics for cilium-envoy
|
||||||
enabled: true
|
enabled: true
|
||||||
serviceMonitor:
|
serviceMonitor:
|
||||||
# -- Enable service monitors.
|
# -- Enable service monitors.
|
||||||
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
||||||
# Note that this setting applies to both cilium-envoy _and_ cilium-agent
|
|
||||||
# with Envoy enabled.
|
|
||||||
enabled: false
|
enabled: false
|
||||||
# -- Labels to add to ServiceMonitor cilium-envoy
|
# -- Labels to add to ServiceMonitor cilium-envoy
|
||||||
labels: {}
|
labels: {}
|
||||||
@@ -2023,14 +2006,12 @@ envoy:
|
|||||||
# service monitors configured.
|
# service monitors configured.
|
||||||
# namespace: ""
|
# namespace: ""
|
||||||
# -- Relabeling configs for the ServiceMonitor cilium-envoy
|
# -- Relabeling configs for the ServiceMonitor cilium-envoy
|
||||||
# or for cilium-agent with Envoy configured.
|
|
||||||
relabelings:
|
relabelings:
|
||||||
- sourceLabels:
|
- sourceLabels:
|
||||||
- __meta_kubernetes_pod_node_name
|
- __meta_kubernetes_pod_node_name
|
||||||
targetLabel: node
|
targetLabel: node
|
||||||
replacement: ${1}
|
replacement: ${1}
|
||||||
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
|
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
|
||||||
# or for cilium-agent with Envoy configured.
|
|
||||||
metricRelabelings: ~
|
metricRelabelings: ~
|
||||||
# -- Serve prometheus metrics for cilium-envoy on the configured port
|
# -- Serve prometheus metrics for cilium-envoy on the configured port
|
||||||
port: "9964"
|
port: "9964"
|
||||||
@@ -2269,15 +2250,15 @@ operator:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/operator"
|
repository: "quay.io/cilium/operator"
|
||||||
tag: "v1.14.9"
|
tag: "v1.14.5"
|
||||||
# operator-generic-digest
|
# operator-generic-digest
|
||||||
genericDigest: "sha256:1552d653870dd8ebbd16ee985a5497dd78a2097370978b0cfbd2da2072f30712"
|
genericDigest: "sha256:303f9076bdc73b3fc32aaedee64a14f6f44c8bb08ee9e3956d443021103ebe7a"
|
||||||
# operator-azure-digest
|
# operator-azure-digest
|
||||||
azureDigest: "sha256:2d3b9d868eb03fa9256d34192a734a2abab283f527a9c97b7cefcd3401649d17"
|
azureDigest: "sha256:9203f5583aa34e716d7a6588ebd144e43ce3b77873f578fc12b2679e33591353"
|
||||||
# operator-aws-digest
|
# operator-aws-digest
|
||||||
awsDigest: "sha256:041ad5b49ae63ba0f1974e1a1d9ebf9f52541cd2813088fa687f9d544125a1ec"
|
awsDigest: "sha256:785ccf1267d0ed3ba9e4bd8166577cb4f9e4ce996af26b27c9d5c554a0d5b09a"
|
||||||
# operator-alibabacloud-digest
|
# operator-alibabacloud-digest
|
||||||
alibabacloudDigest: "sha256:765314779093b54750f83280f009229f20fe1f28466a633d9bb4143d2ad669c5"
|
alibabacloudDigest: "sha256:e0152c498ba73c56a82eee2a706c8f400e9a6999c665af31a935bdf08e659bc3"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
suffix: ""
|
suffix: ""
|
||||||
@@ -2554,9 +2535,9 @@ preflight:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/cilium"
|
repository: "quay.io/cilium/cilium"
|
||||||
tag: "v1.14.9"
|
tag: "v1.14.5"
|
||||||
# cilium-digest
|
# cilium-digest
|
||||||
digest: "sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301"
|
digest: "sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
|
|
||||||
@@ -2704,9 +2685,9 @@ clustermesh:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/clustermesh-apiserver"
|
repository: "quay.io/cilium/clustermesh-apiserver"
|
||||||
tag: "v1.14.9"
|
tag: "v1.14.5"
|
||||||
# clustermesh-apiserver-digest
|
# clustermesh-apiserver-digest
|
||||||
digest: "sha256:5c16f8b8e22ce41e11998e70846fbcecea3a6b683a38253809ead8d871f6d8a3"
|
digest: "sha256:7eaa35cf5452c43b1f7d0cde0d707823ae7e49965bcb54c053e31ea4e04c3d96"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
|
|
||||||
@@ -2751,9 +2732,9 @@ clustermesh:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/kvstoremesh"
|
repository: "quay.io/cilium/kvstoremesh"
|
||||||
tag: "v1.14.9"
|
tag: "v1.14.5"
|
||||||
# kvstoremesh-digest
|
# kvstoremesh-digest
|
||||||
digest: "sha256:9d9efb25806660f3663b9cd803fb8679f2b115763470002a9770e2c1eb1e5b22"
|
digest: "sha256:d7137edd0efa2b1407b20088af3980a9993bb616d85bf9b55ea2891d1b99023a"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
pullPolicy: "IfNotPresent"
|
pullPolicy: "IfNotPresent"
|
||||||
|
|
||||||
@@ -3105,8 +3086,6 @@ dnsProxy:
|
|||||||
proxyPort: 0
|
proxyPort: 0
|
||||||
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
|
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
|
||||||
proxyResponseMaxDelay: 100ms
|
proxyResponseMaxDelay: 100ms
|
||||||
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
|
|
||||||
# enableTransparentMode: true
|
|
||||||
|
|
||||||
# -- SCTP Configuration Values
|
# -- SCTP Configuration Values
|
||||||
sctp:
|
sctp:
|
||||||
@@ -3157,21 +3136,8 @@ authentication:
|
|||||||
# -- SPIRE Workload Attestor kubelet verification.
|
# -- SPIRE Workload Attestor kubelet verification.
|
||||||
skipKubeletVerification: true
|
skipKubeletVerification: true
|
||||||
# -- SPIRE agent tolerations configuration
|
# -- SPIRE agent tolerations configuration
|
||||||
# By default it follows the same tolerations as the agent itself
|
|
||||||
# to allow the Cilium agent on this node to connect to SPIRE.
|
|
||||||
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
||||||
tolerations:
|
tolerations: []
|
||||||
- key: node.kubernetes.io/not-ready
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node-role.kubernetes.io/master
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node-role.kubernetes.io/control-plane
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node.cloudprovider.kubernetes.io/uninitialized
|
|
||||||
effect: NoSchedule
|
|
||||||
value: "true"
|
|
||||||
- key: CriticalAddonsOnly
|
|
||||||
operator: "Exists"
|
|
||||||
# -- SPIRE agent affinity configuration
|
# -- SPIRE agent affinity configuration
|
||||||
affinity: {}
|
affinity: {}
|
||||||
# -- SPIRE agent nodeSelector configuration
|
# -- SPIRE agent nodeSelector configuration
|
||||||
|
|||||||
@@ -1854,9 +1854,9 @@ envoy:
|
|||||||
image:
|
image:
|
||||||
override: ~
|
override: ~
|
||||||
repository: "quay.io/cilium/cilium-envoy"
|
repository: "quay.io/cilium/cilium-envoy"
|
||||||
tag: "v1.26.7-bbde4095997ea57ead209f56158790d47224a0f5"
|
tag: "v1.26.6-ad82c7c56e88989992fd25d8d67747de865c823b"
|
||||||
pullPolicy: "${PULL_POLICY}"
|
pullPolicy: "${PULL_POLICY}"
|
||||||
digest: "sha256:39b75548447978230dedcf25da8940e4d3540c741045ef391a8e74dbb9661a86"
|
digest: "sha256:992998398dadfff7117bfa9fdb7c9474fefab7f0237263f7c8114e106c67baca"
|
||||||
useDigest: true
|
useDigest: true
|
||||||
|
|
||||||
# -- Additional containers added to the cilium Envoy DaemonSet.
|
# -- Additional containers added to the cilium Envoy DaemonSet.
|
||||||
@@ -1969,20 +1969,7 @@ envoy:
|
|||||||
labelSelector:
|
labelSelector:
|
||||||
matchLabels:
|
matchLabels:
|
||||||
k8s-app: cilium-envoy
|
k8s-app: cilium-envoy
|
||||||
podAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
- topologyKey: kubernetes.io/hostname
|
|
||||||
labelSelector:
|
|
||||||
matchLabels:
|
|
||||||
k8s-app: cilium
|
|
||||||
nodeAffinity:
|
|
||||||
requiredDuringSchedulingIgnoredDuringExecution:
|
|
||||||
nodeSelectorTerms:
|
|
||||||
- matchExpressions:
|
|
||||||
- key: cilium.io/no-schedule
|
|
||||||
operator: NotIn
|
|
||||||
values:
|
|
||||||
- "true"
|
|
||||||
# -- Node selector for cilium-envoy.
|
# -- Node selector for cilium-envoy.
|
||||||
nodeSelector:
|
nodeSelector:
|
||||||
kubernetes.io/os: linux
|
kubernetes.io/os: linux
|
||||||
@@ -2003,16 +1990,12 @@ envoy:
|
|||||||
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
||||||
dnsPolicy: ~
|
dnsPolicy: ~
|
||||||
|
|
||||||
# -- Configure Cilium Envoy Prometheus options.
|
|
||||||
# Note that some of these apply to either cilium-agent or cilium-envoy.
|
|
||||||
prometheus:
|
prometheus:
|
||||||
# -- Enable prometheus metrics for cilium-envoy
|
# -- Enable prometheus metrics for cilium-envoy
|
||||||
enabled: true
|
enabled: true
|
||||||
serviceMonitor:
|
serviceMonitor:
|
||||||
# -- Enable service monitors.
|
# -- Enable service monitors.
|
||||||
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
||||||
# Note that this setting applies to both cilium-envoy _and_ cilium-agent
|
|
||||||
# with Envoy enabled.
|
|
||||||
enabled: false
|
enabled: false
|
||||||
# -- Labels to add to ServiceMonitor cilium-envoy
|
# -- Labels to add to ServiceMonitor cilium-envoy
|
||||||
labels: {}
|
labels: {}
|
||||||
@@ -2024,14 +2007,12 @@ envoy:
|
|||||||
# service monitors configured.
|
# service monitors configured.
|
||||||
# namespace: ""
|
# namespace: ""
|
||||||
# -- Relabeling configs for the ServiceMonitor cilium-envoy
|
# -- Relabeling configs for the ServiceMonitor cilium-envoy
|
||||||
# or for cilium-agent with Envoy configured.
|
|
||||||
relabelings:
|
relabelings:
|
||||||
- sourceLabels:
|
- sourceLabels:
|
||||||
- __meta_kubernetes_pod_node_name
|
- __meta_kubernetes_pod_node_name
|
||||||
targetLabel: node
|
targetLabel: node
|
||||||
replacement: ${1}
|
replacement: ${1}
|
||||||
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
|
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
|
||||||
# or for cilium-agent with Envoy configured.
|
|
||||||
metricRelabelings: ~
|
metricRelabelings: ~
|
||||||
# -- Serve prometheus metrics for cilium-envoy on the configured port
|
# -- Serve prometheus metrics for cilium-envoy on the configured port
|
||||||
port: "9964"
|
port: "9964"
|
||||||
@@ -3108,8 +3089,6 @@ dnsProxy:
|
|||||||
proxyPort: 0
|
proxyPort: 0
|
||||||
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
|
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
|
||||||
proxyResponseMaxDelay: 100ms
|
proxyResponseMaxDelay: 100ms
|
||||||
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
|
|
||||||
# enableTransparentMode: true
|
|
||||||
|
|
||||||
# -- SCTP Configuration Values
|
# -- SCTP Configuration Values
|
||||||
sctp:
|
sctp:
|
||||||
@@ -3160,21 +3139,8 @@ authentication:
|
|||||||
# -- SPIRE Workload Attestor kubelet verification.
|
# -- SPIRE Workload Attestor kubelet verification.
|
||||||
skipKubeletVerification: true
|
skipKubeletVerification: true
|
||||||
# -- SPIRE agent tolerations configuration
|
# -- SPIRE agent tolerations configuration
|
||||||
# By default it follows the same tolerations as the agent itself
|
|
||||||
# to allow the Cilium agent on this node to connect to SPIRE.
|
|
||||||
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
||||||
tolerations:
|
tolerations: []
|
||||||
- key: node.kubernetes.io/not-ready
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node-role.kubernetes.io/master
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node-role.kubernetes.io/control-plane
|
|
||||||
effect: NoSchedule
|
|
||||||
- key: node.cloudprovider.kubernetes.io/uninitialized
|
|
||||||
effect: NoSchedule
|
|
||||||
value: "true"
|
|
||||||
- key: CriticalAddonsOnly
|
|
||||||
operator: "Exists"
|
|
||||||
# -- SPIRE agent affinity configuration
|
# -- SPIRE agent affinity configuration
|
||||||
affinity: {}
|
affinity: {}
|
||||||
# -- SPIRE agent nodeSelector configuration
|
# -- SPIRE agent nodeSelector configuration
|
||||||
|
|||||||
@@ -3,10 +3,11 @@ cilium:
|
|||||||
enabled: false
|
enabled: false
|
||||||
externalIPs:
|
externalIPs:
|
||||||
enabled: true
|
enabled: true
|
||||||
|
tunnel: disabled
|
||||||
autoDirectNodeRoutes: false
|
autoDirectNodeRoutes: false
|
||||||
kubeProxyReplacement: strict
|
kubeProxyReplacement: strict
|
||||||
bpf:
|
bpf:
|
||||||
masquerade: false
|
masquerade: true
|
||||||
loadBalancer:
|
loadBalancer:
|
||||||
algorithm: maglev
|
algorithm: maglev
|
||||||
cgroup:
|
cgroup:
|
||||||
@@ -24,4 +25,3 @@ cilium:
|
|||||||
configMap: cni-configuration
|
configMap: cni-configuration
|
||||||
routingMode: native
|
routingMode: native
|
||||||
enableIPv4Masquerade: false
|
enableIPv4Masquerade: false
|
||||||
enableIdentityMark: false
|
|
||||||
|
|||||||
@@ -1,2 +0,0 @@
|
|||||||
name: cozy-clickhouse-operator
|
|
||||||
version: 0.3.0
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
NAME=clickhouse-operator
|
|
||||||
NAMESPACE=cozy-clickhouse-operator
|
|
||||||
|
|
||||||
show:
|
|
||||||
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
|
|
||||||
|
|
||||||
apply:
|
|
||||||
helm upgrade -i -n $(NAMESPACE) $(NAME) .
|
|
||||||
|
|
||||||
diff:
|
|
||||||
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .
|
|
||||||
|
|
||||||
update:
|
|
||||||
rm -rf charts
|
|
||||||
helm repo add clickhouse-operator https://docs.altinity.com/clickhouse-operator/
|
|
||||||
helm repo update clickhouse-operator
|
|
||||||
helm pull clickhouse-operator/altinity-clickhouse-operator --untar --untardir charts
|
|
||||||
@@ -1,17 +0,0 @@
|
|||||||
apiVersion: v2
|
|
||||||
appVersion: 0.23.4
|
|
||||||
description: 'Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator). The
|
|
||||||
ClickHouse Operator creates, configures and manages ClickHouse clusters running
|
|
||||||
on Kubernetes. For upgrade please install CRDs separately: ```bash kubectl apply
|
|
||||||
-f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml kubectl
|
|
||||||
apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml kubectl
|
|
||||||
apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
|
|
||||||
```'
|
|
||||||
home: https://github.com/Altinity/clickhouse-operator
|
|
||||||
icon: https://logosandtypes.com/wp-content/uploads/2020/12/altinity.svg
|
|
||||||
maintainers:
|
|
||||||
- email: support@altinity.com
|
|
||||||
name: altinity
|
|
||||||
name: altinity-clickhouse-operator
|
|
||||||
type: application
|
|
||||||
version: 0.23.4
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
# altinity-clickhouse-operator
|
|
||||||
|
|
||||||
  
|
|
||||||
|
|
||||||
Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator).
|
|
||||||
|
|
||||||
The ClickHouse Operator creates, configures and manages ClickHouse clusters running on Kubernetes.
|
|
||||||
|
|
||||||
For upgrade please install CRDs separately:
|
|
||||||
```bash
|
|
||||||
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
|
|
||||||
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
|
|
||||||
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
**Homepage:** <https://github.com/Altinity/clickhouse-operator>
|
|
||||||
|
|
||||||
## Maintainers
|
|
||||||
|
|
||||||
| Name | Email | Url |
|
|
||||||
| ---- | ------ | --- |
|
|
||||||
| altinity | <support@altinity.com> | |
|
|
||||||
|
|
||||||
## Values
|
|
||||||
|
|
||||||
| Key | Type | Default | Description |
|
|
||||||
|-----|------|---------|-------------|
|
|
||||||
| additionalResources | list | `[]` | list of additional resources to create (are processed via `tpl` function), useful for create ClickHouse clusters together with clickhouse-operator, look `kubectl explain chi` for details |
|
|
||||||
| affinity | object | `{}` | affinity for scheduler pod assignment, look `kubectl explain pod.spec.affinity` for details |
|
|
||||||
| configs | object | check the values.yaml file for the config content, auto-generated from latest operator release | clickhouse-operator configs |
|
|
||||||
| dashboards.additionalLabels | object | `{"grafana_dashboard":""}` | labels to add to a secret with dashboards |
|
|
||||||
| dashboards.annotations | object | `{}` | annotations to add to a secret with dashboards |
|
|
||||||
| dashboards.enabled | bool | `false` | provision grafana dashboards as secrets (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-6.33.1/charts/grafana/values.yaml#L679 ) |
|
|
||||||
| dashboards.grafana_folder | string | `"clickhouse"` | |
|
|
||||||
| fullnameOverride | string | `""` | full name of the chart. |
|
|
||||||
| imagePullSecrets | list | `[]` | image pull secret for private images in clickhouse-operator pod possible value format [{"name":"your-secret-name"}] look `kubectl explain pod.spec.imagePullSecrets` for details |
|
|
||||||
| metrics.containerSecurityContext | object | `{}` | |
|
|
||||||
| metrics.enabled | bool | `true` | |
|
|
||||||
| metrics.env | list | `[]` | additional environment variables for the deployment of metrics-exporter containers possible format value [{"name": "SAMPLE", "value": "text"}] |
|
|
||||||
| metrics.image.pullPolicy | string | `"IfNotPresent"` | image pull policy |
|
|
||||||
| metrics.image.repository | string | `"altinity/metrics-exporter"` | image repository |
|
|
||||||
| metrics.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
|
|
||||||
| metrics.resources | object | `{}` | custom resource configuration |
|
|
||||||
| nameOverride | string | `""` | override name of the chart |
|
|
||||||
| nodeSelector | object | `{}` | node for scheduler pod assignment, look `kubectl explain pod.spec.nodeSelector` for details |
|
|
||||||
| operator.containerSecurityContext | object | `{}` | |
|
|
||||||
| operator.env | list | `[]` | additional environment variables for the clickhouse-operator container in deployment possible format value [{"name": "SAMPLE", "value": "text"}] |
|
|
||||||
| operator.image.pullPolicy | string | `"IfNotPresent"` | image pull policy |
|
|
||||||
| operator.image.repository | string | `"altinity/clickhouse-operator"` | image repository |
|
|
||||||
| operator.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
|
|
||||||
| operator.resources | object | `{}` | custom resource configuration, look `kubectl explain pod.spec.containers.resources` for details |
|
|
||||||
| podAnnotations | object | `{"clickhouse-operator-metrics/port":"9999","clickhouse-operator-metrics/scrape":"true","prometheus.io/port":"8888","prometheus.io/scrape":"true"}` | annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details |
|
|
||||||
| podLabels | object | `{}` | labels to add to the clickhouse-operator pod |
|
|
||||||
| podSecurityContext | object | `{}` | |
|
|
||||||
| rbac.create | bool | `true` | specifies whether cluster roles and cluster role bindings should be created |
|
|
||||||
| secret.create | bool | `true` | create a secret with operator credentials |
|
|
||||||
| secret.password | string | `"clickhouse_operator_password"` | operator credentials password |
|
|
||||||
| secret.username | string | `"clickhouse_operator"` | operator credentials username |
|
|
||||||
| serviceAccount.annotations | object | `{}` | annotations to add to the service account |
|
|
||||||
| serviceAccount.create | bool | `true` | specifies whether a service account should be created |
|
|
||||||
| serviceAccount.name | string | `nil` | the name of the service account to use; if not set and create is true, a name is generated using the fullname template |
|
|
||||||
| serviceMonitor.additionalLabels | object | `{}` | additional labels for service monitor |
|
|
||||||
| serviceMonitor.enabled | bool | `false` | ServiceMonitor Custom resource is created for a (prometheus-operator)[https://github.com/prometheus-operator/prometheus-operator] |
|
|
||||||
| tolerations | list | `[]` | tolerations for scheduler pod assignment, look `kubectl explain pod.spec.tolerations` for details |
|
|
||||||
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,263 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# OPERATOR_VERSION=0.23.4
|
|
||||||
#
|
|
||||||
apiVersion: apiextensions.k8s.io/v1
|
|
||||||
kind: CustomResourceDefinition
|
|
||||||
metadata:
|
|
||||||
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
|
|
||||||
labels:
|
|
||||||
clickhouse-keeper.altinity.com/chop: 0.23.4
|
|
||||||
spec:
|
|
||||||
group: clickhouse-keeper.altinity.com
|
|
||||||
scope: Namespaced
|
|
||||||
names:
|
|
||||||
kind: ClickHouseKeeperInstallation
|
|
||||||
singular: clickhousekeeperinstallation
|
|
||||||
plural: clickhousekeeperinstallations
|
|
||||||
shortNames:
|
|
||||||
- chk
|
|
||||||
versions:
|
|
||||||
- name: v1
|
|
||||||
served: true
|
|
||||||
storage: true
|
|
||||||
additionalPrinterColumns:
|
|
||||||
- name: status
|
|
||||||
type: string
|
|
||||||
description: CHK status
|
|
||||||
jsonPath: .status.status
|
|
||||||
- name: replicas
|
|
||||||
type: integer
|
|
||||||
description: Replica count
|
|
||||||
priority: 1 # show in wide view
|
|
||||||
jsonPath: .status.replicas
|
|
||||||
- name: age
|
|
||||||
type: date
|
|
||||||
description: Age of the resource
|
|
||||||
# Displayed in all priorities
|
|
||||||
jsonPath: .metadata.creationTimestamp
|
|
||||||
subresources:
|
|
||||||
status: {}
|
|
||||||
schema:
|
|
||||||
openAPIV3Schema:
|
|
||||||
type: object
|
|
||||||
required:
|
|
||||||
- spec
|
|
||||||
description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one ClickHouse Keeper cluster"
|
|
||||||
properties:
|
|
||||||
apiVersion:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
APIVersion defines the versioned schema of this representation
|
|
||||||
of an object. Servers should convert recognized schemas to the latest
|
|
||||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
|
||||||
kind:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Kind is a string value representing the REST resource this
|
|
||||||
object represents. Servers may infer this from the endpoint the client
|
|
||||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
|
||||||
metadata:
|
|
||||||
type: object
|
|
||||||
status:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
Current ClickHouseKeeperInstallation status, contains many fields like overall status, desired replicas and ready replica list with their endpoints
|
|
||||||
properties:
|
|
||||||
chop-version:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse operator version"
|
|
||||||
chop-commit:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse operator git commit SHA"
|
|
||||||
chop-date:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse operator build date"
|
|
||||||
chop-ip:
|
|
||||||
type: string
|
|
||||||
description: "IP address of the operator's pod which managed this CHI"
|
|
||||||
status:
|
|
||||||
type: string
|
|
||||||
description: "Status"
|
|
||||||
replicas:
|
|
||||||
type: integer
|
|
||||||
format: int32
|
|
||||||
description: Replicas is the number of number of desired replicas in the cluster
|
|
||||||
readyReplicas:
|
|
||||||
type: array
|
|
||||||
description: ReadyReplicas is the array of endpoints of those ready replicas in the cluster
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
host:
|
|
||||||
type: string
|
|
||||||
description: dns name or ip address for Keeper node
|
|
||||||
port:
|
|
||||||
type: integer
|
|
||||||
minimum: 0
|
|
||||||
maximum: 65535
|
|
||||||
description: TCP port which used to connect to Keeper node
|
|
||||||
secure:
|
|
||||||
type: string
|
|
||||||
description: if a secure connection to Keeper is required
|
|
||||||
normalized:
|
|
||||||
type: object
|
|
||||||
description: "Normalized CHK requested"
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
normalizedCompleted:
|
|
||||||
type: object
|
|
||||||
description: "Normalized CHK completed"
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
spec:
|
|
||||||
type: object
|
|
||||||
description: KeeperSpec defines the desired state of a Keeper cluster
|
|
||||||
properties:
|
|
||||||
namespaceDomainPattern:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
|
|
||||||
Typical use scenario - custom cluster domain in Kubernetes cluster
|
|
||||||
Example: %s.svc.my.test
|
|
||||||
replicas:
|
|
||||||
type: integer
|
|
||||||
format: int32
|
|
||||||
description: |
|
|
||||||
Replicas is the expected size of the keeper cluster.
|
|
||||||
The valid range of size is from 1 to 7.
|
|
||||||
minimum: 1
|
|
||||||
maximum: 7
|
|
||||||
configuration:
|
|
||||||
type: object
|
|
||||||
description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
|
|
||||||
# nullable: true
|
|
||||||
properties:
|
|
||||||
settings:
|
|
||||||
type: object
|
|
||||||
description: "allows configure multiple aspects and behavior for `clickhouse-keeper` instance"
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
clusters:
|
|
||||||
type: array
|
|
||||||
description: |
|
|
||||||
describes ClickHouseKeeper clusters layout and allows change settings on cluster-level and replica-level
|
|
||||||
# nullable: true
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
#required:
|
|
||||||
# - name
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: "cluster name, used to identify set of ClickHouseKeeper servers and wide used during generate names of related Kubernetes resources"
|
|
||||||
minLength: 1
|
|
||||||
# See namePartClusterMaxLen const
|
|
||||||
maxLength: 15
|
|
||||||
pattern: "^[a-zA-Z0-9-]{0,15}$"
|
|
||||||
layout:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
describe current cluster layout, how many replicas
|
|
||||||
# nullable: true
|
|
||||||
properties:
|
|
||||||
replicasCount:
|
|
||||||
type: integer
|
|
||||||
description: "how many replicas in ClickHouseKeeper cluster"
|
|
||||||
templates:
|
|
||||||
type: object
|
|
||||||
description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
|
|
||||||
# nullable: true
|
|
||||||
properties:
|
|
||||||
podTemplates:
|
|
||||||
type: array
|
|
||||||
description: |
|
|
||||||
podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
|
|
||||||
More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
|
|
||||||
# nullable: true
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
#required:
|
|
||||||
# - name
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
|
|
||||||
metadata:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
allows pass standard object's metadata from template to Pod
|
|
||||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
|
||||||
# nullable: true
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
spec:
|
|
||||||
# TODO specify PodSpec
|
|
||||||
type: object
|
|
||||||
description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
|
|
||||||
# nullable: true
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
volumeClaimTemplates:
|
|
||||||
type: array
|
|
||||||
description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else"
|
|
||||||
# nullable: true
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
#required:
|
|
||||||
# - name
|
|
||||||
# - spec
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
template name, could use to link inside
|
|
||||||
top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
|
|
||||||
cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
|
|
||||||
shard-level `chi.spec.configuration.clusters.layout.shards.temlates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.temlates.logVolumeClaimTemplate`
|
|
||||||
replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
|
|
||||||
metadata:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
allows to pass standard object's metadata from template to PVC
|
|
||||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
|
||||||
# nullable: true
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
spec:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
allows define all aspects of `PVC` resource
|
|
||||||
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
|
|
||||||
# nullable: true
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
serviceTemplates:
|
|
||||||
type: array
|
|
||||||
description: |
|
|
||||||
allows define template for rendering `Service` which would get endpoint from Pods which scoped chi-wide, cluster-wide, shard-wide, replica-wide level
|
|
||||||
# nullable: true
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
#required:
|
|
||||||
# - name
|
|
||||||
# - spec
|
|
||||||
properties:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
template name, could use to link inside
|
|
||||||
chi-level `chi.spec.defaults.templates.serviceTemplate`
|
|
||||||
cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
|
|
||||||
shard-level `chi.spec.configuration.clusters.layout.shards.temlates.shardServiceTemplate`
|
|
||||||
replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
|
|
||||||
metadata:
|
|
||||||
# TODO specify ObjectMeta
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
allows pass standard object's metadata from template to Service
|
|
||||||
Could be use for define specificly for Cloud Provider metadata which impact to behavior of service
|
|
||||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
|
||||||
# nullable: true
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
spec:
|
|
||||||
# TODO specify ServiceSpec
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
describe behavior of generated Service
|
|
||||||
More info: https://kubernetes.io/docs/concepts/services-networking/service/
|
|
||||||
# nullable: true
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
@@ -1,415 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NONE
|
|
||||||
#
|
|
||||||
apiVersion: apiextensions.k8s.io/v1
|
|
||||||
kind: CustomResourceDefinition
|
|
||||||
metadata:
|
|
||||||
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
|
|
||||||
labels:
|
|
||||||
clickhouse.altinity.com/chop: 0.23.4
|
|
||||||
spec:
|
|
||||||
group: clickhouse.altinity.com
|
|
||||||
scope: Namespaced
|
|
||||||
names:
|
|
||||||
kind: ClickHouseOperatorConfiguration
|
|
||||||
singular: clickhouseoperatorconfiguration
|
|
||||||
plural: clickhouseoperatorconfigurations
|
|
||||||
shortNames:
|
|
||||||
- chopconf
|
|
||||||
versions:
|
|
||||||
- name: v1
|
|
||||||
served: true
|
|
||||||
storage: true
|
|
||||||
additionalPrinterColumns:
|
|
||||||
- name: namespaces
|
|
||||||
type: string
|
|
||||||
description: Watch namespaces
|
|
||||||
jsonPath: .status
|
|
||||||
- name: age
|
|
||||||
type: date
|
|
||||||
description: Age of the resource
|
|
||||||
# Displayed in all priorities
|
|
||||||
jsonPath: .metadata.creationTimestamp
|
|
||||||
schema:
|
|
||||||
openAPIV3Schema:
|
|
||||||
type: object
|
|
||||||
description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
properties:
|
|
||||||
status:
|
|
||||||
type: object
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
spec:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
Allows to define settings of the clickhouse-operator.
|
|
||||||
More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
|
|
||||||
Check into etc-clickhouse-operator* ConfigMaps if you need more control
|
|
||||||
x-kubernetes-preserve-unknown-fields: true
|
|
||||||
properties:
|
|
||||||
watch:
|
|
||||||
type: object
|
|
||||||
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
|
|
||||||
properties:
|
|
||||||
namespaces:
|
|
||||||
type: array
|
|
||||||
description: "List of namespaces where clickhouse-operator watches for events."
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
clickhouse:
|
|
||||||
type: object
|
|
||||||
description: "Clickhouse related parameters used by clickhouse-operator"
|
|
||||||
properties:
|
|
||||||
configuration:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
file:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
path:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
Each 'path' can be either absolute or relative.
|
|
||||||
In case path is absolute - it is used as is.
|
|
||||||
In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
|
|
||||||
properties:
|
|
||||||
common:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
|
|
||||||
Default value - config.d
|
|
||||||
host:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
|
|
||||||
Default value - conf.d
|
|
||||||
user:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Path to the folder where ClickHouse configuration files with users settings are located.
|
|
||||||
Files are common for all instances within a CHI.
|
|
||||||
Default value - users.d
|
|
||||||
user:
|
|
||||||
type: object
|
|
||||||
description: "Default parameters for any user which will create"
|
|
||||||
properties:
|
|
||||||
default:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
profile:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse server configuration `<profile>...</profile>` for any <user>"
|
|
||||||
quota:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse server configuration `<quota>...</quota>` for any <user>"
|
|
||||||
networksIP:
|
|
||||||
type: array
|
|
||||||
description: "ClickHouse server configuration `<networks><ip>...</ip></networks>` for any <user>"
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
password:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse server configuration `<password>...</password>` for any <user>"
|
|
||||||
network:
|
|
||||||
type: object
|
|
||||||
description: "Default network parameters for any user which will create"
|
|
||||||
properties:
|
|
||||||
hostRegexpTemplate:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse server configuration `<host_regexp>...</host_regexp>` for any <user>"
|
|
||||||
configurationRestartPolicy:
|
|
||||||
type: object
|
|
||||||
description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
|
|
||||||
properties:
|
|
||||||
rules:
|
|
||||||
type: array
|
|
||||||
description: "Array of set of rules per specified ClickHouse versions"
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
version:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse version expression"
|
|
||||||
rules:
|
|
||||||
type: array
|
|
||||||
description: "Set of configuration rules for specified ClickHouse version"
|
|
||||||
items:
|
|
||||||
type: object
|
|
||||||
description: "setting: value pairs for configuration restart policy"
|
|
||||||
access:
|
|
||||||
type: object
|
|
||||||
description: "parameters which use for connect to clickhouse from clickhouse-operator deployment"
|
|
||||||
properties:
|
|
||||||
scheme:
|
|
||||||
type: string
|
|
||||||
description: "The scheme to user for connecting to ClickHouse. Possible values: http, https, auto"
|
|
||||||
username:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
|
|
||||||
password:
|
|
||||||
type: string
|
|
||||||
description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
|
|
||||||
rootCA:
|
|
||||||
type: string
|
|
||||||
description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
|
|
||||||
secret:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
namespace:
|
|
||||||
type: string
|
|
||||||
description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
|
|
||||||
port:
|
|
||||||
type: integer
|
|
||||||
minimum: 1
|
|
||||||
maximum: 65535
|
|
||||||
description: "Port to be used by operator to connect to ClickHouse instances"
|
|
||||||
timeouts:
|
|
||||||
type: object
|
|
||||||
description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
|
|
||||||
properties:
|
|
||||||
connect:
|
|
||||||
type: integer
|
|
||||||
minimum: 1
|
|
||||||
maximum: 10
|
|
||||||
description: "Timout to setup connection from the operator to ClickHouse instances. In seconds."
|
|
||||||
query:
|
|
||||||
type: integer
|
|
||||||
minimum: 1
|
|
||||||
maximum: 600
|
|
||||||
description: "Timout to perform SQL query from the operator to ClickHouse instances. In seconds."
|
|
||||||
metrics:
|
|
||||||
type: object
|
|
||||||
description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator"
|
|
||||||
properties:
|
|
||||||
timeouts:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
|
|
||||||
Specified in seconds.
|
|
||||||
properties:
|
|
||||||
collect:
|
|
||||||
type: integer
|
|
||||||
minimum: 1
|
|
||||||
maximum: 600
|
|
||||||
description: |
|
|
||||||
Timeout used to limit metrics collection request. In seconds.
|
|
||||||
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
|
|
||||||
All collected metrics are returned.
|
|
||||||
template:
|
|
||||||
type: object
|
|
||||||
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
|
|
||||||
properties:
|
|
||||||
chi:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
policy:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
CHI template updates handling policy
|
|
||||||
Possible policy values:
|
|
||||||
- ReadOnStart. Accept CHIT updates on the operators start only.
|
|
||||||
- ApplyOnNextReconcile. Accept CHIT updates at all time. Apply news CHITs on next regular reconcile of the CHI
|
|
||||||
enum:
|
|
||||||
- ""
|
|
||||||
- "ReadOnStart"
|
|
||||||
- "ApplyOnNextReconcile"
|
|
||||||
path:
|
|
||||||
type: string
|
|
||||||
description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
|
|
||||||
reconcile:
|
|
||||||
type: object
|
|
||||||
description: "allow tuning reconciling process"
|
|
||||||
properties:
|
|
||||||
runtime:
|
|
||||||
type: object
|
|
||||||
description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
|
|
||||||
properties:
|
|
||||||
reconcileCHIsThreadsNumber:
|
|
||||||
type: integer
|
|
||||||
minimum: 1
|
|
||||||
maximum: 65535
|
|
||||||
description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
|
|
||||||
reconcileShardsThreadsNumber:
|
|
||||||
type: integer
|
|
||||||
minimum: 1
|
|
||||||
maximum: 65535
|
|
||||||
description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
|
|
||||||
reconcileShardsMaxConcurrencyPercent:
|
|
||||||
type: integer
|
|
||||||
minimum: 0
|
|
||||||
maximum: 100
|
|
||||||
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
|
|
||||||
statefulSet:
|
|
||||||
type: object
|
|
||||||
description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
|
|
||||||
properties:
|
|
||||||
create:
|
|
||||||
type: object
|
|
||||||
description: "Behavior during create StatefulSet"
|
|
||||||
properties:
|
|
||||||
onFailure:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
|
|
||||||
Possible options:
|
|
||||||
1. abort - do nothing, just break the process and wait for admin.
|
|
||||||
2. delete - delete newly created problematic StatefulSet.
|
|
||||||
3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
|
|
||||||
update:
|
|
||||||
type: object
|
|
||||||
description: "Behavior during update StatefulSet"
|
|
||||||
properties:
|
|
||||||
timeout:
|
|
||||||
type: integer
|
|
||||||
description: "How many seconds to wait for created/updated StatefulSet to be Ready"
|
|
||||||
pollInterval:
|
|
||||||
type: integer
|
|
||||||
description: "How many seconds to wait between checks for created/updated StatefulSet status"
|
|
||||||
onFailure:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
|
|
||||||
Possible options:
|
|
||||||
1. abort - do nothing, just break the process and wait for admin.
|
|
||||||
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
|
|
||||||
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
|
|
||||||
host:
|
|
||||||
type: object
|
|
||||||
description: |
|
|
||||||
Whether the operator during reconcile procedure should wait for a ClickHouse host:
|
|
||||||
- to be excluded from a ClickHouse cluster
|
|
||||||
- to complete all running queries
|
|
||||||
- to be included into a ClickHouse cluster
|
|
||||||
respectfully before moving forward
|
|
||||||
properties:
|
|
||||||
wait:
|
|
||||||
type: object
|
|
||||||
properties:
|
|
||||||
exclude: &TypeStringBool
|
|
||||||
type: string
|
|
||||||
description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
|
|
||||||
enum:
|
|
||||||
# List StringBoolXXX constants from model
|
|
||||||
- ""
|
|
||||||
- "0"
|
|
||||||
- "1"
|
|
||||||
- "False"
|
|
||||||
- "false"
|
|
||||||
- "True"
|
|
||||||
- "true"
|
|
||||||
- "No"
|
|
||||||
- "no"
|
|
||||||
- "Yes"
|
|
||||||
- "yes"
|
|
||||||
- "Off"
|
|
||||||
- "off"
|
|
||||||
- "On"
|
|
||||||
- "on"
|
|
||||||
- "Disable"
|
|
||||||
- "disable"
|
|
||||||
- "Enable"
|
|
||||||
- "enable"
|
|
||||||
- "Disabled"
|
|
||||||
- "disabled"
|
|
||||||
- "Enabled"
|
|
||||||
- "enabled"
|
|
||||||
queries:
|
|
||||||
!!merge <<: *TypeStringBool
|
|
||||||
description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
|
|
||||||
include:
|
|
||||||
!!merge <<: *TypeStringBool
|
|
||||||
description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
|
|
||||||
annotation:
|
|
||||||
type: object
|
|
||||||
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
|
|
||||||
properties:
|
|
||||||
include:
|
|
||||||
type: array
|
|
||||||
description: |
|
|
||||||
When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
|
|
||||||
include annotations with names from the following list
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
exclude:
|
|
||||||
type: array
|
|
||||||
description: |
|
|
||||||
When propagating labels from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
|
|
||||||
exclude annotations with names from the following list
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
label:
|
|
||||||
type: object
|
|
||||||
description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
|
|
||||||
properties:
|
|
||||||
include:
|
|
||||||
type: array
|
|
||||||
description: |
|
|
||||||
When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
|
|
||||||
include labels from the following list
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
exclude:
|
|
||||||
type: array
|
|
||||||
items:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
|
|
||||||
exclude labels from the following list
|
|
||||||
appendScope:
|
|
||||||
!!merge <<: *TypeStringBool
|
|
||||||
description: |
|
|
||||||
Whether to append *Scope* labels to StatefulSet and Pod
|
|
||||||
- "LabelShardScopeIndex"
|
|
||||||
- "LabelReplicaScopeIndex"
|
|
||||||
- "LabelCHIScopeIndex"
|
|
||||||
- "LabelCHIScopeCycleSize"
|
|
||||||
- "LabelCHIScopeCycleIndex"
|
|
||||||
- "LabelCHIScopeCycleOffset"
|
|
||||||
- "LabelClusterScopeIndex"
|
|
||||||
- "LabelClusterScopeCycleSize"
|
|
||||||
- "LabelClusterScopeCycleIndex"
|
|
||||||
- "LabelClusterScopeCycleOffset"
|
|
||||||
statefulSet:
|
|
||||||
type: object
|
|
||||||
description: "define StatefulSet-specific parameters"
|
|
||||||
properties:
|
|
||||||
revisionHistoryLimit:
|
|
||||||
type: integer
|
|
||||||
description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
|
|
||||||
pod:
|
|
||||||
type: object
|
|
||||||
description: "define pod specific parameters"
|
|
||||||
properties:
|
|
||||||
terminationGracePeriod:
|
|
||||||
type: integer
|
|
||||||
description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
|
|
||||||
logger:
|
|
||||||
type: object
|
|
||||||
description: "allow setup clickhouse-operator logger behavior"
|
|
||||||
properties:
|
|
||||||
logtostderr:
|
|
||||||
type: string
|
|
||||||
description: "boolean, allows logs to stderr"
|
|
||||||
alsologtostderr:
|
|
||||||
type: string
|
|
||||||
description: "boolean allows logs to stderr and files both"
|
|
||||||
v:
|
|
||||||
type: string
|
|
||||||
description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
|
|
||||||
stderrthreshold:
|
|
||||||
type: string
|
|
||||||
vmodule:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
|
|
||||||
Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
|
|
||||||
log_backtrace_at:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
It can be set to a file and line number with a logging line.
|
|
||||||
Ex.: file.go:123
|
|
||||||
Each time when this line is being executed, a stack trace will be written to the Info log.
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,102 +0,0 @@
|
|||||||
{{/* vim: set filetype=go-template: */}}
|
|
||||||
{{/*
|
|
||||||
Expand the name of the chart.
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.name" -}}
|
|
||||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create a default fully qualified app name.
|
|
||||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
|
||||||
If release name contains chart name it will be used as a full name.
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.fullname" -}}
|
|
||||||
{{- if .Values.fullnameOverride -}}
|
|
||||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
|
||||||
{{- if contains $name .Release.Name -}}
|
|
||||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create chart name and version as used by the chart label.
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.chart" -}}
|
|
||||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Common labels
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.labels" -}}
|
|
||||||
helm.sh/chart: {{ include "altinity-clickhouse-operator.chart" . }}
|
|
||||||
{{ include "altinity-clickhouse-operator.selectorLabels" . }}
|
|
||||||
{{- if .Chart.AppVersion }}
|
|
||||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
|
||||||
{{- end }}
|
|
||||||
{{- if .Values.podLabels }}
|
|
||||||
{{ toYaml .Values.podLabels }}
|
|
||||||
{{- end }}
|
|
||||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Selector labels
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.selectorLabels" -}}
|
|
||||||
app.kubernetes.io/name: {{ include "altinity-clickhouse-operator.name" . }}
|
|
||||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create the name of the service account to use
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.serviceAccountName" -}}
|
|
||||||
{{ default (include "altinity-clickhouse-operator.fullname" .) .Values.serviceAccount.name }}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create the tag for the docker image to use
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.operator.tag" -}}
|
|
||||||
{{- .Values.operator.image.tag | default .Chart.AppVersion -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Create the tag for the docker image to use
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.metrics.tag" -}}
|
|
||||||
{{- .Values.metrics.image.tag | default .Chart.AppVersion -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
altinity-clickhouse-operator.rawResource will create a resource template that can be
|
|
||||||
merged with each item in `.Values.additionalResources`.
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.rawResource" -}}
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
{{- include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
*/}}
|
|
||||||
{{- define "altinity-clickhouse-operator.configmap-data" }}
|
|
||||||
{{- $root := index . 0 }}
|
|
||||||
{{- $data := index . 1 }}
|
|
||||||
{{- if not $data -}}
|
|
||||||
null
|
|
||||||
{{ end }}
|
|
||||||
{{- range $k, $v := $data }}
|
|
||||||
{{- if not (kindIs "string" $v) }}
|
|
||||||
{{- $v = toYaml $v }}
|
|
||||||
{{- end }}
|
|
||||||
{{- tpl (toYaml (dict $k $v)) $root }}
|
|
||||||
{{ end }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,5 +0,0 @@
|
|||||||
{{- $template := fromYaml (include "altinity-clickhouse-operator.rawResource" .) -}}
|
|
||||||
{{- range $i, $t := .Values.additionalResources }}
|
|
||||||
---
|
|
||||||
{{ toYaml (merge (tpl $t $ | fromYaml) $template) -}}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
{{- if .Values.dashboards.enabled }}
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}-dashboards
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels:
|
|
||||||
{{- include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
{{- if .Values.dashboards.additionalLabels }}
|
|
||||||
{{- toYaml .Values.dashboards.additionalLabels | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
{{- with .Values.dashboards.annotations }}
|
|
||||||
annotations:
|
|
||||||
{{- toYaml . | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
type: Opaque
|
|
||||||
data:
|
|
||||||
{{- range $path, $_ := .Files.Glob "files/*.json" }}
|
|
||||||
{{ $path | trimPrefix "files/" }}: {{ $.Files.Get $path | b64enc -}}
|
|
||||||
{{ end }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,211 +0,0 @@
|
|||||||
{{- if .Values.rbac.create -}}
|
|
||||||
# Specifies either
|
|
||||||
# ClusterRole
|
|
||||||
# or
|
|
||||||
# Role
|
|
||||||
# to be bound to ServiceAccount.
|
|
||||||
# ClusterRole is namespace-less and must have unique name
|
|
||||||
# Role is namespace-bound
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}
|
|
||||||
#namespace: kube-system
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
rules:
|
|
||||||
#
|
|
||||||
# Core API group
|
|
||||||
#
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- configmaps
|
|
||||||
- services
|
|
||||||
- persistentvolumeclaims
|
|
||||||
- secrets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- create
|
|
||||||
- delete
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- endpoints
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- events
|
|
||||||
verbs:
|
|
||||||
- create
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- persistentvolumes
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- pods
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- delete
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- secrets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
#
|
|
||||||
# apps.* resources
|
|
||||||
#
|
|
||||||
- apiGroups:
|
|
||||||
- apps
|
|
||||||
resources:
|
|
||||||
- statefulsets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- create
|
|
||||||
- delete
|
|
||||||
- apiGroups:
|
|
||||||
- apps
|
|
||||||
resources:
|
|
||||||
- replicasets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- delete
|
|
||||||
# The operator deployment personally, identified by name
|
|
||||||
- apiGroups:
|
|
||||||
- apps
|
|
||||||
resources:
|
|
||||||
- deployments
|
|
||||||
resourceNames:
|
|
||||||
- {{ include "altinity-clickhouse-operator.fullname" . }}
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- delete
|
|
||||||
#
|
|
||||||
# policy.* resources
|
|
||||||
#
|
|
||||||
- apiGroups:
|
|
||||||
- policy
|
|
||||||
resources:
|
|
||||||
- poddisruptionbudgets
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- watch
|
|
||||||
- create
|
|
||||||
- delete
|
|
||||||
#
|
|
||||||
# apiextensions
|
|
||||||
#
|
|
||||||
- apiGroups:
|
|
||||||
- apiextensions.k8s.io
|
|
||||||
resources:
|
|
||||||
- customresourcedefinitions
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
# clickhouse - related resources
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse.altinity.com
|
|
||||||
#
|
|
||||||
# The operators specific Custom Resources
|
|
||||||
#
|
|
||||||
|
|
||||||
resources:
|
|
||||||
- clickhouseinstallations
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- delete
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse.altinity.com
|
|
||||||
resources:
|
|
||||||
- clickhouseinstallationtemplates
|
|
||||||
- clickhouseoperatorconfigurations
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse.altinity.com
|
|
||||||
resources:
|
|
||||||
- clickhouseinstallations/finalizers
|
|
||||||
- clickhouseinstallationtemplates/finalizers
|
|
||||||
- clickhouseoperatorconfigurations/finalizers
|
|
||||||
verbs:
|
|
||||||
- update
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse.altinity.com
|
|
||||||
resources:
|
|
||||||
- clickhouseinstallations/status
|
|
||||||
- clickhouseinstallationtemplates/status
|
|
||||||
- clickhouseoperatorconfigurations/status
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- update
|
|
||||||
- patch
|
|
||||||
- create
|
|
||||||
- delete
|
|
||||||
# clickhouse-keeper - related resources
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse-keeper.altinity.com
|
|
||||||
resources:
|
|
||||||
- clickhousekeeperinstallations
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
- watch
|
|
||||||
- patch
|
|
||||||
- update
|
|
||||||
- delete
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse-keeper.altinity.com
|
|
||||||
resources:
|
|
||||||
- clickhousekeeperinstallations/finalizers
|
|
||||||
verbs:
|
|
||||||
- update
|
|
||||||
- apiGroups:
|
|
||||||
- clickhouse-keeper.altinity.com
|
|
||||||
resources:
|
|
||||||
- clickhousekeeperinstallations/status
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- update
|
|
||||||
- patch
|
|
||||||
- create
|
|
||||||
- delete
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
{{- if .Values.rbac.create -}}
|
|
||||||
# Specifies either
|
|
||||||
# ClusterRoleBinding between ClusterRole and ServiceAccount.
|
|
||||||
# or
|
|
||||||
# RoleBinding between Role and ServiceAccount.
|
|
||||||
# ClusterRoleBinding is namespace-less and must have unique name
|
|
||||||
# RoleBinding is namespace-bound
|
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}
|
|
||||||
#namespace: kube-system
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: {{ include "altinity-clickhouse-operator.serviceAccountName" . }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAME=etc-clickhouse-operator-confd-files
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
#
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-confd-files" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.confdFiles) | nindent 2 }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAME=etc-clickhouse-operator-configd-files
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
#
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-configd-files" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.configdFiles) | nindent 2 }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAME=etc-clickhouse-operator-files
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
#
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-files" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.files) | nindent 2 }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAME=etc-clickhouse-operator-templatesd-files
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
#
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-templatesd-files" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.templatesdFiles) | nindent 2 }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAME=etc-clickhouse-operator-usersd-files
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
#
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-usersd-files" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.usersdFiles) | nindent 2 }}
|
|
||||||
@@ -1,195 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
|
|
||||||
# OPERATOR_IMAGE_PULL_POLICY=Always
|
|
||||||
# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
|
|
||||||
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
|
|
||||||
#
|
|
||||||
# Setup Deployment for clickhouse-operator
|
|
||||||
# Deployment would be created in kubectl-specified namespace
|
|
||||||
kind: Deployment
|
|
||||||
apiVersion: apps/v1
|
|
||||||
metadata:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels: {{ include "altinity-clickhouse-operator.selectorLabels" . | nindent 6 }}
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 8 }}
|
|
||||||
annotations:
|
|
||||||
{{ toYaml .Values.podAnnotations | nindent 8 }}
|
|
||||||
checksum/files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-files.yaml") . | sha256sum }}
|
|
||||||
checksum/confd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-confd-files.yaml") . | sha256sum }}
|
|
||||||
checksum/configd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-configd-files.yaml") . | sha256sum }}
|
|
||||||
checksum/templatesd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-templatesd-files.yaml") . | sha256sum }}
|
|
||||||
checksum/usersd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-usersd-files.yaml") . | sha256sum }}
|
|
||||||
spec:
|
|
||||||
serviceAccountName: {{ include "altinity-clickhouse-operator.serviceAccountName" . }}
|
|
||||||
volumes:
|
|
||||||
- name: etc-clickhouse-operator-folder
|
|
||||||
configMap:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}-files
|
|
||||||
- name: etc-clickhouse-operator-confd-folder
|
|
||||||
configMap:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}-confd-files
|
|
||||||
- name: etc-clickhouse-operator-configd-folder
|
|
||||||
configMap:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}-configd-files
|
|
||||||
- name: etc-clickhouse-operator-templatesd-folder
|
|
||||||
configMap:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}-templatesd-files
|
|
||||||
- name: etc-clickhouse-operator-usersd-folder
|
|
||||||
configMap:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}-usersd-files
|
|
||||||
containers:
|
|
||||||
- name: {{ .Chart.Name }}
|
|
||||||
image: {{ .Values.operator.image.repository }}:{{ include "altinity-clickhouse-operator.operator.tag" . }}
|
|
||||||
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: etc-clickhouse-operator-folder
|
|
||||||
mountPath: /etc/clickhouse-operator
|
|
||||||
- name: etc-clickhouse-operator-confd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/conf.d
|
|
||||||
- name: etc-clickhouse-operator-configd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/config.d
|
|
||||||
- name: etc-clickhouse-operator-templatesd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/templates.d
|
|
||||||
- name: etc-clickhouse-operator-usersd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/users.d
|
|
||||||
env:
|
|
||||||
# Pod-specific
|
|
||||||
# spec.nodeName: ip-172-20-52-62.ec2.internal
|
|
||||||
- name: OPERATOR_POD_NODE_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: spec.nodeName
|
|
||||||
# metadata.name: clickhouse-operator-6f87589dbb-ftcsf
|
|
||||||
- name: OPERATOR_POD_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.name
|
|
||||||
# metadata.namespace: kube-system
|
|
||||||
- name: OPERATOR_POD_NAMESPACE
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.namespace
|
|
||||||
# status.podIP: 100.96.3.2
|
|
||||||
- name: OPERATOR_POD_IP
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: status.podIP
|
|
||||||
# spec.serviceAccount: clickhouse-operator
|
|
||||||
# spec.serviceAccountName: clickhouse-operator
|
|
||||||
- name: OPERATOR_POD_SERVICE_ACCOUNT
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: spec.serviceAccountName
|
|
||||||
# Container-specific
|
|
||||||
- name: OPERATOR_CONTAINER_CPU_REQUEST
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: requests.cpu
|
|
||||||
- name: OPERATOR_CONTAINER_CPU_LIMIT
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: limits.cpu
|
|
||||||
- name: OPERATOR_CONTAINER_MEM_REQUEST
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: requests.memory
|
|
||||||
- name: OPERATOR_CONTAINER_MEM_LIMIT
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: limits.memory
|
|
||||||
{{ with .Values.operator.env }}{{ toYaml . | nindent 12 }}{{ end }}
|
|
||||||
ports:
|
|
||||||
- containerPort: 9999
|
|
||||||
name: metrics
|
|
||||||
resources: {{ toYaml .Values.operator.resources | nindent 12 }}
|
|
||||||
securityContext: {{ toYaml .Values.operator.containerSecurityContext | nindent 12 }}
|
|
||||||
{{ if .Values.metrics.enabled }}
|
|
||||||
- name: metrics-exporter
|
|
||||||
image: {{ .Values.metrics.image.repository }}:{{ include "altinity-clickhouse-operator.metrics.tag" . }}
|
|
||||||
imagePullPolicy: {{ .Values.metrics.image.pullPolicy }}
|
|
||||||
volumeMounts:
|
|
||||||
- name: etc-clickhouse-operator-folder
|
|
||||||
mountPath: /etc/clickhouse-operator
|
|
||||||
- name: etc-clickhouse-operator-confd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/conf.d
|
|
||||||
- name: etc-clickhouse-operator-configd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/config.d
|
|
||||||
- name: etc-clickhouse-operator-templatesd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/templates.d
|
|
||||||
- name: etc-clickhouse-operator-usersd-folder
|
|
||||||
mountPath: /etc/clickhouse-operator/users.d
|
|
||||||
env:
|
|
||||||
# Pod-specific
|
|
||||||
# spec.nodeName: ip-172-20-52-62.ec2.internal
|
|
||||||
- name: OPERATOR_POD_NODE_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: spec.nodeName
|
|
||||||
# metadata.name: clickhouse-operator-6f87589dbb-ftcsf
|
|
||||||
- name: OPERATOR_POD_NAME
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.name
|
|
||||||
# metadata.namespace: kube-system
|
|
||||||
- name: OPERATOR_POD_NAMESPACE
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: metadata.namespace
|
|
||||||
# status.podIP: 100.96.3.2
|
|
||||||
- name: OPERATOR_POD_IP
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: status.podIP
|
|
||||||
# spec.serviceAccount: clickhouse-operator
|
|
||||||
# spec.serviceAccountName: clickhouse-operator
|
|
||||||
- name: OPERATOR_POD_SERVICE_ACCOUNT
|
|
||||||
valueFrom:
|
|
||||||
fieldRef:
|
|
||||||
fieldPath: spec.serviceAccountName
|
|
||||||
# Container-specific
|
|
||||||
- name: OPERATOR_CONTAINER_CPU_REQUEST
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: requests.cpu
|
|
||||||
- name: OPERATOR_CONTAINER_CPU_LIMIT
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: limits.cpu
|
|
||||||
- name: OPERATOR_CONTAINER_MEM_REQUEST
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: requests.memory
|
|
||||||
- name: OPERATOR_CONTAINER_MEM_LIMIT
|
|
||||||
valueFrom:
|
|
||||||
resourceFieldRef:
|
|
||||||
containerName: {{ .Chart.Name }}
|
|
||||||
resource: limits.memory
|
|
||||||
{{ with .Values.metrics.env }}{{ toYaml . | nindent 12 }}{{ end }}
|
|
||||||
ports:
|
|
||||||
- containerPort: 8888
|
|
||||||
name: metrics
|
|
||||||
resources: {{ toYaml .Values.metrics.resources | nindent 12 }}
|
|
||||||
securityContext: {{ toYaml .Values.metrics.containerSecurityContext | nindent 12 }}
|
|
||||||
{{ end }}
|
|
||||||
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
|
|
||||||
nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }}
|
|
||||||
affinity: {{ toYaml .Values.affinity | nindent 8 }}
|
|
||||||
tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
|
|
||||||
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
{{- if .Values.secret.create -}}
|
|
||||||
#
|
|
||||||
# Template parameters available:
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
# OPERATOR_VERSION=0.23.4
|
|
||||||
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
|
|
||||||
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
|
|
||||||
#
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Secret
|
|
||||||
metadata:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.fullname" . }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
type: Opaque
|
|
||||||
data:
|
|
||||||
username: {{ .Values.secret.username | b64enc }}
|
|
||||||
password: {{ .Values.secret.password | b64enc }}
|
|
||||||
{{- end -}}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=
|
|
||||||
#
|
|
||||||
# Setup ClusterIP Service to provide monitoring metrics for Prometheus
|
|
||||||
# Service would be created in kubectl-specified namespace
|
|
||||||
# In order to get access outside of k8s it should be exposed as:
|
|
||||||
# kubectl --namespace prometheus port-forward service/prometheus 9090
|
|
||||||
# and point browser to localhost:9090
|
|
||||||
kind: Service
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-metrics" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
- port: 8888
|
|
||||||
name: clickhouse-metrics
|
|
||||||
- port: 9999
|
|
||||||
name: operator-metrics
|
|
||||||
selector: {{ include "altinity-clickhouse-operator.selectorLabels" . | nindent 4 }}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
{{- if .Values.serviceAccount.create -}}
|
|
||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# COMMENT=
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# NAME=clickhouse-operator
|
|
||||||
#
|
|
||||||
# Setup ServiceAccount
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ServiceAccount
|
|
||||||
metadata:
|
|
||||||
name: {{ include "altinity-clickhouse-operator.serviceAccountName" . }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
|
|
||||||
|
|
||||||
# Template Parameters:
|
|
||||||
#
|
|
||||||
# NAMESPACE=kube-system
|
|
||||||
# COMMENT=#
|
|
||||||
# ROLE_KIND=ClusterRole
|
|
||||||
# ROLE_NAME=clickhouse-operator-kube-system
|
|
||||||
# ROLE_BINDING_KIND=ClusterRoleBinding
|
|
||||||
# ROLE_BINDING_NAME=clickhouse-operator-kube-system
|
|
||||||
#
|
|
||||||
{{- end -}}
|
|
||||||
@@ -1,19 +0,0 @@
|
|||||||
{{- if .Values.serviceMonitor.enabled }}
|
|
||||||
apiVersion: monitoring.coreos.com/v1
|
|
||||||
kind: ServiceMonitor
|
|
||||||
metadata:
|
|
||||||
name: {{ printf "%s-clickhouse-metrics" (include "altinity-clickhouse-operator.fullname" .) }}
|
|
||||||
namespace: {{ .Release.Namespace }}
|
|
||||||
labels:
|
|
||||||
{{- include "altinity-clickhouse-operator.labels" . | nindent 4 }}
|
|
||||||
{{- if .Values.serviceMonitor.additionalLabels }}
|
|
||||||
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
|
|
||||||
{{- end }}
|
|
||||||
spec:
|
|
||||||
endpoints:
|
|
||||||
- port: clickhouse-metrics # 8888
|
|
||||||
- port: operator-metrics # 9999
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
{{- include "altinity-clickhouse-operator.selectorLabels" . | nindent 6 }}
|
|
||||||
{{- end }}
|
|
||||||
@@ -1,670 +0,0 @@
|
|||||||
operator:
|
|
||||||
image:
|
|
||||||
# operator.image.repository -- image repository
|
|
||||||
repository: altinity/clickhouse-operator
|
|
||||||
# operator.image.tag -- image tag (chart's appVersion value will be used if not set)
|
|
||||||
tag: ""
|
|
||||||
# operator.image.pullPolicy -- image pull policy
|
|
||||||
pullPolicy: IfNotPresent
|
|
||||||
containerSecurityContext: {}
|
|
||||||
# operator.resources -- custom resource configuration, look `kubectl explain pod.spec.containers.resources` for details
|
|
||||||
resources: {}
|
|
||||||
# limits:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
|
|
||||||
# operator.env -- additional environment variables for the clickhouse-operator container in deployment
|
|
||||||
# possible format value [{"name": "SAMPLE", "value": "text"}]
|
|
||||||
env: []
|
|
||||||
metrics:
|
|
||||||
enabled: true
|
|
||||||
image:
|
|
||||||
# metrics.image.repository -- image repository
|
|
||||||
repository: altinity/metrics-exporter
|
|
||||||
# metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
|
|
||||||
tag: ""
|
|
||||||
# metrics.image.pullPolicy -- image pull policy
|
|
||||||
pullPolicy: IfNotPresent
|
|
||||||
containerSecurityContext: {}
|
|
||||||
# metrics.resources -- custom resource configuration
|
|
||||||
resources: {}
|
|
||||||
# limits:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
# requests:
|
|
||||||
# cpu: 100m
|
|
||||||
# memory: 128Mi
|
|
||||||
|
|
||||||
# metrics.env -- additional environment variables for the deployment of metrics-exporter containers
|
|
||||||
# possible format value [{"name": "SAMPLE", "value": "text"}]
|
|
||||||
env: []
|
|
||||||
# imagePullSecrets -- image pull secret for private images in clickhouse-operator pod
|
|
||||||
# possible value format [{"name":"your-secret-name"}]
|
|
||||||
# look `kubectl explain pod.spec.imagePullSecrets` for details
|
|
||||||
imagePullSecrets: []
|
|
||||||
# podLabels -- labels to add to the clickhouse-operator pod
|
|
||||||
podLabels: {}
|
|
||||||
# podAnnotations -- annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details
|
|
||||||
podAnnotations:
|
|
||||||
prometheus.io/port: '8888'
|
|
||||||
prometheus.io/scrape: 'true'
|
|
||||||
clickhouse-operator-metrics/port: '9999'
|
|
||||||
clickhouse-operator-metrics/scrape: 'true'
|
|
||||||
# nameOverride -- override name of the chart
|
|
||||||
nameOverride: ""
|
|
||||||
# fullnameOverride -- full name of the chart.
|
|
||||||
fullnameOverride: ""
|
|
||||||
serviceAccount:
|
|
||||||
# serviceAccount.create -- specifies whether a service account should be created
|
|
||||||
create: true
|
|
||||||
# serviceAccount.annotations -- annotations to add to the service account
|
|
||||||
annotations: {}
|
|
||||||
# serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
|
|
||||||
name:
|
|
||||||
rbac:
|
|
||||||
# rbac.create -- specifies whether cluster roles and cluster role bindings should be created
|
|
||||||
create: true
|
|
||||||
secret:
|
|
||||||
# secret.create -- create a secret with operator credentials
|
|
||||||
create: true
|
|
||||||
# secret.username -- operator credentials username
|
|
||||||
username: clickhouse_operator
|
|
||||||
# secret.password -- operator credentials password
|
|
||||||
password: clickhouse_operator_password
|
|
||||||
# nodeSelector -- node for scheduler pod assignment, look `kubectl explain pod.spec.nodeSelector` for details
|
|
||||||
nodeSelector: {}
|
|
||||||
# tolerations -- tolerations for scheduler pod assignment, look `kubectl explain pod.spec.tolerations` for details
|
|
||||||
tolerations: []
|
|
||||||
# affinity -- affinity for scheduler pod assignment, look `kubectl explain pod.spec.affinity` for details
|
|
||||||
affinity: {}
|
|
||||||
# podSecurityContext - operator deployment SecurityContext, look `kubectl explain pod.spec.securityContext` for details
|
|
||||||
podSecurityContext: {}
|
|
||||||
serviceMonitor:
|
|
||||||
# serviceMonitor.enabled -- ServiceMonitor Custom resource is created for a (prometheus-operator)[https://github.com/prometheus-operator/prometheus-operator]
|
|
||||||
enabled: false
|
|
||||||
# serviceMonitor.additionalLabels -- additional labels for service monitor
|
|
||||||
additionalLabels: {}
|
|
||||||
# configs -- clickhouse-operator configs
|
|
||||||
# @default -- check the values.yaml file for the config content, auto-generated from latest operator release
|
|
||||||
configs:
|
|
||||||
confdFiles: null
|
|
||||||
configdFiles:
|
|
||||||
01-clickhouse-01-listen.xml: |
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<yandex>
|
|
||||||
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
|
|
||||||
<listen_host>::</listen_host>
|
|
||||||
<listen_host>0.0.0.0</listen_host>
|
|
||||||
<listen_try>1</listen_try>
|
|
||||||
</yandex>
|
|
||||||
01-clickhouse-02-logger.xml: |
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<yandex>
|
|
||||||
<logger>
|
|
||||||
<!-- Possible levels: https://github.com/pocoproject/poco/blob/devel/Foundation/include/Poco/Logger.h#L439 -->
|
|
||||||
<level>debug</level>
|
|
||||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
|
||||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
|
||||||
<size>1000M</size>
|
|
||||||
<count>10</count>
|
|
||||||
<!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
|
|
||||||
<console>1</console>
|
|
||||||
</logger>
|
|
||||||
</yandex>
|
|
||||||
01-clickhouse-03-query_log.xml: |
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<yandex>
|
|
||||||
<query_log replace="1">
|
|
||||||
<database>system</database>
|
|
||||||
<table>query_log</table>
|
|
||||||
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
|
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
|
||||||
</query_log>
|
|
||||||
<query_thread_log remove="1"/>
|
|
||||||
</yandex>
|
|
||||||
01-clickhouse-04-part_log.xml: |
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<yandex>
|
|
||||||
<part_log replace="1">
|
|
||||||
<database>system</database>
|
|
||||||
<table>part_log</table>
|
|
||||||
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
|
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
|
||||||
</part_log>
|
|
||||||
</yandex>
|
|
||||||
01-clickhouse-05-trace_log.xml: |-
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<yandex>
|
|
||||||
<trace_log replace="1">
|
|
||||||
<database>system</database>
|
|
||||||
<table>trace_log</table>
|
|
||||||
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
|
|
||||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
|
||||||
</trace_log>
|
|
||||||
</yandex>
|
|
||||||
files:
|
|
||||||
config.yaml:
|
|
||||||
# IMPORTANT
|
|
||||||
# This file is auto-generated
|
|
||||||
# Do not edit this file - all changes would be lost
|
|
||||||
# Edit appropriate template in the following folder:
|
|
||||||
# deploy/builder/templates-config
|
|
||||||
# IMPORTANT
|
|
||||||
#
|
|
||||||
# Template parameters available:
|
|
||||||
# WATCH_NAMESPACES=
|
|
||||||
# CH_USERNAME_PLAIN=
|
|
||||||
# CH_PASSWORD_PLAIN=
|
|
||||||
# CH_CREDENTIALS_SECRET_NAMESPACE=
|
|
||||||
# CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
|
|
||||||
# VERBOSITY=1
|
|
||||||
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Watch section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
watch:
|
|
||||||
# List of namespaces where clickhouse-operator watches for events.
|
|
||||||
# Concurrently running operators should watch on different namespaces.
|
|
||||||
# IMPORTANT
|
|
||||||
# Regexp is applicable.
|
|
||||||
#namespaces: ["dev", "test"]
|
|
||||||
namespaces: []
|
|
||||||
clickhouse:
|
|
||||||
configuration:
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Configuration files section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
file:
|
|
||||||
# Each 'path' can be either absolute or relative.
|
|
||||||
# In case path is absolute - it is used as is
|
|
||||||
# In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
|
|
||||||
path:
|
|
||||||
# Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
|
|
||||||
common: config.d
|
|
||||||
# Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
|
|
||||||
host: conf.d
|
|
||||||
# Path to the folder where ClickHouse configuration files with users' settings are located.
|
|
||||||
# Files are common for all instances within a CHI.
|
|
||||||
user: users.d
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Configuration users section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
user:
|
|
||||||
# Default settings for user accounts, created by the operator.
|
|
||||||
# IMPORTANT. These are not access credentials or settings for 'default' user account,
|
|
||||||
# it is a template for filling out missing fields for all user accounts to be created by the operator,
|
|
||||||
# with the following EXCEPTIONS:
|
|
||||||
# 1. 'default' user account DOES NOT use provided password, but uses all the rest of the fields.
|
|
||||||
# Password for 'default' user account has to be provided explicitly, if to be used.
|
|
||||||
# 2. CHOP user account DOES NOT use:
|
|
||||||
# - profile setting. It uses predefined profile called 'clickhouse_operator'
|
|
||||||
# - quota setting. It uses empty quota name.
|
|
||||||
# - networks IP setting. Operator specifies 'networks/ip' user setting to match operators' pod IP only.
|
|
||||||
# - password setting. Password for CHOP account is used from 'clickhouse.access.*' section
|
|
||||||
default:
|
|
||||||
# Default values for ClickHouse user account(s) created by the operator
|
|
||||||
# 1. user/profile - string
|
|
||||||
# 2. user/quota - string
|
|
||||||
# 3. user/networks/ip - multiple strings
|
|
||||||
# 4. user/password - string
|
|
||||||
# These values can be overwritten on per-user basis.
|
|
||||||
profile: "default"
|
|
||||||
quota: "default"
|
|
||||||
networksIP:
|
|
||||||
- "::1"
|
|
||||||
- "127.0.0.1"
|
|
||||||
password: "default"
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Configuration network section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
network:
|
|
||||||
# Default host_regexp to limit network connectivity from outside
|
|
||||||
hostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Configuration restart policy section
|
|
||||||
## Configuration restart policy describes what configuration changes require ClickHouse restart
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
configurationRestartPolicy:
|
|
||||||
rules:
|
|
||||||
# IMPORTANT!
|
|
||||||
# Special version of "*" - default version - has to satisfy all ClickHouse versions.
|
|
||||||
# Default version will also be used in case ClickHouse version is unknown.
|
|
||||||
# ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section.
|
|
||||||
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
|
|
||||||
- version: "*"
|
|
||||||
rules:
|
|
||||||
# see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
|
|
||||||
# to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
|
|
||||||
- settings/*: "yes"
|
|
||||||
# single values
|
|
||||||
- settings/access_control_path: "no"
|
|
||||||
- settings/dictionaries_config: "no"
|
|
||||||
- settings/max_server_memory_*: "no"
|
|
||||||
- settings/max_*_to_drop: "no"
|
|
||||||
- settings/max_concurrent_queries: "no"
|
|
||||||
- settings/models_config: "no"
|
|
||||||
- settings/user_defined_executable_functions_config: "no"
|
|
||||||
# structured XML
|
|
||||||
- settings/logger/*: "no"
|
|
||||||
- settings/macros/*: "no"
|
|
||||||
- settings/remote_servers/*: "no"
|
|
||||||
- settings/user_directories/*: "no"
|
|
||||||
- zookeeper/*: "yes"
|
|
||||||
- files/*.xml: "yes"
|
|
||||||
- files/config.d/*.xml: "yes"
|
|
||||||
- files/config.d/*dict*.xml: "no"
|
|
||||||
# exceptions in default profile
|
|
||||||
- profiles/default/background_*_pool_size: "yes"
|
|
||||||
- profiles/default/max_*_for_server: "yes"
|
|
||||||
- version: "21.*"
|
|
||||||
rules:
|
|
||||||
- settings/logger: "yes"
|
|
||||||
#################################################
|
|
||||||
##
|
|
||||||
## Access to ClickHouse instances
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
access:
|
|
||||||
# Possible values for 'scheme' are:
|
|
||||||
# 1. http - force http to be used to connect to ClickHouse instances
|
|
||||||
# 2. https - force https to be used to connect to ClickHouse instances
|
|
||||||
# 3. auto - either http or https is selected based on open ports
|
|
||||||
scheme: "auto"
|
|
||||||
# ClickHouse credentials (username, password and port) to be used by the operator to connect to ClickHouse instances.
|
|
||||||
# These credentials are used for:
|
|
||||||
# 1. Metrics requests
|
|
||||||
# 2. Schema maintenance
|
|
||||||
# 3. DROP DNS CACHE
|
|
||||||
# User with these credentials can be specified in additional ClickHouse .xml config files,
|
|
||||||
# located in 'clickhouse.configuration.file.path.user' folder
|
|
||||||
username: ""
|
|
||||||
password: ""
|
|
||||||
rootCA: ""
|
|
||||||
# Location of the k8s Secret with username and password to be used by the operator to connect to ClickHouse instances.
|
|
||||||
# Can be used instead of explicitly specified username and password available in sections:
|
|
||||||
# - clickhouse.access.username
|
|
||||||
# - clickhouse.access.password
|
|
||||||
# Secret should have two keys:
|
|
||||||
# 1. username
|
|
||||||
# 2. password
|
|
||||||
secret:
|
|
||||||
# Empty `namespace` means that k8s secret would be looked in the same namespace where operator's pod is running.
|
|
||||||
namespace: ""
|
|
||||||
# Empty `name` means no k8s Secret would be looked for
|
|
||||||
name: '{{ include "altinity-clickhouse-operator.fullname" . }}'
|
|
||||||
# Port where to connect to ClickHouse instances to
|
|
||||||
port: 8123
|
|
||||||
# Timeouts used to limit connection and queries from the operator to ClickHouse instances
|
|
||||||
# Specified in seconds.
|
|
||||||
timeouts:
|
|
||||||
# Timout to setup connection from the operator to ClickHouse instances. In seconds.
|
|
||||||
connect: 1
|
|
||||||
# Timout to perform SQL query from the operator to ClickHouse instances. In seconds.
|
|
||||||
query: 4
|
|
||||||
#################################################
|
|
||||||
##
|
|
||||||
## Metrics collection
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
metrics:
|
|
||||||
# Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
|
|
||||||
# Specified in seconds.
|
|
||||||
timeouts:
|
|
||||||
# Timeout used to limit metrics collection request. In seconds.
|
|
||||||
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
|
|
||||||
# All collected metrics are returned.
|
|
||||||
collect: 9
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Template(s) management section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
template:
|
|
||||||
chi:
|
|
||||||
# CHI template updates handling policy
|
|
||||||
# Possible policy values:
|
|
||||||
# - ReadOnStart. Accept CHIT updates on the operators start only.
|
|
||||||
# - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply news CHITs on next regular reconcile of the CHI
|
|
||||||
policy: ApplyOnNextReconcile
|
|
||||||
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
|
|
||||||
# Templates are added to the list of all templates and used when CHI is reconciled.
|
|
||||||
# Templates are applied in sorted alpha-numeric order.
|
|
||||||
path: templates.d
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Reconcile section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
reconcile:
|
|
||||||
# Reconcile runtime settings
|
|
||||||
runtime:
|
|
||||||
# Max number of concurrent CHI reconciles in progress
|
|
||||||
reconcileCHIsThreadsNumber: 10
|
|
||||||
# The operator reconciles shards concurrently in each CHI with the following limitations:
|
|
||||||
# 1. Number of shards being reconciled (and thus having hosts down) in each CHI concurrently
|
|
||||||
# can not be greater than 'reconcileShardsThreadsNumber'.
|
|
||||||
# 2. Percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
|
|
||||||
# can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
|
|
||||||
# 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
|
|
||||||
# Thus limiting number of shards being reconciled (and thus having hosts down) in each CHI by both number and percentage
|
|
||||||
|
|
||||||
# Max number of concurrent shard reconciles within one CHI in progress
|
|
||||||
reconcileShardsThreadsNumber: 5
|
|
||||||
# Max percentage of concurrent shard reconciles within one CHI in progress
|
|
||||||
reconcileShardsMaxConcurrencyPercent: 50
|
|
||||||
# Reconcile StatefulSet scenario
|
|
||||||
statefulSet:
|
|
||||||
# Create StatefulSet scenario
|
|
||||||
create:
|
|
||||||
# What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
|
|
||||||
# Possible options:
|
|
||||||
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
|
|
||||||
# do not try to fix or delete or update it, just abort reconcile cycle.
|
|
||||||
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
|
|
||||||
# 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
|
|
||||||
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
|
|
||||||
onFailure: ignore
|
|
||||||
# Update StatefulSet scenario
|
|
||||||
update:
|
|
||||||
# How many seconds to wait for created/updated StatefulSet to be 'Ready'
|
|
||||||
timeout: 300
|
|
||||||
# How many seconds to wait between checks/polls for created/updated StatefulSet status
|
|
||||||
pollInterval: 5
|
|
||||||
# What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
|
|
||||||
# Possible options:
|
|
||||||
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
|
|
||||||
# do not try to fix or delete or update it, just abort reconcile cycle.
|
|
||||||
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
|
|
||||||
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
|
|
||||||
# Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
|
|
||||||
# Follow 'abort' path afterwards.
|
|
||||||
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
|
|
||||||
onFailure: abort
|
|
||||||
# Reconcile Host scenario
|
|
||||||
host:
|
|
||||||
# Whether the operator during reconcile procedure should wait for a ClickHouse host:
|
|
||||||
# - to be excluded from a ClickHouse cluster
|
|
||||||
# - to complete all running queries
|
|
||||||
# - to be included into a ClickHouse cluster
|
|
||||||
# respectfully before moving forward
|
|
||||||
wait:
|
|
||||||
exclude: true
|
|
||||||
queries: true
|
|
||||||
include: false
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Annotations management section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
annotation:
|
|
||||||
# Applied when:
|
|
||||||
# 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
|
|
||||||
# 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
|
|
||||||
# Include annotations from the following list:
|
|
||||||
# Applied only when not empty. Empty list means "include all, no selection"
|
|
||||||
include: []
|
|
||||||
# Exclude annotations from the following list:
|
|
||||||
exclude: []
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Labels management section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
label:
|
|
||||||
# Applied when:
|
|
||||||
# 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
|
|
||||||
# 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
|
|
||||||
# Include labels from the following list:
|
|
||||||
# Applied only when not empty. Empty list means "include all, no selection"
|
|
||||||
include: []
|
|
||||||
# Exclude labels from the following list:
|
|
||||||
# Applied only when not empty. Empty list means "nothing to exclude, no selection"
|
|
||||||
exclude: []
|
|
||||||
# Whether to append *Scope* labels to StatefulSet and Pod.
|
|
||||||
# Full list of available *scope* labels check in 'labeler.go'
|
|
||||||
# LabelShardScopeIndex
|
|
||||||
# LabelReplicaScopeIndex
|
|
||||||
# LabelCHIScopeIndex
|
|
||||||
# LabelCHIScopeCycleSize
|
|
||||||
# LabelCHIScopeCycleIndex
|
|
||||||
# LabelCHIScopeCycleOffset
|
|
||||||
# LabelClusterScopeIndex
|
|
||||||
# LabelClusterScopeCycleSize
|
|
||||||
# LabelClusterScopeCycleIndex
|
|
||||||
# LabelClusterScopeCycleOffset
|
|
||||||
appendScope: "no"
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## StatefulSet management section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
statefulSet:
|
|
||||||
revisionHistoryLimit: 0
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Pod management section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
pod:
|
|
||||||
# Grace period for Pod termination.
|
|
||||||
# How many seconds to wait between sending
|
|
||||||
# SIGTERM and SIGKILL during Pod termination process.
|
|
||||||
# Increase this number is case of slow shutdown.
|
|
||||||
terminationGracePeriod: 30
|
|
||||||
################################################
|
|
||||||
##
|
|
||||||
## Log parameters section
|
|
||||||
##
|
|
||||||
################################################
|
|
||||||
logger:
|
|
||||||
logtostderr: "true"
|
|
||||||
alsologtostderr: "false"
|
|
||||||
v: "1"
|
|
||||||
stderrthreshold: ""
|
|
||||||
vmodule: ""
|
|
||||||
log_backtrace_at: ""
|
|
||||||
templatesdFiles:
|
|
||||||
001-templates.json.example: |
|
|
||||||
{
|
|
||||||
"apiVersion": "clickhouse.altinity.com/v1",
|
|
||||||
"kind": "ClickHouseInstallationTemplate",
|
|
||||||
"metadata": {
|
|
||||||
"name": "01-default-volumeclaimtemplate"
|
|
||||||
},
|
|
||||||
"spec": {
|
|
||||||
"templates": {
|
|
||||||
"volumeClaimTemplates": [
|
|
||||||
{
|
|
||||||
"name": "chi-default-volume-claim-template",
|
|
||||||
"spec": {
|
|
||||||
"accessModes": [
|
|
||||||
"ReadWriteOnce"
|
|
||||||
],
|
|
||||||
"resources": {
|
|
||||||
"requests": {
|
|
||||||
"storage": "2Gi"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"podTemplates": [
|
|
||||||
{
|
|
||||||
"name": "chi-default-oneperhost-pod-template",
|
|
||||||
"distribution": "OnePerHost",
|
|
||||||
"spec": {
|
|
||||||
"containers" : [
|
|
||||||
{
|
|
||||||
"name": "clickhouse",
|
|
||||||
"image": "clickhouse/clickhouse-server:23.8",
|
|
||||||
"ports": [
|
|
||||||
{
|
|
||||||
"name": "http",
|
|
||||||
"containerPort": 8123
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "client",
|
|
||||||
"containerPort": 9000
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "interserver",
|
|
||||||
"containerPort": 9009
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default-pod-template.yaml.example: |
|
|
||||||
apiVersion: "clickhouse.altinity.com/v1"
|
|
||||||
kind: "ClickHouseInstallationTemplate"
|
|
||||||
metadata:
|
|
||||||
name: "default-oneperhost-pod-template"
|
|
||||||
spec:
|
|
||||||
templates:
|
|
||||||
podTemplates:
|
|
||||||
- name: default-oneperhost-pod-template
|
|
||||||
distribution: "OnePerHost"
|
|
||||||
default-storage-template.yaml.example: |
|
|
||||||
apiVersion: "clickhouse.altinity.com/v1"
|
|
||||||
kind: "ClickHouseInstallationTemplate"
|
|
||||||
metadata:
|
|
||||||
name: "default-storage-template-2Gi"
|
|
||||||
spec:
|
|
||||||
templates:
|
|
||||||
volumeClaimTemplates:
|
|
||||||
- name: default-storage-template-2Gi
|
|
||||||
spec:
|
|
||||||
accessModes:
|
|
||||||
- ReadWriteOnce
|
|
||||||
resources:
|
|
||||||
requests:
|
|
||||||
storage: 2Gi
|
|
||||||
readme: |-
|
|
||||||
Templates in this folder are packaged with an operator and available via 'useTemplate'
|
|
||||||
usersdFiles:
|
|
||||||
01-clickhouse-operator-profile.xml: |
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!--
|
|
||||||
#
|
|
||||||
# Template parameters available:
|
|
||||||
#
|
|
||||||
-->
|
|
||||||
<yandex>
|
|
||||||
<!-- clickhouse-operator user is generated by the operator based on config.yaml in runtime -->
|
|
||||||
<profiles>
|
|
||||||
<clickhouse_operator>
|
|
||||||
<log_queries>0</log_queries>
|
|
||||||
<skip_unavailable_shards>1</skip_unavailable_shards>
|
|
||||||
<http_connection_timeout>10</http_connection_timeout>
|
|
||||||
<max_concurrent_queries_for_all_users>0</max_concurrent_queries_for_all_users>
|
|
||||||
<os_thread_priority>0</os_thread_priority>
|
|
||||||
</clickhouse_operator>
|
|
||||||
</profiles>
|
|
||||||
</yandex>
|
|
||||||
02-clickhouse-default-profile.xml: |-
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<!-- This file is auto-generated -->
|
|
||||||
<!-- Do not edit this file - all changes would be lost -->
|
|
||||||
<!-- Edit appropriate template in the following folder: -->
|
|
||||||
<!-- deploy/builder/templates-config -->
|
|
||||||
<!-- IMPORTANT -->
|
|
||||||
<yandex>
|
|
||||||
<profiles>
|
|
||||||
<default>
|
|
||||||
<os_thread_priority>2</os_thread_priority>
|
|
||||||
<log_queries>1</log_queries>
|
|
||||||
<connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
|
|
||||||
<distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
|
|
||||||
<parallel_view_processing>1</parallel_view_processing>
|
|
||||||
<do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
|
|
||||||
<load_balancing>nearest_hostname</load_balancing>
|
|
||||||
<prefer_localhost_replica>0</prefer_localhost_replica>
|
|
||||||
<!-- materialize_ttl_recalculate_only>1</materialize_ttl_recalculate_only> 21.10 and above -->
|
|
||||||
</default>
|
|
||||||
</profiles>
|
|
||||||
</yandex>
|
|
||||||
# additionalResources -- list of additional resources to create (are processed via `tpl` function), useful for create ClickHouse clusters together with clickhouse-operator, look `kubectl explain chi` for details
|
|
||||||
additionalResources: []
|
|
||||||
# - |
|
|
||||||
# apiVersion: v1
|
|
||||||
# kind: ConfigMap
|
|
||||||
# metadata:
|
|
||||||
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
|
|
||||||
# namespace: {{ .Release.Namespace }}
|
|
||||||
# - |
|
|
||||||
# apiVersion: v1
|
|
||||||
# kind: Secret
|
|
||||||
# metadata:
|
|
||||||
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
|
|
||||||
# namespace: {{ .Release.Namespace }}
|
|
||||||
# stringData:
|
|
||||||
# mykey: my-value
|
|
||||||
# - |
|
|
||||||
# apiVersion: clickhouse.altinity.com/v1
|
|
||||||
# kind: ClickHouseInstallation
|
|
||||||
# metadata:
|
|
||||||
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
|
|
||||||
# namespace: {{ .Release.Namespace }}
|
|
||||||
# spec:
|
|
||||||
# configuration:
|
|
||||||
# clusters:
|
|
||||||
# - name: default
|
|
||||||
# layout:
|
|
||||||
# shardsCount: 1
|
|
||||||
dashboards:
|
|
||||||
# dashboards.enabled -- provision grafana dashboards as secrets (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-6.33.1/charts/grafana/values.yaml#L679 )
|
|
||||||
enabled: false
|
|
||||||
# dashboards.additionalLabels -- labels to add to a secret with dashboards
|
|
||||||
additionalLabels:
|
|
||||||
grafana_dashboard: ""
|
|
||||||
# dashboards.annotations -- annotations to add to a secret with dashboards
|
|
||||||
annotations: {}
|
|
||||||
grafana_folder: clickhouse
|
|
||||||
@@ -1,2 +1,2 @@
|
|||||||
name: cozy-dashboard
|
name: cozy-dashboard
|
||||||
version: 0.2.0
|
version: 1.0.0
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ NAMESPACE=cozy-dashboard
|
|||||||
PUSH := 1
|
PUSH := 1
|
||||||
LOAD := 0
|
LOAD := 0
|
||||||
REPOSITORY := ghcr.io/aenix-io/cozystack
|
REPOSITORY := ghcr.io/aenix-io/cozystack
|
||||||
TAG := v0.2.0
|
TAG := v0.1.0
|
||||||
|
|
||||||
show:
|
show:
|
||||||
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
|
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
|
||||||
|
|||||||
@@ -22,5 +22,3 @@
|
|||||||
.project
|
.project
|
||||||
.idea/
|
.idea/
|
||||||
*.tmproj
|
*.tmproj
|
||||||
# img folder
|
|
||||||
img/
|
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
dependencies:
|
dependencies:
|
||||||
- name: redis
|
- name: redis
|
||||||
repository: oci://registry-1.docker.io/bitnamicharts
|
repository: oci://registry-1.docker.io/bitnamicharts
|
||||||
version: 18.19.2
|
version: 18.4.0
|
||||||
- name: postgresql
|
- name: postgresql
|
||||||
repository: oci://registry-1.docker.io/bitnamicharts
|
repository: oci://registry-1.docker.io/bitnamicharts
|
||||||
version: 13.4.6
|
version: 13.2.14
|
||||||
- name: common
|
- name: common
|
||||||
repository: oci://registry-1.docker.io/bitnamicharts
|
repository: oci://registry-1.docker.io/bitnamicharts
|
||||||
version: 2.19.0
|
version: 2.13.3
|
||||||
digest: sha256:b4965a22517e61212e78abb8d1cbe86e800c8664b3139e2047f4bd62b3e55b24
|
digest: sha256:7bede05a463745ea72d332aaaf406d84e335d8af09dce403736f4e4e14c3554d
|
||||||
generated: "2024-03-13T11:51:34.216594+01:00"
|
generated: "2023-11-21T18:18:20.024990735Z"
|
||||||
|
|||||||
@@ -2,21 +2,21 @@ annotations:
|
|||||||
category: Infrastructure
|
category: Infrastructure
|
||||||
images: |
|
images: |
|
||||||
- name: kubeapps-apis
|
- name: kubeapps-apis
|
||||||
image: docker.io/bitnami/kubeapps-apis:2.9.0-debian-12-r19
|
image: docker.io/bitnami/kubeapps-apis:2.9.0-debian-11-r13
|
||||||
- name: kubeapps-apprepository-controller
|
- name: kubeapps-apprepository-controller
|
||||||
image: docker.io/bitnami/kubeapps-apprepository-controller:2.9.0-debian-12-r18
|
image: docker.io/bitnami/kubeapps-apprepository-controller:2.9.0-debian-11-r12
|
||||||
- name: kubeapps-asset-syncer
|
- name: kubeapps-asset-syncer
|
||||||
image: docker.io/bitnami/kubeapps-asset-syncer:2.9.0-debian-12-r19
|
image: docker.io/bitnami/kubeapps-asset-syncer:2.9.0-debian-11-r13
|
||||||
- name: kubeapps-dashboard
|
|
||||||
image: docker.io/bitnami/kubeapps-dashboard:2.9.0-debian-12-r18
|
|
||||||
- name: kubeapps-oci-catalog
|
- name: kubeapps-oci-catalog
|
||||||
image: docker.io/bitnami/kubeapps-oci-catalog:2.9.0-debian-12-r17
|
image: docker.io/bitnami/kubeapps-oci-catalog:2.9.0-debian-11-r6
|
||||||
- name: kubeapps-pinniped-proxy
|
- name: kubeapps-pinniped-proxy
|
||||||
image: docker.io/bitnami/kubeapps-pinniped-proxy:2.9.0-debian-12-r17
|
image: docker.io/bitnami/kubeapps-pinniped-proxy:2.9.0-debian-11-r10
|
||||||
|
- name: kubeapps-dashboard
|
||||||
|
image: docker.io/bitnami/kubeapps-dashboard:2.9.0-debian-11-r16
|
||||||
- name: nginx
|
- name: nginx
|
||||||
image: docker.io/bitnami/nginx:1.25.4-debian-12-r3
|
image: docker.io/bitnami/nginx:1.25.3-debian-11-r1
|
||||||
- name: oauth2-proxy
|
- name: oauth2-proxy
|
||||||
image: docker.io/bitnami/oauth2-proxy:7.6.0-debian-12-r4
|
image: docker.io/bitnami/oauth2-proxy:7.5.1-debian-11-r11
|
||||||
licenses: Apache-2.0
|
licenses: Apache-2.0
|
||||||
apiVersion: v2
|
apiVersion: v2
|
||||||
appVersion: 2.9.0
|
appVersion: 2.9.0
|
||||||
@@ -51,4 +51,4 @@ maintainers:
|
|||||||
name: kubeapps
|
name: kubeapps
|
||||||
sources:
|
sources:
|
||||||
- https://github.com/bitnami/charts/tree/main/bitnami/kubeapps
|
- https://github.com/bitnami/charts/tree/main/bitnami/kubeapps
|
||||||
version: 14.7.2
|
version: 14.1.2
|
||||||
|
|||||||
@@ -62,11 +62,10 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### Global parameters
|
### Global parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
|
| ------------------------- | ----------------------------------------------- | ----- |
|
||||||
| `global.imageRegistry` | Global Docker image registry | `""` |
|
| `global.imageRegistry` | Global Docker image registry | `""` |
|
||||||
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
|
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
|
||||||
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
|
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
|
||||||
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` |
|
|
||||||
|
|
||||||
### Common parameters
|
### Common parameters
|
||||||
|
|
||||||
@@ -113,7 +112,7 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### Frontend parameters
|
### Frontend parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
|
| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------- | ----------------------- |
|
||||||
| `frontend.image.registry` | NGINX image registry | `REGISTRY_NAME` |
|
| `frontend.image.registry` | NGINX image registry | `REGISTRY_NAME` |
|
||||||
| `frontend.image.repository` | NGINX image repository | `REPOSITORY_NAME/nginx` |
|
| `frontend.image.repository` | NGINX image repository | `REPOSITORY_NAME/nginx` |
|
||||||
| `frontend.image.digest` | NGINX image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
| `frontend.image.digest` | NGINX image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||||
@@ -125,7 +124,6 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `frontend.largeClientHeaderBuffers` | Set large_client_header_buffers in NGINX config | `4 32k` |
|
| `frontend.largeClientHeaderBuffers` | Set large_client_header_buffers in NGINX config | `4 32k` |
|
||||||
| `frontend.replicaCount` | Number of frontend replicas to deploy | `2` |
|
| `frontend.replicaCount` | Number of frontend replicas to deploy | `2` |
|
||||||
| `frontend.updateStrategy.type` | Frontend deployment strategy type. | `RollingUpdate` |
|
| `frontend.updateStrategy.type` | Frontend deployment strategy type. | `RollingUpdate` |
|
||||||
| `frontend.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if frontend.resources is set (frontend.resources is recommended for production). | `none` |
|
|
||||||
| `frontend.resources.limits.cpu` | The CPU limits for the NGINX container | `250m` |
|
| `frontend.resources.limits.cpu` | The CPU limits for the NGINX container | `250m` |
|
||||||
| `frontend.resources.limits.memory` | The memory limits for the NGINX container | `128Mi` |
|
| `frontend.resources.limits.memory` | The memory limits for the NGINX container | `128Mi` |
|
||||||
| `frontend.resources.requests.cpu` | The requested CPU for the NGINX container | `25m` |
|
| `frontend.resources.requests.cpu` | The requested CPU for the NGINX container | `25m` |
|
||||||
@@ -135,14 +133,9 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `frontend.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the NGINX container | `""` |
|
| `frontend.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the NGINX container | `""` |
|
||||||
| `frontend.containerPorts.http` | NGINX HTTP container port | `8080` |
|
| `frontend.containerPorts.http` | NGINX HTTP container port | `8080` |
|
||||||
| `frontend.podSecurityContext.enabled` | Enabled frontend pods' Security Context | `true` |
|
| `frontend.podSecurityContext.enabled` | Enabled frontend pods' Security Context | `true` |
|
||||||
| `frontend.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
|
|
||||||
| `frontend.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
|
|
||||||
| `frontend.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
|
|
||||||
| `frontend.podSecurityContext.fsGroup` | Set frontend pod's Security Context fsGroup | `1001` |
|
| `frontend.podSecurityContext.fsGroup` | Set frontend pod's Security Context fsGroup | `1001` |
|
||||||
| `frontend.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `frontend.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `frontend.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `frontend.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `frontend.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `frontend.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `frontend.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `frontend.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `frontend.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `frontend.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `frontend.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `frontend.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
@@ -186,7 +179,6 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `frontend.priorityClassName` | Priority class name for frontend pods | `""` |
|
| `frontend.priorityClassName` | Priority class name for frontend pods | `""` |
|
||||||
| `frontend.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
| `frontend.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
||||||
| `frontend.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
| `frontend.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
||||||
| `frontend.automountServiceAccountToken` | Mount Service Account token in pod | `true` |
|
|
||||||
| `frontend.hostAliases` | Custom host aliases for frontend pods | `[]` |
|
| `frontend.hostAliases` | Custom host aliases for frontend pods | `[]` |
|
||||||
| `frontend.extraVolumes` | Optionally specify extra list of additional volumes for frontend pods | `[]` |
|
| `frontend.extraVolumes` | Optionally specify extra list of additional volumes for frontend pods | `[]` |
|
||||||
| `frontend.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for frontend container(s) | `[]` |
|
| `frontend.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for frontend container(s) | `[]` |
|
||||||
@@ -207,7 +199,7 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### Dashboard parameters
|
### Dashboard parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------ |
|
| ------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------------------ |
|
||||||
| `dashboard.enabled` | Specifies whether Kubeapps Dashboard should be deployed or not | `true` |
|
| `dashboard.enabled` | Specifies whether Kubeapps Dashboard should be deployed or not | `true` |
|
||||||
| `dashboard.image.registry` | Dashboard image registry | `REGISTRY_NAME` |
|
| `dashboard.image.registry` | Dashboard image registry | `REGISTRY_NAME` |
|
||||||
| `dashboard.image.repository` | Dashboard image repository | `REPOSITORY_NAME/kubeapps-dashboard` |
|
| `dashboard.image.repository` | Dashboard image repository | `REPOSITORY_NAME/kubeapps-dashboard` |
|
||||||
@@ -229,20 +221,14 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `dashboard.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for the Dashboard container | `""` |
|
| `dashboard.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for the Dashboard container | `""` |
|
||||||
| `dashboard.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the Dashboard container | `""` |
|
| `dashboard.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the Dashboard container | `""` |
|
||||||
| `dashboard.containerPorts.http` | Dashboard HTTP container port | `8080` |
|
| `dashboard.containerPorts.http` | Dashboard HTTP container port | `8080` |
|
||||||
| `dashboard.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if dashboard.resources is set (dashboard.resources is recommended for production). | `none` |
|
|
||||||
| `dashboard.resources.limits.cpu` | The CPU limits for the Dashboard container | `250m` |
|
| `dashboard.resources.limits.cpu` | The CPU limits for the Dashboard container | `250m` |
|
||||||
| `dashboard.resources.limits.memory` | The memory limits for the Dashboard container | `128Mi` |
|
| `dashboard.resources.limits.memory` | The memory limits for the Dashboard container | `128Mi` |
|
||||||
| `dashboard.resources.requests.cpu` | The requested CPU for the Dashboard container | `25m` |
|
| `dashboard.resources.requests.cpu` | The requested CPU for the Dashboard container | `25m` |
|
||||||
| `dashboard.resources.requests.memory` | The requested memory for the Dashboard container | `32Mi` |
|
| `dashboard.resources.requests.memory` | The requested memory for the Dashboard container | `32Mi` |
|
||||||
| `dashboard.podSecurityContext.enabled` | Enabled Dashboard pods' Security Context | `true` |
|
| `dashboard.podSecurityContext.enabled` | Enabled Dashboard pods' Security Context | `true` |
|
||||||
| `dashboard.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
|
|
||||||
| `dashboard.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
|
|
||||||
| `dashboard.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
|
|
||||||
| `dashboard.podSecurityContext.fsGroup` | Set Dashboard pod's Security Context fsGroup | `1001` |
|
| `dashboard.podSecurityContext.fsGroup` | Set Dashboard pod's Security Context fsGroup | `1001` |
|
||||||
| `dashboard.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `dashboard.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `dashboard.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `dashboard.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `dashboard.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `dashboard.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `dashboard.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `dashboard.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `dashboard.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `dashboard.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `dashboard.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `dashboard.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
@@ -286,7 +272,6 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `dashboard.priorityClassName` | Priority class name for Dashboard pods | `""` |
|
| `dashboard.priorityClassName` | Priority class name for Dashboard pods | `""` |
|
||||||
| `dashboard.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
| `dashboard.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
||||||
| `dashboard.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
| `dashboard.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
||||||
| `dashboard.automountServiceAccountToken` | Mount Service Account token in pod | `true` |
|
|
||||||
| `dashboard.hostAliases` | Custom host aliases for Dashboard pods | `[]` |
|
| `dashboard.hostAliases` | Custom host aliases for Dashboard pods | `[]` |
|
||||||
| `dashboard.extraVolumes` | Optionally specify extra list of additional volumes for Dashboard pods | `[]` |
|
| `dashboard.extraVolumes` | Optionally specify extra list of additional volumes for Dashboard pods | `[]` |
|
||||||
| `dashboard.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Dashboard container(s) | `[]` |
|
| `dashboard.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for Dashboard container(s) | `[]` |
|
||||||
@@ -298,7 +283,7 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### AppRepository Controller parameters
|
### AppRepository Controller parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------- |
|
| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------- |
|
||||||
| `apprepository.image.registry` | Kubeapps AppRepository Controller image registry | `REGISTRY_NAME` |
|
| `apprepository.image.registry` | Kubeapps AppRepository Controller image registry | `REGISTRY_NAME` |
|
||||||
| `apprepository.image.repository` | Kubeapps AppRepository Controller image repository | `REPOSITORY_NAME/kubeapps-apprepository-controller` |
|
| `apprepository.image.repository` | Kubeapps AppRepository Controller image repository | `REPOSITORY_NAME/kubeapps-apprepository-controller` |
|
||||||
| `apprepository.image.digest` | Kubeapps AppRepository Controller image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
| `apprepository.image.digest` | Kubeapps AppRepository Controller image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||||
@@ -322,20 +307,14 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `apprepository.extraFlags` | Additional command line flags for AppRepository Controller | `[]` |
|
| `apprepository.extraFlags` | Additional command line flags for AppRepository Controller | `[]` |
|
||||||
| `apprepository.replicaCount` | Number of AppRepository Controller replicas to deploy | `1` |
|
| `apprepository.replicaCount` | Number of AppRepository Controller replicas to deploy | `1` |
|
||||||
| `apprepository.updateStrategy.type` | AppRepository Controller deployment strategy type. | `RollingUpdate` |
|
| `apprepository.updateStrategy.type` | AppRepository Controller deployment strategy type. | `RollingUpdate` |
|
||||||
| `apprepository.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if apprepository.resources is set (apprepository.resources is recommended for production). | `none` |
|
|
||||||
| `apprepository.resources.limits.cpu` | The CPU limits for the AppRepository Controller container | `250m` |
|
| `apprepository.resources.limits.cpu` | The CPU limits for the AppRepository Controller container | `250m` |
|
||||||
| `apprepository.resources.limits.memory` | The memory limits for the AppRepository Controller container | `128Mi` |
|
| `apprepository.resources.limits.memory` | The memory limits for the AppRepository Controller container | `128Mi` |
|
||||||
| `apprepository.resources.requests.cpu` | The requested CPU for the AppRepository Controller container | `25m` |
|
| `apprepository.resources.requests.cpu` | The requested CPU for the AppRepository Controller container | `25m` |
|
||||||
| `apprepository.resources.requests.memory` | The requested memory for the AppRepository Controller container | `32Mi` |
|
| `apprepository.resources.requests.memory` | The requested memory for the AppRepository Controller container | `32Mi` |
|
||||||
| `apprepository.podSecurityContext.enabled` | Enabled AppRepository Controller pods' Security Context | `true` |
|
| `apprepository.podSecurityContext.enabled` | Enabled AppRepository Controller pods' Security Context | `true` |
|
||||||
| `apprepository.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
|
|
||||||
| `apprepository.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
|
|
||||||
| `apprepository.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
|
|
||||||
| `apprepository.podSecurityContext.fsGroup` | Set AppRepository Controller pod's Security Context fsGroup | `1001` |
|
| `apprepository.podSecurityContext.fsGroup` | Set AppRepository Controller pod's Security Context fsGroup | `1001` |
|
||||||
| `apprepository.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `apprepository.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `apprepository.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `apprepository.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `apprepository.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `apprepository.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `apprepository.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `apprepository.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `apprepository.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `apprepository.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `apprepository.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `apprepository.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
@@ -363,19 +342,18 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `apprepository.priorityClassName` | Priority class name for AppRepository Controller pods | `""` |
|
| `apprepository.priorityClassName` | Priority class name for AppRepository Controller pods | `""` |
|
||||||
| `apprepository.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
| `apprepository.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
||||||
| `apprepository.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
| `apprepository.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
||||||
| `apprepository.automountServiceAccountToken` | Mount Service Account token in pod | `true` |
|
|
||||||
| `apprepository.hostAliases` | Custom host aliases for AppRepository Controller pods | `[]` |
|
| `apprepository.hostAliases` | Custom host aliases for AppRepository Controller pods | `[]` |
|
||||||
| `apprepository.sidecars` | Add additional sidecar containers to the AppRepository Controller pod(s) | `[]` |
|
| `apprepository.sidecars` | Add additional sidecar containers to the AppRepository Controller pod(s) | `[]` |
|
||||||
| `apprepository.initContainers` | Add additional init containers to the AppRepository Controller pod(s) | `[]` |
|
| `apprepository.initContainers` | Add additional init containers to the AppRepository Controller pod(s) | `[]` |
|
||||||
| `apprepository.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
| `apprepository.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
||||||
| `apprepository.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
|
| `apprepository.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
|
||||||
| `apprepository.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` |
|
| `apprepository.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
|
||||||
| `apprepository.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
|
| `apprepository.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
|
||||||
|
|
||||||
### Auth Proxy parameters
|
### Auth Proxy parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------ |
|
| ------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ |
|
||||||
| `authProxy.enabled` | Specifies whether Kubeapps should configure OAuth login/logout | `false` |
|
| `authProxy.enabled` | Specifies whether Kubeapps should configure OAuth login/logout | `false` |
|
||||||
| `authProxy.image.registry` | OAuth2 Proxy image registry | `REGISTRY_NAME` |
|
| `authProxy.image.registry` | OAuth2 Proxy image registry | `REGISTRY_NAME` |
|
||||||
| `authProxy.image.repository` | OAuth2 Proxy image repository | `REPOSITORY_NAME/oauth2-proxy` |
|
| `authProxy.image.repository` | OAuth2 Proxy image repository | `REPOSITORY_NAME/oauth2-proxy` |
|
||||||
@@ -404,16 +382,13 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `authProxy.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Auth Proxy container(s) | `[]` |
|
| `authProxy.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Auth Proxy container(s) | `[]` |
|
||||||
| `authProxy.containerPorts.proxy` | Auth Proxy HTTP container port | `3000` |
|
| `authProxy.containerPorts.proxy` | Auth Proxy HTTP container port | `3000` |
|
||||||
| `authProxy.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `authProxy.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `authProxy.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `authProxy.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `authProxy.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `authProxy.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `authProxy.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `authProxy.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `authProxy.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `authProxy.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `authProxy.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `authProxy.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
| `authProxy.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
|
| `authProxy.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
|
||||||
| `authProxy.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
|
| `authProxy.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
|
||||||
| `authProxy.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
|
| `authProxy.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
|
||||||
| `authProxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if authProxy.resources is set (authProxy.resources is recommended for production). | `none` |
|
|
||||||
| `authProxy.resources.limits.cpu` | The CPU limits for the OAuth2 Proxy container | `250m` |
|
| `authProxy.resources.limits.cpu` | The CPU limits for the OAuth2 Proxy container | `250m` |
|
||||||
| `authProxy.resources.limits.memory` | The memory limits for the OAuth2 Proxy container | `128Mi` |
|
| `authProxy.resources.limits.memory` | The memory limits for the OAuth2 Proxy container | `128Mi` |
|
||||||
| `authProxy.resources.requests.cpu` | The requested CPU for the OAuth2 Proxy container | `25m` |
|
| `authProxy.resources.requests.cpu` | The requested CPU for the OAuth2 Proxy container | `25m` |
|
||||||
@@ -422,7 +397,7 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### Pinniped Proxy parameters
|
### Pinniped Proxy parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
|
| ----------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------- | ----------------------------------------- |
|
||||||
| `pinnipedProxy.enabled` | Specifies whether Kubeapps should configure Pinniped Proxy | `false` |
|
| `pinnipedProxy.enabled` | Specifies whether Kubeapps should configure Pinniped Proxy | `false` |
|
||||||
| `pinnipedProxy.image.registry` | Pinniped Proxy image registry | `REGISTRY_NAME` |
|
| `pinnipedProxy.image.registry` | Pinniped Proxy image registry | `REGISTRY_NAME` |
|
||||||
| `pinnipedProxy.image.repository` | Pinniped Proxy image repository | `REPOSITORY_NAME/kubeapps-pinniped-proxy` |
|
| `pinnipedProxy.image.repository` | Pinniped Proxy image repository | `REPOSITORY_NAME/kubeapps-pinniped-proxy` |
|
||||||
@@ -444,16 +419,13 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `pinnipedProxy.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Pinniped Proxy container(s) | `[]` |
|
| `pinnipedProxy.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Pinniped Proxy container(s) | `[]` |
|
||||||
| `pinnipedProxy.containerPorts.pinnipedProxy` | Pinniped Proxy container port | `3333` |
|
| `pinnipedProxy.containerPorts.pinnipedProxy` | Pinniped Proxy container port | `3333` |
|
||||||
| `pinnipedProxy.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `pinnipedProxy.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `pinnipedProxy.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `pinnipedProxy.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `pinnipedProxy.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `pinnipedProxy.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `pinnipedProxy.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `pinnipedProxy.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `pinnipedProxy.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `pinnipedProxy.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `pinnipedProxy.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `pinnipedProxy.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
| `pinnipedProxy.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
|
| `pinnipedProxy.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` |
|
||||||
| `pinnipedProxy.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
|
| `pinnipedProxy.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` |
|
||||||
| `pinnipedProxy.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
|
| `pinnipedProxy.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` |
|
||||||
| `pinnipedProxy.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if pinnipedProxy.resources is set (pinnipedProxy.resources is recommended for production). | `none` |
|
|
||||||
| `pinnipedProxy.resources.limits.cpu` | The CPU limits for the Pinniped Proxy container | `250m` |
|
| `pinnipedProxy.resources.limits.cpu` | The CPU limits for the Pinniped Proxy container | `250m` |
|
||||||
| `pinnipedProxy.resources.limits.memory` | The memory limits for the Pinniped Proxy container | `128Mi` |
|
| `pinnipedProxy.resources.limits.memory` | The memory limits for the Pinniped Proxy container | `128Mi` |
|
||||||
| `pinnipedProxy.resources.requests.cpu` | The requested CPU for the Pinniped Proxy container | `25m` |
|
| `pinnipedProxy.resources.requests.cpu` | The requested CPU for the Pinniped Proxy container | `25m` |
|
||||||
@@ -480,7 +452,7 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### Database Parameters
|
### Database Parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------ |
|
| ---------------------------------------- | ---------------------------------------------------------------------------- | ------------ |
|
||||||
| `postgresql.enabled` | Deploy a PostgreSQL server to satisfy the applications database requirements | `true` |
|
| `postgresql.enabled` | Deploy a PostgreSQL server to satisfy the applications database requirements | `true` |
|
||||||
| `postgresql.auth.username` | Username for PostgreSQL server | `postgres` |
|
| `postgresql.auth.username` | Username for PostgreSQL server | `postgres` |
|
||||||
| `postgresql.auth.postgresPassword` | Password for 'postgres' user | `""` |
|
| `postgresql.auth.postgresPassword` | Password for 'postgres' user | `""` |
|
||||||
@@ -489,7 +461,6 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `postgresql.primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `false` |
|
| `postgresql.primary.persistence.enabled` | Enable PostgreSQL Primary data persistence using PVC | `false` |
|
||||||
| `postgresql.architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
|
| `postgresql.architecture` | PostgreSQL architecture (`standalone` or `replication`) | `standalone` |
|
||||||
| `postgresql.securityContext.enabled` | Enabled PostgreSQL replicas pods' Security Context | `false` |
|
| `postgresql.securityContext.enabled` | Enabled PostgreSQL replicas pods' Security Context | `false` |
|
||||||
| `postgresql.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if postgresql.resources is set (postgresql.resources is recommended for production). | `none` |
|
|
||||||
| `postgresql.resources.limits` | The resources limits for the PostgreSQL container | `{}` |
|
| `postgresql.resources.limits` | The resources limits for the PostgreSQL container | `{}` |
|
||||||
| `postgresql.resources.requests.cpu` | The requested CPU for the PostgreSQL container | `250m` |
|
| `postgresql.resources.requests.cpu` | The requested CPU for the PostgreSQL container | `250m` |
|
||||||
| `postgresql.resources.requests.memory` | The requested memory for the PostgreSQL container | `256Mi` |
|
| `postgresql.resources.requests.memory` | The requested memory for the PostgreSQL container | `256Mi` |
|
||||||
@@ -497,7 +468,7 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
### kubeappsapis parameters
|
### kubeappsapis parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------- |
|
| ----------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------- |
|
||||||
| `kubeappsapis.enabledPlugins` | Manually override which plugins are enabled for the Kubeapps-APIs service | `[]` |
|
| `kubeappsapis.enabledPlugins` | Manually override which plugins are enabled for the Kubeapps-APIs service | `[]` |
|
||||||
| `kubeappsapis.pluginConfig.core.packages.v1alpha1.versionsInSummary.major` | Number of major versions to display in the summary | `3` |
|
| `kubeappsapis.pluginConfig.core.packages.v1alpha1.versionsInSummary.major` | Number of major versions to display in the summary | `3` |
|
||||||
| `kubeappsapis.pluginConfig.core.packages.v1alpha1.versionsInSummary.minor` | Number of minor versions to display in the summary | `3` |
|
| `kubeappsapis.pluginConfig.core.packages.v1alpha1.versionsInSummary.minor` | Number of minor versions to display in the summary | `3` |
|
||||||
@@ -527,20 +498,14 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `kubeappsapis.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for the KubeappsAPIs container | `""` |
|
| `kubeappsapis.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for the KubeappsAPIs container | `""` |
|
||||||
| `kubeappsapis.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the KubeappsAPIs container | `""` |
|
| `kubeappsapis.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the KubeappsAPIs container | `""` |
|
||||||
| `kubeappsapis.containerPorts.http` | KubeappsAPIs HTTP container port | `50051` |
|
| `kubeappsapis.containerPorts.http` | KubeappsAPIs HTTP container port | `50051` |
|
||||||
| `kubeappsapis.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if kubeappsapis.resources is set (kubeappsapis.resources is recommended for production). | `none` |
|
|
||||||
| `kubeappsapis.resources.limits.cpu` | The CPU limits for the KubeappsAPIs container | `250m` |
|
| `kubeappsapis.resources.limits.cpu` | The CPU limits for the KubeappsAPIs container | `250m` |
|
||||||
| `kubeappsapis.resources.limits.memory` | The memory limits for the KubeappsAPIs container | `256Mi` |
|
| `kubeappsapis.resources.limits.memory` | The memory limits for the KubeappsAPIs container | `256Mi` |
|
||||||
| `kubeappsapis.resources.requests.cpu` | The requested CPU for the KubeappsAPIs container | `25m` |
|
| `kubeappsapis.resources.requests.cpu` | The requested CPU for the KubeappsAPIs container | `25m` |
|
||||||
| `kubeappsapis.resources.requests.memory` | The requested memory for the KubeappsAPIs container | `32Mi` |
|
| `kubeappsapis.resources.requests.memory` | The requested memory for the KubeappsAPIs container | `32Mi` |
|
||||||
| `kubeappsapis.podSecurityContext.enabled` | Enabled KubeappsAPIs pods' Security Context | `true` |
|
| `kubeappsapis.podSecurityContext.enabled` | Enabled KubeappsAPIs pods' Security Context | `true` |
|
||||||
| `kubeappsapis.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
|
|
||||||
| `kubeappsapis.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
|
|
||||||
| `kubeappsapis.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
|
|
||||||
| `kubeappsapis.podSecurityContext.fsGroup` | Set KubeappsAPIs pod's Security Context fsGroup | `1001` |
|
| `kubeappsapis.podSecurityContext.fsGroup` | Set KubeappsAPIs pod's Security Context fsGroup | `1001` |
|
||||||
| `kubeappsapis.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `kubeappsapis.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `kubeappsapis.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `kubeappsapis.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `kubeappsapis.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `kubeappsapis.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `kubeappsapis.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `kubeappsapis.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `kubeappsapis.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `kubeappsapis.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `kubeappsapis.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `kubeappsapis.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
@@ -586,7 +551,6 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `kubeappsapis.priorityClassName` | Priority class name for KubeappsAPIs pods | `""` |
|
| `kubeappsapis.priorityClassName` | Priority class name for KubeappsAPIs pods | `""` |
|
||||||
| `kubeappsapis.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
| `kubeappsapis.schedulerName` | Name of the k8s scheduler (other than default) | `""` |
|
||||||
| `kubeappsapis.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
| `kubeappsapis.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` |
|
||||||
| `kubeappsapis.automountServiceAccountToken` | Mount Service Account token in pod | `true` |
|
|
||||||
| `kubeappsapis.hostAliases` | Custom host aliases for KubeappsAPIs pods | `[]` |
|
| `kubeappsapis.hostAliases` | Custom host aliases for KubeappsAPIs pods | `[]` |
|
||||||
| `kubeappsapis.sidecars` | Add additional sidecar containers to the KubeappsAPIs pod(s) | `[]` |
|
| `kubeappsapis.sidecars` | Add additional sidecar containers to the KubeappsAPIs pod(s) | `[]` |
|
||||||
| `kubeappsapis.initContainers` | Add additional init containers to the KubeappsAPIs pod(s) | `[]` |
|
| `kubeappsapis.initContainers` | Add additional init containers to the KubeappsAPIs pod(s) | `[]` |
|
||||||
@@ -594,13 +558,13 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `kubeappsapis.service.annotations` | Additional custom annotations for KubeappsAPIs service | `{}` |
|
| `kubeappsapis.service.annotations` | Additional custom annotations for KubeappsAPIs service | `{}` |
|
||||||
| `kubeappsapis.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
| `kubeappsapis.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
||||||
| `kubeappsapis.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
|
| `kubeappsapis.serviceAccount.name` | Name of the service account to use. If not set and create is true, a name is generated using the fullname template. | `""` |
|
||||||
| `kubeappsapis.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` |
|
| `kubeappsapis.serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` |
|
||||||
| `kubeappsapis.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
|
| `kubeappsapis.serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` |
|
||||||
|
|
||||||
### OCI Catalog chart configuration
|
### OCI Catalog chart configuration
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| -------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- |
|
| -------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------- | -------------------------------------- |
|
||||||
| `ociCatalog.enabled` | Enable the OCI catalog gRPC service for cataloging | `false` |
|
| `ociCatalog.enabled` | Enable the OCI catalog gRPC service for cataloging | `false` |
|
||||||
| `ociCatalog.image.registry` | OCI Catalog image registry | `REGISTRY_NAME` |
|
| `ociCatalog.image.registry` | OCI Catalog image registry | `REGISTRY_NAME` |
|
||||||
| `ociCatalog.image.repository` | OCI Catalog image repository | `REPOSITORY_NAME/kubeapps-oci-catalog` |
|
| `ociCatalog.image.repository` | OCI Catalog image repository | `REPOSITORY_NAME/kubeapps-oci-catalog` |
|
||||||
@@ -613,15 +577,12 @@ Once you have installed Kubeapps follow the [Getting Started Guide](https://gith
|
|||||||
| `ociCatalog.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for the OCI Catalog container | `""` |
|
| `ociCatalog.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for the OCI Catalog container | `""` |
|
||||||
| `ociCatalog.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the OCI Catalog container | `""` |
|
| `ociCatalog.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for the OCI Catalog container | `""` |
|
||||||
| `ociCatalog.containerPorts.grpc` | OCI Catalog gRPC container port | `50061` |
|
| `ociCatalog.containerPorts.grpc` | OCI Catalog gRPC container port | `50061` |
|
||||||
| `ociCatalog.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if ociCatalog.resources is set (ociCatalog.resources is recommended for production). | `none` |
|
|
||||||
| `ociCatalog.resources.limits.cpu` | The CPU limits for the OCI Catalog container | `250m` |
|
| `ociCatalog.resources.limits.cpu` | The CPU limits for the OCI Catalog container | `250m` |
|
||||||
| `ociCatalog.resources.limits.memory` | The memory limits for the OCI Catalog container | `256Mi` |
|
| `ociCatalog.resources.limits.memory` | The memory limits for the OCI Catalog container | `256Mi` |
|
||||||
| `ociCatalog.resources.requests.cpu` | The requested CPU for the OCI Catalog container | `25m` |
|
| `ociCatalog.resources.requests.cpu` | The requested CPU for the OCI Catalog container | `25m` |
|
||||||
| `ociCatalog.resources.requests.memory` | The requested memory for the OCI Catalog container | `32Mi` |
|
| `ociCatalog.resources.requests.memory` | The requested memory for the OCI Catalog container | `32Mi` |
|
||||||
| `ociCatalog.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
| `ociCatalog.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` |
|
||||||
| `ociCatalog.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `ociCatalog.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
| `ociCatalog.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` |
|
||||||
| `ociCatalog.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `0` |
|
|
||||||
| `ociCatalog.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
| `ociCatalog.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` |
|
||||||
| `ociCatalog.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
| `ociCatalog.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` |
|
||||||
| `ociCatalog.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
| `ociCatalog.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `false` |
|
||||||
@@ -691,12 +652,6 @@ helm install kubeapps --namespace kubeapps -f custom-values.yaml oci://REGISTRY_
|
|||||||
|
|
||||||
## Configuration and installation details
|
## Configuration and installation details
|
||||||
|
|
||||||
### Resource requests and limits
|
|
||||||
|
|
||||||
Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
|
|
||||||
|
|
||||||
To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
|
|
||||||
|
|
||||||
### Configuring Initial Repositories
|
### Configuring Initial Repositories
|
||||||
|
|
||||||
By default, Kubeapps will track the [Bitnami Application Catalog](https://github.com/bitnami/charts). To change these defaults, override with your desired parameters the `apprepository.initialRepos` object present in the [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/kubeapps/values.yaml) file.
|
By default, Kubeapps will track the [Bitnami Application Catalog](https://github.com/bitnami/charts). To change these defaults, override with your desired parameters the `apprepository.initialRepos` object present in the [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/kubeapps/values.yaml) file.
|
||||||
@@ -954,7 +909,7 @@ Feel free to [open an issue](https://github.com/vmware-tanzu/kubeapps/issues/new
|
|||||||
This major release renames several values in this chart and adds missing features, in order to get aligned with the rest of the assets in the Bitnami charts repository.
|
This major release renames several values in this chart and adds missing features, in order to get aligned with the rest of the assets in the Bitnami charts repository.
|
||||||
|
|
||||||
Additionally, it updates both the [PostgreSQL](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) and the [Redis](https://github.com/bitnami/charts/tree/main/bitnami/redis) subcharts to their latest major versions, 11.0.0 and 16.0.0 respectively, where similar changes have been also performed.
|
Additionally, it updates both the [PostgreSQL](https://github.com/bitnami/charts/tree/main/bitnami/postgresql) and the [Redis](https://github.com/bitnami/charts/tree/main/bitnami/redis) subcharts to their latest major versions, 11.0.0 and 16.0.0 respectively, where similar changes have been also performed.
|
||||||
Check [PostgreSQL Upgrading Notes](https://github.com/bitnami/charts/tree/master/bitnami/postgresql#to-1100) and [Redis Upgrading Notes](https://github.com/bitnami/charts/tree/main/bitnami/redis#to-1600) for more information.
|
Check [PostgreSQL Upgrading Notes](https://docs.bitnami.com/kubernetes/infrastructure/postgresql/administration/upgrade/#to-1100) and [Redis Upgrading Notes](https://github.com/bitnami/charts/tree/main/bitnami/redis#to-1600) for more information.
|
||||||
|
|
||||||
The following values have been renamed:
|
The following values have been renamed:
|
||||||
|
|
||||||
@@ -1178,7 +1133,7 @@ After that, you should be able to upgrade Kubeapps as always and the database wi
|
|||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
|
Copyright © 2023 VMware, Inc.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
|
|||||||
@@ -20,5 +20,3 @@
|
|||||||
.idea/
|
.idea/
|
||||||
*.tmproj
|
*.tmproj
|
||||||
.vscode/
|
.vscode/
|
||||||
# img folder
|
|
||||||
img/
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ annotations:
|
|||||||
category: Infrastructure
|
category: Infrastructure
|
||||||
licenses: Apache-2.0
|
licenses: Apache-2.0
|
||||||
apiVersion: v2
|
apiVersion: v2
|
||||||
appVersion: 2.19.0
|
appVersion: 2.13.3
|
||||||
description: A Library Helm Chart for grouping common logic between bitnami charts.
|
description: A Library Helm Chart for grouping common logic between bitnami charts.
|
||||||
This chart is not deployable by itself.
|
This chart is not deployable by itself.
|
||||||
home: https://bitnami.com
|
home: https://bitnami.com
|
||||||
@@ -20,4 +20,4 @@ name: common
|
|||||||
sources:
|
sources:
|
||||||
- https://github.com/bitnami/charts
|
- https://github.com/bitnami/charts
|
||||||
type: library
|
type: library
|
||||||
version: 2.19.0
|
version: 2.13.3
|
||||||
|
|||||||
@@ -24,14 +24,14 @@ data:
|
|||||||
myvalue: "Hello World"
|
myvalue: "Hello World"
|
||||||
```
|
```
|
||||||
|
|
||||||
Looking to use our applications in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
|
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager.
|
This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager.
|
||||||
|
|
||||||
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
|
Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
|
||||||
|
|
||||||
|
Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
- Kubernetes 1.23+
|
- Kubernetes 1.23+
|
||||||
@@ -220,7 +220,7 @@ helm install test mychart --set path.to.value00="",path.to.value01=""
|
|||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
|
Copyright © 2023 VMware, Inc.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
|
|||||||
@@ -1,39 +0,0 @@
|
|||||||
{{/*
|
|
||||||
Copyright VMware, Inc.
|
|
||||||
SPDX-License-Identifier: APACHE-2.0
|
|
||||||
*/}}
|
|
||||||
|
|
||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Return true if the detected platform is Openshift
|
|
||||||
Usage:
|
|
||||||
{{- include "common.compatibility.isOpenshift" . -}}
|
|
||||||
*/}}
|
|
||||||
{{- define "common.compatibility.isOpenshift" -}}
|
|
||||||
{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1" -}}
|
|
||||||
{{- true -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Render a compatible securityContext depending on the platform. By default it is maintained as it is. In other platforms like Openshift we remove default user/group values that do not work out of the box with the restricted-v1 SCC
|
|
||||||
Usage:
|
|
||||||
{{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" $) -}}
|
|
||||||
*/}}
|
|
||||||
{{- define "common.compatibility.renderSecurityContext" -}}
|
|
||||||
{{- $adaptedContext := .secContext -}}
|
|
||||||
{{- if .context.Values.global.compatibility -}}
|
|
||||||
{{- if .context.Values.global.compatibility.openshift -}}
|
|
||||||
{{- if or (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "force") (and (eq .context.Values.global.compatibility.openshift.adaptSecurityContext "auto") (include "common.compatibility.isOpenshift" .context)) -}}
|
|
||||||
{{/* Remove incompatible user/group values that do not work in Openshift out of the box */}}
|
|
||||||
{{- $adaptedContext = omit $adaptedContext "fsGroup" "runAsUser" "runAsGroup" -}}
|
|
||||||
{{- if not .secContext.seLinuxOptions -}}
|
|
||||||
{{/* If it is an empty object, we remove it from the resulting context because it causes validation issues */}}
|
|
||||||
{{- $adaptedContext = omit $adaptedContext "seLinuxOptions" -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- omit $adaptedContext "enabled" | toYaml -}}
|
|
||||||
{{- end -}}
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
{{/*
|
|
||||||
Copyright VMware, Inc.
|
|
||||||
SPDX-License-Identifier: APACHE-2.0
|
|
||||||
*/}}
|
|
||||||
|
|
||||||
{{/* vim: set filetype=mustache: */}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Return a resource request/limit object based on a given preset.
|
|
||||||
These presets are for basic testing and not meant to be used in production
|
|
||||||
{{ include "common.resources.preset" (dict "type" "nano") -}}
|
|
||||||
*/}}
|
|
||||||
{{- define "common.resources.preset" -}}
|
|
||||||
{{/* The limits are the requests increased by 50% (except ephemeral-storage)*/}}
|
|
||||||
{{- $presets := dict
|
|
||||||
"nano" (dict
|
|
||||||
"requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
"micro" (dict
|
|
||||||
"requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
"small" (dict
|
|
||||||
"requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
"medium" (dict
|
|
||||||
"requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
"large" (dict
|
|
||||||
"requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
"xlarge" (dict
|
|
||||||
"requests" (dict "cpu" "2.0" "memory" "4096Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
"2xlarge" (dict
|
|
||||||
"requests" (dict "cpu" "4.0" "memory" "8192Mi" "ephemeral-storage" "50Mi")
|
|
||||||
"limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "1024Mi")
|
|
||||||
)
|
|
||||||
}}
|
|
||||||
{{- if hasKey $presets .type -}}
|
|
||||||
{{- index $presets .type | toYaml -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
@@ -78,8 +78,6 @@ Params:
|
|||||||
- chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
|
- chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart.
|
||||||
- context - Context - Required - Parent context.
|
- context - Context - Required - Parent context.
|
||||||
- failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets.
|
- failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets.
|
||||||
- skipB64enc - Boolean - Optional - Default to false. If set to true, no the secret will not be base64 encrypted.
|
|
||||||
- skipQuote - Boolean - Optional - Default to false. If set to true, no quotes will be added around the secret.
|
|
||||||
The order in which this function returns a secret password:
|
The order in which this function returns a secret password:
|
||||||
1. Already existing 'Secret' resource
|
1. Already existing 'Secret' resource
|
||||||
(If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
|
(If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
|
||||||
@@ -93,6 +91,7 @@ The order in which this function returns a secret password:
|
|||||||
|
|
||||||
{{- $password := "" }}
|
{{- $password := "" }}
|
||||||
{{- $subchart := "" }}
|
{{- $subchart := "" }}
|
||||||
|
{{- $failOnNew := default true .failOnNew }}
|
||||||
{{- $chartName := default "" .chartName }}
|
{{- $chartName := default "" .chartName }}
|
||||||
{{- $passwordLength := default 10 .length }}
|
{{- $passwordLength := default 10 .length }}
|
||||||
{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
|
{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
|
||||||
@@ -100,14 +99,12 @@ The order in which this function returns a secret password:
|
|||||||
{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
|
{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
|
||||||
{{- if $secretData }}
|
{{- if $secretData }}
|
||||||
{{- if hasKey $secretData .key }}
|
{{- if hasKey $secretData .key }}
|
||||||
{{- $password = index $secretData .key | b64dec }}
|
{{- $password = index $secretData .key | quote }}
|
||||||
{{- else if not (eq .failOnNew false) }}
|
{{- else if $failOnNew }}
|
||||||
{{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
|
{{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
|
||||||
{{- else if $providedPasswordValue }}
|
|
||||||
{{- $password = $providedPasswordValue | toString }}
|
|
||||||
{{- end -}}
|
{{- end -}}
|
||||||
{{- else if $providedPasswordValue }}
|
{{- else if $providedPasswordValue }}
|
||||||
{{- $password = $providedPasswordValue | toString }}
|
{{- $password = $providedPasswordValue | toString | b64enc | quote }}
|
||||||
{{- else }}
|
{{- else }}
|
||||||
|
|
||||||
{{- if .context.Values.enabled }}
|
{{- if .context.Values.enabled }}
|
||||||
@@ -123,19 +120,12 @@ The order in which this function returns a secret password:
|
|||||||
{{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
|
{{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
|
||||||
{{- $password = randAscii $passwordLength }}
|
{{- $password = randAscii $passwordLength }}
|
||||||
{{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
|
{{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
|
||||||
{{- $password = printf "%s%s" $subStr $password | toString | shuffle }}
|
{{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
|
||||||
{{- else }}
|
{{- else }}
|
||||||
{{- $password = randAlphaNum $passwordLength }}
|
{{- $password = randAlphaNum $passwordLength | b64enc | quote }}
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end -}}
|
{{- end -}}
|
||||||
{{- if not .skipB64enc }}
|
|
||||||
{{- $password = $password | b64enc }}
|
|
||||||
{{- end -}}
|
|
||||||
{{- if .skipQuote -}}
|
|
||||||
{{- printf "%s" $password -}}
|
{{- printf "%s" $password -}}
|
||||||
{{- else -}}
|
|
||||||
{{- printf "%s" $password | quote -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
{{- end -}}
|
||||||
|
|
||||||
{{/*
|
{{/*
|
||||||
|
|||||||
@@ -13,70 +13,7 @@ Usage:
|
|||||||
|
|
||||||
{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
|
{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }}
|
||||||
WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
|
WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
|
||||||
+info https://docs.bitnami.com/tutorials/understand-rolling-tags-containers
|
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
|
||||||
{{- end }}
|
{{- end }}
|
||||||
{{- end -}}
|
|
||||||
|
|
||||||
{{/*
|
|
||||||
Warning about not setting the resource object in all deployments.
|
|
||||||
Usage:
|
|
||||||
{{ include "common.warnings.resources" (dict "sections" (list "path1" "path2") context $) }}
|
|
||||||
Example:
|
|
||||||
{{- include "common.warnings.resources" (dict "sections" (list "csiProvider.provider" "server" "volumePermissions" "") "context" $) }}
|
|
||||||
The list in the example assumes that the following values exist:
|
|
||||||
- csiProvider.provider.resources
|
|
||||||
- server.resources
|
|
||||||
- volumePermissions.resources
|
|
||||||
- resources
|
|
||||||
*/}}
|
|
||||||
{{- define "common.warnings.resources" -}}
|
|
||||||
{{- $values := .context.Values -}}
|
|
||||||
{{- $printMessage := false -}}
|
|
||||||
{{ $affectedSections := list -}}
|
|
||||||
{{- range .sections -}}
|
|
||||||
{{- if eq . "" -}}
|
|
||||||
{{/* Case where the resources section is at the root (one main deployment in the chart) */}}
|
|
||||||
{{- if not (index $values "resources") -}}
|
|
||||||
{{- $affectedSections = append $affectedSections "resources" -}}
|
|
||||||
{{- $printMessage = true -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{/* Case where the are multiple resources sections (more than one main deployment in the chart) */}}
|
|
||||||
{{- $keys := split "." . -}}
|
|
||||||
{{/* We iterate through the different levels until arriving to the resource section. Example: a.b.c.resources */}}
|
|
||||||
{{- $section := $values -}}
|
|
||||||
{{- range $keys -}}
|
|
||||||
{{- $section = index $section . -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- if not (index $section "resources") -}}
|
|
||||||
{{/* If the section has enabled=false or replicaCount=0, do not include it */}}
|
|
||||||
{{- if and (hasKey $section "enabled") -}}
|
|
||||||
{{- if index $section "enabled" -}}
|
|
||||||
{{/* enabled=true */}}
|
|
||||||
{{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
|
|
||||||
{{- $printMessage = true -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else if and (hasKey $section "replicaCount") -}}
|
|
||||||
{{/* We need a casting to int because number 0 is not treated as an int by default */}}
|
|
||||||
{{- if (gt (index $section "replicaCount" | int) 0) -}}
|
|
||||||
{{/* replicaCount > 0 */}}
|
|
||||||
{{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
|
|
||||||
{{- $printMessage = true -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- else -}}
|
|
||||||
{{/* Default case, add it to the affected sections */}}
|
|
||||||
{{- $affectedSections = append $affectedSections (printf "%s.resources" .) -}}
|
|
||||||
{{- $printMessage = true -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
|
||||||
{{- if $printMessage }}
|
|
||||||
|
|
||||||
WARNING: There are "resources" sections in the chart not set. Using "resourcesPreset" is not recommended for production. For production installations, please set the following values according to your workload needs:
|
|
||||||
{{- range $affectedSections }}
|
|
||||||
- {{ . }}
|
|
||||||
{{- end }}
|
|
||||||
+info https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
||||||
{{- end -}}
|
|
||||||
{{- end -}}
|
{{- end -}}
|
||||||
|
|||||||
@@ -19,5 +19,3 @@
|
|||||||
.project
|
.project
|
||||||
.idea/
|
.idea/
|
||||||
*.tmproj
|
*.tmproj
|
||||||
# img folder
|
|
||||||
img/
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
dependencies:
|
dependencies:
|
||||||
- name: common
|
- name: common
|
||||||
repository: oci://registry-1.docker.io/bitnamicharts
|
repository: oci://registry-1.docker.io/bitnamicharts
|
||||||
version: 2.19.0
|
version: 2.13.3
|
||||||
digest: sha256:ac559eb57710d8904e266424ee364cd686d7e24517871f0c5c67f7c4500c2bcc
|
digest: sha256:9a971689db0c66ea95ac2e911c05014c2b96c6077c991131ff84f2982f88fb83
|
||||||
generated: "2024-03-08T15:56:40.04210215Z"
|
generated: "2023-10-19T12:32:36.790999138Z"
|
||||||
|
|||||||
@@ -1,19 +1,17 @@
|
|||||||
annotations:
|
annotations:
|
||||||
category: Database
|
category: Database
|
||||||
images: |
|
images: |
|
||||||
- name: kubectl
|
|
||||||
image: docker.io/bitnami/kubectl:1.29.2-debian-12-r3
|
|
||||||
- name: os-shell
|
- name: os-shell
|
||||||
image: docker.io/bitnami/os-shell:12-debian-12-r16
|
image: docker.io/bitnami/os-shell:11-debian-11-r91
|
||||||
- name: redis
|
|
||||||
image: docker.io/bitnami/redis:7.2.4-debian-12-r9
|
|
||||||
- name: redis-exporter
|
- name: redis-exporter
|
||||||
image: docker.io/bitnami/redis-exporter:1.58.0-debian-12-r4
|
image: docker.io/bitnami/redis-exporter:1.55.0-debian-11-r2
|
||||||
- name: redis-sentinel
|
- name: redis-sentinel
|
||||||
image: docker.io/bitnami/redis-sentinel:7.2.4-debian-12-r7
|
image: docker.io/bitnami/redis-sentinel:7.2.3-debian-11-r1
|
||||||
|
- name: redis
|
||||||
|
image: docker.io/bitnami/redis:7.2.3-debian-11-r1
|
||||||
licenses: Apache-2.0
|
licenses: Apache-2.0
|
||||||
apiVersion: v2
|
apiVersion: v2
|
||||||
appVersion: 7.2.4
|
appVersion: 7.2.3
|
||||||
dependencies:
|
dependencies:
|
||||||
- name: common
|
- name: common
|
||||||
repository: oci://registry-1.docker.io/bitnamicharts
|
repository: oci://registry-1.docker.io/bitnamicharts
|
||||||
@@ -35,4 +33,4 @@ maintainers:
|
|||||||
name: redis
|
name: redis
|
||||||
sources:
|
sources:
|
||||||
- https://github.com/bitnami/charts/tree/main/bitnami/redis
|
- https://github.com/bitnami/charts/tree/main/bitnami/redis
|
||||||
version: 18.19.2
|
version: 18.4.0
|
||||||
|
|||||||
@@ -11,10 +11,10 @@ Disclaimer: Redis is a registered trademark of Redis Ltd. Any rights therein are
|
|||||||
## TL;DR
|
## TL;DR
|
||||||
|
|
||||||
```console
|
```console
|
||||||
helm install my-release oci://registry-1.docker.io/bitnamicharts/redis
|
helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/redis
|
||||||
```
|
```
|
||||||
|
|
||||||
Looking to use Redisreg; in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
|
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
@@ -37,6 +37,8 @@ The main features of each chart are the following:
|
|||||||
| Single write point (single master) | Multiple write points (multiple masters) |
|
| Single write point (single master) | Multiple write points (multiple masters) |
|
||||||
|  |  |
|
|  |  |
|
||||||
|
|
||||||
|
Looking to use Redisreg; in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog.
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
- Kubernetes 1.23+
|
- Kubernetes 1.23+
|
||||||
@@ -72,12 +74,11 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
### Global parameters
|
### Global parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
|
| ------------------------- | ------------------------------------------------------ | ----- |
|
||||||
| `global.imageRegistry` | Global Docker image registry | `""` |
|
| `global.imageRegistry` | Global Docker image registry | `""` |
|
||||||
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
|
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
|
||||||
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
|
| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
|
||||||
| `global.redis.password` | Global Redis® password (overrides `auth.password`) | `""` |
|
| `global.redis.password` | Global Redis® password (overrides `auth.password`) | `""` |
|
||||||
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `disabled` |
|
|
||||||
|
|
||||||
### Common parameters
|
### Common parameters
|
||||||
|
|
||||||
@@ -86,7 +87,6 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `kubeVersion` | Override Kubernetes version | `""` |
|
| `kubeVersion` | Override Kubernetes version | `""` |
|
||||||
| `nameOverride` | String to partially override common.names.fullname | `""` |
|
| `nameOverride` | String to partially override common.names.fullname | `""` |
|
||||||
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
|
| `fullnameOverride` | String to fully override common.names.fullname | `""` |
|
||||||
| `namespaceOverride` | String to fully override common.names.namespace | `""` |
|
|
||||||
| `commonLabels` | Labels to add to all deployed objects | `{}` |
|
| `commonLabels` | Labels to add to all deployed objects | `{}` |
|
||||||
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
|
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
|
||||||
| `secretAnnotations` | Annotations to add to secret | `{}` |
|
| `secretAnnotations` | Annotations to add to secret | `{}` |
|
||||||
@@ -121,14 +121,13 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `auth.existingSecret` | The name of an existing secret with Redis® credentials | `""` |
|
| `auth.existingSecret` | The name of an existing secret with Redis® credentials | `""` |
|
||||||
| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` |
|
| `auth.existingSecretPasswordKey` | Password key to be retrieved from existing secret | `""` |
|
||||||
| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` |
|
| `auth.usePasswordFiles` | Mount credentials as files instead of using an environment variable | `false` |
|
||||||
| `auth.usePasswordFileFromSecret` | Mount password file from secret | `true` |
|
|
||||||
| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` |
|
| `commonConfiguration` | Common configuration to be added into the ConfigMap | `""` |
|
||||||
| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis® nodes | `""` |
|
| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for Redis® nodes | `""` |
|
||||||
|
|
||||||
### Redis® master configuration parameters
|
### Redis® master configuration parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ---------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------ |
|
| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | ------------------------ |
|
||||||
| `master.count` | Number of Redis® master instances to deploy (experimental, requires additional configuration) | `1` |
|
| `master.count` | Number of Redis® master instances to deploy (experimental, requires additional configuration) | `1` |
|
||||||
| `master.configuration` | Configuration for Redis® master nodes | `""` |
|
| `master.configuration` | Configuration for Redis® master nodes | `""` |
|
||||||
| `master.disableCommands` | Array with Redis® commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` |
|
| `master.disableCommands` | Array with Redis® commands to disable on master nodes | `["FLUSHDB","FLUSHALL"]` |
|
||||||
@@ -162,20 +161,15 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
|
| `master.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
|
||||||
| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
| `master.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
||||||
| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
| `master.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
||||||
| `master.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if master.resources is set (master.resources is recommended for production). | `none` |
|
| `master.resources.limits` | The resources limits for the Redis® master containers | `{}` |
|
||||||
| `master.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
|
| `master.resources.requests` | The requested resources for the Redis® master containers | `{}` |
|
||||||
| `master.podSecurityContext.enabled` | Enabled Redis® master pods' Security Context | `true` |
|
| `master.podSecurityContext.enabled` | Enabled Redis® master pods' Security Context | `true` |
|
||||||
| `master.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
|
|
||||||
| `master.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
|
|
||||||
| `master.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
|
|
||||||
| `master.podSecurityContext.fsGroup` | Set Redis® master pod's Security Context fsGroup | `1001` |
|
| `master.podSecurityContext.fsGroup` | Set Redis® master pod's Security Context fsGroup | `1001` |
|
||||||
| `master.containerSecurityContext.enabled` | Enabled Redis® master containers' Security Context | `true` |
|
| `master.containerSecurityContext.enabled` | Enabled Redis® master containers' Security Context | `true` |
|
||||||
| `master.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `master.containerSecurityContext.runAsUser` | Set Redis® master containers' Security Context runAsUser | `1001` |
|
| `master.containerSecurityContext.runAsUser` | Set Redis® master containers' Security Context runAsUser | `1001` |
|
||||||
| `master.containerSecurityContext.runAsGroup` | Set Redis® master containers' Security Context runAsGroup | `0` |
|
| `master.containerSecurityContext.runAsGroup` | Set Redis® master containers' Security Context runAsGroup | `0` |
|
||||||
| `master.containerSecurityContext.runAsNonRoot` | Set Redis® master containers' Security Context runAsNonRoot | `true` |
|
| `master.containerSecurityContext.runAsNonRoot` | Set Redis® master containers' Security Context runAsNonRoot | `true` |
|
||||||
| `master.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate Redis® pod(s) privileges | `false` |
|
| `master.containerSecurityContext.allowPrivilegeEscalation` | Is it possible to escalate Redis® pod(s) privileges | `false` |
|
||||||
| `master.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context read-only root filesystem | `false` |
|
|
||||||
| `master.containerSecurityContext.seccompProfile.type` | Set Redis® master containers' Security Context seccompProfile | `RuntimeDefault` |
|
| `master.containerSecurityContext.seccompProfile.type` | Set Redis® master containers' Security Context seccompProfile | `RuntimeDefault` |
|
||||||
| `master.containerSecurityContext.capabilities.drop` | Set Redis® master containers' Security Context capabilities to drop | `["ALL"]` |
|
| `master.containerSecurityContext.capabilities.drop` | Set Redis® master containers' Security Context capabilities to drop | `["ALL"]` |
|
||||||
| `master.kind` | Use either Deployment, StatefulSet (default) or DaemonSet | `StatefulSet` |
|
| `master.kind` | Use either Deployment, StatefulSet (default) or DaemonSet | `StatefulSet` |
|
||||||
@@ -183,7 +177,6 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `master.updateStrategy.type` | Redis® master statefulset strategy type | `RollingUpdate` |
|
| `master.updateStrategy.type` | Redis® master statefulset strategy type | `RollingUpdate` |
|
||||||
| `master.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
|
| `master.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
|
||||||
| `master.priorityClassName` | Redis® master pods' priorityClassName | `""` |
|
| `master.priorityClassName` | Redis® master pods' priorityClassName | `""` |
|
||||||
| `master.automountServiceAccountToken` | Mount Service Account token in pod | `false` |
|
|
||||||
| `master.hostAliases` | Redis® master pods host aliases | `[]` |
|
| `master.hostAliases` | Redis® master pods host aliases | `[]` |
|
||||||
| `master.podLabels` | Extra labels for Redis® master pods | `{}` |
|
| `master.podLabels` | Extra labels for Redis® master pods | `{}` |
|
||||||
| `master.podAnnotations` | Annotations for Redis® master pods | `{}` |
|
| `master.podAnnotations` | Annotations for Redis® master pods | `{}` |
|
||||||
@@ -229,22 +222,21 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `master.service.internalTrafficPolicy` | Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` |
|
| `master.service.internalTrafficPolicy` | Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) | `Cluster` |
|
||||||
| `master.service.clusterIP` | Redis® master service Cluster IP | `""` |
|
| `master.service.clusterIP` | Redis® master service Cluster IP | `""` |
|
||||||
| `master.service.loadBalancerIP` | Redis® master service Load Balancer IP | `""` |
|
| `master.service.loadBalancerIP` | Redis® master service Load Balancer IP | `""` |
|
||||||
| `master.service.loadBalancerClass` | master service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific) | `""` |
|
|
||||||
| `master.service.loadBalancerSourceRanges` | Redis® master service Load Balancer sources | `[]` |
|
| `master.service.loadBalancerSourceRanges` | Redis® master service Load Balancer sources | `[]` |
|
||||||
| `master.service.externalIPs` | Redis® master service External IPs | `[]` |
|
| `master.service.externalIPs` | Redis® master service External IPs | `[]` |
|
||||||
| `master.service.annotations` | Additional custom annotations for Redis® master service | `{}` |
|
| `master.service.annotations` | Additional custom annotations for Redis® master service | `{}` |
|
||||||
| `master.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
| `master.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
||||||
| `master.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
|
| `master.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` |
|
||||||
| `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-master pods | `30` |
|
| `master.terminationGracePeriodSeconds` | Integer setting the termination grace period for the redis-master pods | `30` |
|
||||||
| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
| `master.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
|
||||||
| `master.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
| `master.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
||||||
| `master.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `false` |
|
| `master.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` |
|
||||||
| `master.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
|
| `master.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
|
||||||
|
|
||||||
### Redis® replicas configuration parameters
|
### Redis® replicas configuration parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------ |
|
| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------- | ------------------------ |
|
||||||
| `replica.kind` | Use either DaemonSet or StatefulSet (default) | `StatefulSet` |
|
| `replica.kind` | Use either DaemonSet or StatefulSet (default) | `StatefulSet` |
|
||||||
| `replica.replicaCount` | Number of Redis® replicas to deploy | `3` |
|
| `replica.replicaCount` | Number of Redis® replicas to deploy | `3` |
|
||||||
| `replica.configuration` | Configuration for Redis® replicas nodes | `""` |
|
| `replica.configuration` | Configuration for Redis® replicas nodes | `""` |
|
||||||
@@ -282,20 +274,15 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
|
| `replica.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
|
||||||
| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
| `replica.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
|
||||||
| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
| `replica.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
|
||||||
| `replica.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if replica.resources is set (replica.resources is recommended for production). | `none` |
|
| `replica.resources.limits` | The resources limits for the Redis® replicas containers | `{}` |
|
||||||
| `replica.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
|
| `replica.resources.requests` | The requested resources for the Redis® replicas containers | `{}` |
|
||||||
| `replica.podSecurityContext.enabled` | Enabled Redis® replicas pods' Security Context | `true` |
|
| `replica.podSecurityContext.enabled` | Enabled Redis® replicas pods' Security Context | `true` |
|
||||||
| `replica.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` |
|
|
||||||
| `replica.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` |
|
|
||||||
| `replica.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` |
|
|
||||||
| `replica.podSecurityContext.fsGroup` | Set Redis® replicas pod's Security Context fsGroup | `1001` |
|
| `replica.podSecurityContext.fsGroup` | Set Redis® replicas pod's Security Context fsGroup | `1001` |
|
||||||
| `replica.containerSecurityContext.enabled` | Enabled Redis® replicas containers' Security Context | `true` |
|
| `replica.containerSecurityContext.enabled` | Enabled Redis® replicas containers' Security Context | `true` |
|
||||||
| `replica.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `replica.containerSecurityContext.runAsUser` | Set Redis® replicas containers' Security Context runAsUser | `1001` |
|
| `replica.containerSecurityContext.runAsUser` | Set Redis® replicas containers' Security Context runAsUser | `1001` |
|
||||||
| `replica.containerSecurityContext.runAsGroup` | Set Redis® replicas containers' Security Context runAsGroup | `0` |
|
| `replica.containerSecurityContext.runAsGroup` | Set Redis® replicas containers' Security Context runAsGroup | `0` |
|
||||||
| `replica.containerSecurityContext.runAsNonRoot` | Set Redis® replicas containers' Security Context runAsNonRoot | `true` |
|
| `replica.containerSecurityContext.runAsNonRoot` | Set Redis® replicas containers' Security Context runAsNonRoot | `true` |
|
||||||
| `replica.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® replicas pod's Security Context allowPrivilegeEscalation | `false` |
|
| `replica.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® replicas pod's Security Context allowPrivilegeEscalation | `false` |
|
||||||
| `replica.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context read-only root filesystem | `false` |
|
|
||||||
| `replica.containerSecurityContext.seccompProfile.type` | Set Redis® replicas containers' Security Context seccompProfile | `RuntimeDefault` |
|
| `replica.containerSecurityContext.seccompProfile.type` | Set Redis® replicas containers' Security Context seccompProfile | `RuntimeDefault` |
|
||||||
| `replica.containerSecurityContext.capabilities.drop` | Set Redis® replicas containers' Security Context capabilities to drop | `["ALL"]` |
|
| `replica.containerSecurityContext.capabilities.drop` | Set Redis® replicas containers' Security Context capabilities to drop | `["ALL"]` |
|
||||||
| `replica.schedulerName` | Alternate scheduler for Redis® replicas pods | `""` |
|
| `replica.schedulerName` | Alternate scheduler for Redis® replicas pods | `""` |
|
||||||
@@ -303,7 +290,6 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `replica.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
|
| `replica.minReadySeconds` | How many seconds a pod needs to be ready before killing the next, during update | `0` |
|
||||||
| `replica.priorityClassName` | Redis® replicas pods' priorityClassName | `""` |
|
| `replica.priorityClassName` | Redis® replicas pods' priorityClassName | `""` |
|
||||||
| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods | `""` |
|
| `replica.podManagementPolicy` | podManagementPolicy to manage scaling operation of %%MAIN_CONTAINER_NAME%% pods | `""` |
|
||||||
| `replica.automountServiceAccountToken` | Mount Service Account token in pod | `false` |
|
|
||||||
| `replica.hostAliases` | Redis® replicas pods host aliases | `[]` |
|
| `replica.hostAliases` | Redis® replicas pods host aliases | `[]` |
|
||||||
| `replica.podLabels` | Extra labels for Redis® replicas pods | `{}` |
|
| `replica.podLabels` | Extra labels for Redis® replicas pods | `{}` |
|
||||||
| `replica.podAnnotations` | Annotations for Redis® replicas pods | `{}` |
|
| `replica.podAnnotations` | Annotations for Redis® replicas pods | `{}` |
|
||||||
@@ -349,7 +335,6 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
|
| `replica.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
|
||||||
| `replica.service.clusterIP` | Redis® replicas service Cluster IP | `""` |
|
| `replica.service.clusterIP` | Redis® replicas service Cluster IP | `""` |
|
||||||
| `replica.service.loadBalancerIP` | Redis® replicas service Load Balancer IP | `""` |
|
| `replica.service.loadBalancerIP` | Redis® replicas service Load Balancer IP | `""` |
|
||||||
| `replica.service.loadBalancerClass` | replicas service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific) | `""` |
|
|
||||||
| `replica.service.loadBalancerSourceRanges` | Redis® replicas service Load Balancer sources | `[]` |
|
| `replica.service.loadBalancerSourceRanges` | Redis® replicas service Load Balancer sources | `[]` |
|
||||||
| `replica.service.annotations` | Additional custom annotations for Redis® replicas service | `{}` |
|
| `replica.service.annotations` | Additional custom annotations for Redis® replicas service | `{}` |
|
||||||
| `replica.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
| `replica.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
||||||
@@ -360,15 +345,15 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` |
|
| `replica.autoscaling.maxReplicas` | Maximum replicas for the pod autoscaling | `11` |
|
||||||
| `replica.autoscaling.targetCPU` | Percentage of CPU to consider when autoscaling | `""` |
|
| `replica.autoscaling.targetCPU` | Percentage of CPU to consider when autoscaling | `""` |
|
||||||
| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` |
|
| `replica.autoscaling.targetMemory` | Percentage of Memory to consider when autoscaling | `""` |
|
||||||
| `replica.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
| `replica.serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
|
||||||
| `replica.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
| `replica.serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
||||||
| `replica.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `false` |
|
| `replica.serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` |
|
||||||
| `replica.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
|
| `replica.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
|
||||||
|
|
||||||
### Redis® Sentinel configuration parameters
|
### Redis® Sentinel configuration parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
|
| ------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
|
||||||
| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` |
|
| `sentinel.enabled` | Use Redis® Sentinel on Redis® pods. | `false` |
|
||||||
| `sentinel.image.registry` | Redis® Sentinel image registry | `REGISTRY_NAME` |
|
| `sentinel.image.registry` | Redis® Sentinel image registry | `REGISTRY_NAME` |
|
||||||
| `sentinel.image.repository` | Redis® Sentinel image repository | `REPOSITORY_NAME/redis-sentinel` |
|
| `sentinel.image.repository` | Redis® Sentinel image repository | `REPOSITORY_NAME/redis-sentinel` |
|
||||||
@@ -431,14 +416,12 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `sentinel.persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` |
|
| `sentinel.persistentVolumeClaimRetentionPolicy.enabled` | Controls if and how PVCs are deleted during the lifecycle of a StatefulSet | `false` |
|
||||||
| `sentinel.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` |
|
| `sentinel.persistentVolumeClaimRetentionPolicy.whenScaled` | Volume retention behavior when the replica count of the StatefulSet is reduced | `Retain` |
|
||||||
| `sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` |
|
| `sentinel.persistentVolumeClaimRetentionPolicy.whenDeleted` | Volume retention behavior that applies when the StatefulSet is deleted | `Retain` |
|
||||||
| `sentinel.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if sentinel.resources is set (sentinel.resources is recommended for production). | `none` |
|
| `sentinel.resources.limits` | The resources limits for the Redis® Sentinel containers | `{}` |
|
||||||
| `sentinel.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
|
| `sentinel.resources.requests` | The requested resources for the Redis® Sentinel containers | `{}` |
|
||||||
| `sentinel.containerSecurityContext.enabled` | Enabled Redis® Sentinel containers' Security Context | `true` |
|
| `sentinel.containerSecurityContext.enabled` | Enabled Redis® Sentinel containers' Security Context | `true` |
|
||||||
| `sentinel.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `sentinel.containerSecurityContext.runAsUser` | Set Redis® Sentinel containers' Security Context runAsUser | `1001` |
|
| `sentinel.containerSecurityContext.runAsUser` | Set Redis® Sentinel containers' Security Context runAsUser | `1001` |
|
||||||
| `sentinel.containerSecurityContext.runAsGroup` | Set Redis® Sentinel containers' Security Context runAsGroup | `0` |
|
| `sentinel.containerSecurityContext.runAsGroup` | Set Redis® Sentinel containers' Security Context runAsGroup | `0` |
|
||||||
| `sentinel.containerSecurityContext.runAsNonRoot` | Set Redis® Sentinel containers' Security Context runAsNonRoot | `true` |
|
| `sentinel.containerSecurityContext.runAsNonRoot` | Set Redis® Sentinel containers' Security Context runAsNonRoot | `true` |
|
||||||
| `sentinel.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context read-only root filesystem | `false` |
|
|
||||||
| `sentinel.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation | `false` |
|
| `sentinel.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® Sentinel containers' Security Context allowPrivilegeEscalation | `false` |
|
||||||
| `sentinel.containerSecurityContext.seccompProfile.type` | Set Redis® Sentinel containers' Security Context seccompProfile | `RuntimeDefault` |
|
| `sentinel.containerSecurityContext.seccompProfile.type` | Set Redis® Sentinel containers' Security Context seccompProfile | `RuntimeDefault` |
|
||||||
| `sentinel.containerSecurityContext.capabilities.drop` | Set Redis® Sentinel containers' Security Context capabilities to drop | `["ALL"]` |
|
| `sentinel.containerSecurityContext.capabilities.drop` | Set Redis® Sentinel containers' Security Context capabilities to drop | `["ALL"]` |
|
||||||
@@ -453,9 +436,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `sentinel.service.externalTrafficPolicy` | Redis® Sentinel service external traffic policy | `Cluster` |
|
| `sentinel.service.externalTrafficPolicy` | Redis® Sentinel service external traffic policy | `Cluster` |
|
||||||
| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
|
| `sentinel.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
|
||||||
| `sentinel.service.clusterIP` | Redis® Sentinel service Cluster IP | `""` |
|
| `sentinel.service.clusterIP` | Redis® Sentinel service Cluster IP | `""` |
|
||||||
| `sentinel.service.createMaster` | Enable master service pointing to the current master (experimental) | `false` |
|
|
||||||
| `sentinel.service.loadBalancerIP` | Redis® Sentinel service Load Balancer IP | `""` |
|
| `sentinel.service.loadBalancerIP` | Redis® Sentinel service Load Balancer IP | `""` |
|
||||||
| `sentinel.service.loadBalancerClass` | sentinel service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific) | `""` |
|
|
||||||
| `sentinel.service.loadBalancerSourceRanges` | Redis® Sentinel service Load Balancer sources | `[]` |
|
| `sentinel.service.loadBalancerSourceRanges` | Redis® Sentinel service Load Balancer sources | `[]` |
|
||||||
| `sentinel.service.annotations` | Additional custom annotations for Redis® Sentinel service | `{}` |
|
| `sentinel.service.annotations` | Additional custom annotations for Redis® Sentinel service | `{}` |
|
||||||
| `sentinel.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
| `sentinel.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` |
|
||||||
@@ -468,9 +449,8 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
|
||||||
| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` |
|
| `serviceBindings.enabled` | Create secret for service binding (Experimental) | `false` |
|
||||||
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `true` |
|
| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` |
|
||||||
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
|
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
|
||||||
| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` |
|
|
||||||
| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
|
| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` |
|
||||||
| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
|
| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` |
|
||||||
| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
|
| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` |
|
||||||
@@ -484,7 +464,7 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `rbac.rules` | Custom RBAC rules to set | `[]` |
|
| `rbac.rules` | Custom RBAC rules to set | `[]` |
|
||||||
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
|
||||||
| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` |
|
||||||
| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `false` |
|
| `serviceAccount.automountServiceAccountToken` | Whether to auto mount the service account token | `true` |
|
||||||
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
|
| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` |
|
||||||
| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` |
|
| `pdb.create` | Specifies whether a PodDisruptionBudget should be created | `false` |
|
||||||
| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` |
|
| `pdb.minAvailable` | Min number of pods that must still be available after the eviction | `1` |
|
||||||
@@ -502,14 +482,13 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
### Metrics Parameters
|
### Metrics Parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
|
| ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | -------------------------------- |
|
||||||
| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` |
|
| `metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` |
|
||||||
| `metrics.image.registry` | Redis® Exporter image registry | `REGISTRY_NAME` |
|
| `metrics.image.registry` | Redis® Exporter image registry | `REGISTRY_NAME` |
|
||||||
| `metrics.image.repository` | Redis® Exporter image repository | `REPOSITORY_NAME/redis-exporter` |
|
| `metrics.image.repository` | Redis® Exporter image repository | `REPOSITORY_NAME/redis-exporter` |
|
||||||
| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
| `metrics.image.digest` | Redis® Exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||||
| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` |
|
| `metrics.image.pullPolicy` | Redis® Exporter image pull policy | `IfNotPresent` |
|
||||||
| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` |
|
| `metrics.image.pullSecrets` | Redis® Exporter image pull secrets | `[]` |
|
||||||
| `metrics.containerPorts.http` | Metrics HTTP container port | `9121` |
|
|
||||||
| `metrics.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `false` |
|
| `metrics.startupProbe.enabled` | Enable startupProbe on Redis® replicas nodes | `false` |
|
||||||
| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
|
| `metrics.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` |
|
||||||
| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
|
| `metrics.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
|
||||||
@@ -536,31 +515,26 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `metrics.extraArgs` | Extra arguments for Redis® exporter, for example: | `{}` |
|
| `metrics.extraArgs` | Extra arguments for Redis® exporter, for example: | `{}` |
|
||||||
| `metrics.extraEnvVars` | Array with extra environment variables to add to Redis® exporter | `[]` |
|
| `metrics.extraEnvVars` | Array with extra environment variables to add to Redis® exporter | `[]` |
|
||||||
| `metrics.containerSecurityContext.enabled` | Enabled Redis® exporter containers' Security Context | `true` |
|
| `metrics.containerSecurityContext.enabled` | Enabled Redis® exporter containers' Security Context | `true` |
|
||||||
| `metrics.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `metrics.containerSecurityContext.runAsUser` | Set Redis® exporter containers' Security Context runAsUser | `1001` |
|
| `metrics.containerSecurityContext.runAsUser` | Set Redis® exporter containers' Security Context runAsUser | `1001` |
|
||||||
| `metrics.containerSecurityContext.runAsGroup` | Set Redis® exporter containers' Security Context runAsGroup | `0` |
|
| `metrics.containerSecurityContext.runAsGroup` | Set Redis® exporter containers' Security Context runAsGroup | `0` |
|
||||||
| `metrics.containerSecurityContext.runAsNonRoot` | Set Redis® exporter containers' Security Context runAsNonRoot | `true` |
|
| `metrics.containerSecurityContext.runAsNonRoot` | Set Redis® exporter containers' Security Context runAsNonRoot | `true` |
|
||||||
| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® exporter containers' Security Context allowPrivilegeEscalation | `false` |
|
| `metrics.containerSecurityContext.allowPrivilegeEscalation` | Set Redis® exporter containers' Security Context allowPrivilegeEscalation | `false` |
|
||||||
| `metrics.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context read-only root filesystem | `false` |
|
|
||||||
| `metrics.containerSecurityContext.seccompProfile.type` | Set Redis® exporter containers' Security Context seccompProfile | `RuntimeDefault` |
|
| `metrics.containerSecurityContext.seccompProfile.type` | Set Redis® exporter containers' Security Context seccompProfile | `RuntimeDefault` |
|
||||||
| `metrics.containerSecurityContext.capabilities.drop` | Set Redis® exporter containers' Security Context capabilities to drop | `["ALL"]` |
|
| `metrics.containerSecurityContext.capabilities.drop` | Set Redis® exporter containers' Security Context capabilities to drop | `["ALL"]` |
|
||||||
| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® metrics sidecar | `[]` |
|
| `metrics.extraVolumes` | Optionally specify extra list of additional volumes for the Redis® metrics sidecar | `[]` |
|
||||||
| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar | `[]` |
|
| `metrics.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar | `[]` |
|
||||||
| `metrics.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). | `none` |
|
| `metrics.resources.limits` | The resources limits for the Redis® exporter container | `{}` |
|
||||||
| `metrics.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
|
| `metrics.resources.requests` | The requested resources for the Redis® exporter container | `{}` |
|
||||||
| `metrics.podLabels` | Extra labels for Redis® exporter pods | `{}` |
|
| `metrics.podLabels` | Extra labels for Redis® exporter pods | `{}` |
|
||||||
| `metrics.podAnnotations` | Annotations for Redis® exporter pods | `{}` |
|
| `metrics.podAnnotations` | Annotations for Redis® exporter pods | `{}` |
|
||||||
| `metrics.service.enabled` | Create Service resource(s) for scraping metrics using PrometheusOperator ServiceMonitor, can be disabled when using a PodMonitor | `true` |
|
|
||||||
| `metrics.service.type` | Redis® exporter service type | `ClusterIP` |
|
| `metrics.service.type` | Redis® exporter service type | `ClusterIP` |
|
||||||
| `metrics.service.ports.http` | Redis® exporter service port | `9121` |
|
| `metrics.service.port` | Redis® exporter service port | `9121` |
|
||||||
| `metrics.service.externalTrafficPolicy` | Redis® exporter service external traffic policy | `Cluster` |
|
| `metrics.service.externalTrafficPolicy` | Redis® exporter service external traffic policy | `Cluster` |
|
||||||
| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
|
| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` |
|
||||||
| `metrics.service.loadBalancerIP` | Redis® exporter service Load Balancer IP | `""` |
|
| `metrics.service.loadBalancerIP` | Redis® exporter service Load Balancer IP | `""` |
|
||||||
| `metrics.service.loadBalancerClass` | exporter service Load Balancer class if service type is `LoadBalancer` (optional, cloud specific) | `""` |
|
|
||||||
| `metrics.service.loadBalancerSourceRanges` | Redis® exporter service Load Balancer sources | `[]` |
|
| `metrics.service.loadBalancerSourceRanges` | Redis® exporter service Load Balancer sources | `[]` |
|
||||||
| `metrics.service.annotations` | Additional custom annotations for Redis® exporter service | `{}` |
|
| `metrics.service.annotations` | Additional custom annotations for Redis® exporter service | `{}` |
|
||||||
| `metrics.service.clusterIP` | Redis® exporter service Cluster IP | `""` |
|
| `metrics.service.clusterIP` | Redis® exporter service Cluster IP | `""` |
|
||||||
| `metrics.serviceMonitor.port` | the service port to scrape metrics from | `http-metrics` |
|
|
||||||
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` |
|
| `metrics.serviceMonitor.enabled` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | `false` |
|
||||||
| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` |
|
| `metrics.serviceMonitor.namespace` | The namespace in which the ServiceMonitor will be created | `""` |
|
||||||
| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` |
|
| `metrics.serviceMonitor.interval` | The interval at which metrics should be scraped | `30s` |
|
||||||
@@ -572,8 +546,6 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `metrics.serviceMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` |
|
| `metrics.serviceMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` |
|
||||||
| `metrics.serviceMonitor.sampleLimit` | Limit of how many samples should be scraped from every Pod | `false` |
|
| `metrics.serviceMonitor.sampleLimit` | Limit of how many samples should be scraped from every Pod | `false` |
|
||||||
| `metrics.serviceMonitor.targetLimit` | Limit of how many targets should be scraped | `false` |
|
| `metrics.serviceMonitor.targetLimit` | Limit of how many targets should be scraped | `false` |
|
||||||
| `metrics.serviceMonitor.additionalEndpoints` | Additional endpoints to scrape (e.g sentinel) | `[]` |
|
|
||||||
| `metrics.podMonitor.port` | the pod port to scrape metrics from | `metrics` |
|
|
||||||
| `metrics.podMonitor.enabled` | Create PodMonitor resource(s) for scraping metrics using PrometheusOperator | `false` |
|
| `metrics.podMonitor.enabled` | Create PodMonitor resource(s) for scraping metrics using PrometheusOperator | `false` |
|
||||||
| `metrics.podMonitor.namespace` | The namespace in which the PodMonitor will be created | `""` |
|
| `metrics.podMonitor.namespace` | The namespace in which the PodMonitor will be created | `""` |
|
||||||
| `metrics.podMonitor.interval` | The interval at which metrics should be scraped | `30s` |
|
| `metrics.podMonitor.interval` | The interval at which metrics should be scraped | `30s` |
|
||||||
@@ -585,7 +557,6 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `metrics.podMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` |
|
| `metrics.podMonitor.podTargetLabels` | Labels from the Kubernetes pod to be transferred to the created metrics | `[]` |
|
||||||
| `metrics.podMonitor.sampleLimit` | Limit of how many samples should be scraped from every Pod | `false` |
|
| `metrics.podMonitor.sampleLimit` | Limit of how many samples should be scraped from every Pod | `false` |
|
||||||
| `metrics.podMonitor.targetLimit` | Limit of how many targets should be scraped | `false` |
|
| `metrics.podMonitor.targetLimit` | Limit of how many targets should be scraped | `false` |
|
||||||
| `metrics.podMonitor.additionalEndpoints` | Additional endpoints to scrape (e.g sentinel) | `[]` |
|
|
||||||
| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` |
|
| `metrics.prometheusRule.enabled` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | `false` |
|
||||||
| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` |
|
| `metrics.prometheusRule.namespace` | The namespace in which the prometheusRule will be created | `""` |
|
||||||
| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` |
|
| `metrics.prometheusRule.additionalLabels` | Additional labels for the prometheusRule | `{}` |
|
||||||
@@ -594,25 +565,16 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
### Init Container Parameters
|
### Init Container Parameters
|
||||||
|
|
||||||
| Name | Description | Value |
|
| Name | Description | Value |
|
||||||
| ----------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- |
|
| ------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | -------------------------- |
|
||||||
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
|
| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` |
|
||||||
| `volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` |
|
| `volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` |
|
||||||
| `volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` |
|
| `volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` |
|
||||||
| `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
| `volumePermissions.image.digest` | OS Shell + Utility image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
||||||
| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` |
|
| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` |
|
||||||
| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` |
|
| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` |
|
||||||
| `volumePermissions.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). | `none` |
|
| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` |
|
||||||
| `volumePermissions.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
|
| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` |
|
||||||
| `volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `nil` |
|
|
||||||
| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` |
|
| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` |
|
||||||
| `kubectl.image.registry` | Kubectl image registry | `REGISTRY_NAME` |
|
|
||||||
| `kubectl.image.repository` | Kubectl image repository | `REPOSITORY_NAME/kubectl` |
|
|
||||||
| `kubectl.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
|
|
||||||
| `kubectl.image.pullPolicy` | Kubectl image pull policy | `IfNotPresent` |
|
|
||||||
| `kubectl.image.pullSecrets` | Kubectl pull secrets | `[]` |
|
|
||||||
| `kubectl.command` | kubectl command to execute | `["/opt/bitnami/scripts/kubectl-scripts/update-master-label.sh"]` |
|
|
||||||
| `kubectl.resources.limits` | The resources limits for the kubectl containers | `{}` |
|
|
||||||
| `kubectl.resources.requests` | The requested resources for the kubectl containers | `{}` |
|
|
||||||
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
|
| `sysctl.enabled` | Enable init container to modify Kernel settings | `false` |
|
||||||
| `sysctl.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` |
|
| `sysctl.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` |
|
||||||
| `sysctl.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` |
|
| `sysctl.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` |
|
||||||
@@ -621,8 +583,8 @@ The command removes all the Kubernetes components associated with the chart and
|
|||||||
| `sysctl.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` |
|
| `sysctl.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` |
|
||||||
| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` |
|
| `sysctl.command` | Override default init-sysctl container command (useful when using custom images) | `[]` |
|
||||||
| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
|
| `sysctl.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
|
||||||
| `sysctl.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, small, medium, large, xlarge, 2xlarge). This is ignored if sysctl.resources is set (sysctl.resources is recommended for production). | `none` |
|
| `sysctl.resources.limits` | The resources limits for the init container | `{}` |
|
||||||
| `sysctl.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` |
|
| `sysctl.resources.requests` | The requested resources for the init container | `{}` |
|
||||||
|
|
||||||
### useExternalDNS Parameters
|
### useExternalDNS Parameters
|
||||||
|
|
||||||
@@ -654,17 +616,11 @@ helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/redis
|
|||||||
```
|
```
|
||||||
|
|
||||||
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
|
> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`.
|
||||||
> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/redis/values.yaml)
|
> **Tip**: You can use the default [values.yaml](values.yaml)
|
||||||
|
|
||||||
## Configuration and installation details
|
## Configuration and installation details
|
||||||
|
|
||||||
### Resource requests and limits
|
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
|
||||||
|
|
||||||
Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case.
|
|
||||||
|
|
||||||
To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcePreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
|
|
||||||
|
|
||||||
### [Rolling VS Immutable tags](https://docs.bitnami.com/tutorials/understand-rolling-tags-containers)
|
|
||||||
|
|
||||||
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
|
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
|
||||||
|
|
||||||
@@ -672,7 +628,7 @@ Bitnami will release a new chart updating its containers if a new version of the
|
|||||||
|
|
||||||
### Use a different Redis® version
|
### Use a different Redis® version
|
||||||
|
|
||||||
To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter.
|
To modify the application version used in this chart, specify a different version of the image using the `image.tag` parameter and/or a different repository using the `image.repository` parameter. Refer to the [chart documentation for more information on these parameters and how to use them with images from a private registry](https://docs.bitnami.com/kubernetes/infrastructure/redis/configuration/change-image-version/).
|
||||||
|
|
||||||
### Bootstrapping with an External Cluster
|
### Bootstrapping with an External Cluster
|
||||||
|
|
||||||
@@ -774,27 +730,13 @@ It's recommended to only change `master.count` if you know what you are doing.
|
|||||||
|
|
||||||
### Using a password file
|
### Using a password file
|
||||||
|
|
||||||
To use a password file for Redis® you need to create a secret containing the password and then deploy the chart using that secret. Follow these instructions:
|
To use a password file for Redis® you need to create a secret containing the password and then deploy the chart using that secret.
|
||||||
|
|
||||||
- Create the secret with the password. It is important that the file with the password must be called `redis-password`.
|
Refer to the chart documentation for more information on [using a password file for Redis®](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/use-password-file/).
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl create secret generic redis-password-secret --from-file=redis-password.yaml
|
|
||||||
```
|
|
||||||
|
|
||||||
- Deploy the Helm Chart using the secret name as parameter:
|
|
||||||
|
|
||||||
```text
|
|
||||||
usePassword=true
|
|
||||||
usePasswordFile=true
|
|
||||||
existingSecret=redis-password-secret
|
|
||||||
sentinels.enabled=true
|
|
||||||
metrics.enabled=true
|
|
||||||
```
|
|
||||||
|
|
||||||
### Securing traffic using TLS
|
### Securing traffic using TLS
|
||||||
|
|
||||||
TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the cluster:
|
TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
|
||||||
|
|
||||||
- `tls.enabled`: Enable TLS support. Defaults to `false`
|
- `tls.enabled`: Enable TLS support. Defaults to `false`
|
||||||
- `tls.existingSecret`: Name of the secret that contains the certificates. No defaults.
|
- `tls.existingSecret`: Name of the secret that contains the certificates. No defaults.
|
||||||
@@ -802,23 +744,7 @@ TLS support can be enabled in the chart by specifying the `tls.` parameters whil
|
|||||||
- `tls.certKeyFilename`: Certificate key filename. No defaults.
|
- `tls.certKeyFilename`: Certificate key filename. No defaults.
|
||||||
- `tls.certCAFilename`: CA Certificate filename. No defaults.
|
- `tls.certCAFilename`: CA Certificate filename. No defaults.
|
||||||
|
|
||||||
For example:
|
Refer to the chart documentation for more information on [creating the secret and a TLS deployment example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-tls/).
|
||||||
|
|
||||||
First, create the secret with the certificates files:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem
|
|
||||||
```
|
|
||||||
|
|
||||||
Then, use the following parameters:
|
|
||||||
|
|
||||||
```console
|
|
||||||
tls.enabled="true"
|
|
||||||
tls.existingSecret="certificates-tls-secret"
|
|
||||||
tls.certFilename="cert.pem"
|
|
||||||
tls.certKeyFilename="cert.key"
|
|
||||||
tls.certCAFilename="ca.pem"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Metrics
|
### Metrics
|
||||||
|
|
||||||
@@ -834,65 +760,11 @@ tls-client-cert-file
|
|||||||
tls-ca-cert-file
|
tls-ca-cert-file
|
||||||
```
|
```
|
||||||
|
|
||||||
### Deploy a custom metrics script in the sidecar
|
|
||||||
|
|
||||||
A custom Lua script can be added to the `redis-exporter` sidecar by way of the `metrics.extraArgs.script` parameter. The pathname of the script must exist on the container, or the `redis_exporter` process (and therefore the whole pod) will refuse to start. The script can be provided to the sidecar containers via the `metrics.extraVolumes` and `metrics.extraVolumeMounts` parameters:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
metrics:
|
|
||||||
extraVolumeMounts:
|
|
||||||
- name: '{{ printf "%s-metrics-script-file" (include "common.names.fullname" .) }}'
|
|
||||||
mountPath: '{{ printf "/mnt/%s/" (include "common.names.name" .) }}'
|
|
||||||
readOnly: true
|
|
||||||
extraVolumes:
|
|
||||||
- name: '{{ printf "%s-metrics-script-file" (include "common.names.fullname" .) }}'
|
|
||||||
configMap:
|
|
||||||
name: '{{ printf "%s-metrics-script" (include "common.names.fullname" .) }}'
|
|
||||||
extraArgs:
|
|
||||||
script: '{{ printf "/mnt/%s/my_custom_metrics.lua" (include "common.names.name" .) }}'
|
|
||||||
```
|
|
||||||
|
|
||||||
Then deploy the script into the correct location via `extraDeploy`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
extraDeploy:
|
|
||||||
- apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: '{{ printf "%s-metrics-script" (include "common.names.fullname" .) }}'
|
|
||||||
data:
|
|
||||||
my_custom_metrics.lua: |
|
|
||||||
-- LUA SCRIPT CODE HERE, e.g.,
|
|
||||||
return {'bitnami_makes_the_best_charts', '1'}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Host Kernel Settings
|
### Host Kernel Settings
|
||||||
|
|
||||||
Redis® may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. To do so, you can set up a privileged `initContainer` with the `sysctlImage` config values, for example:
|
Redis® may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
|
||||||
|
|
||||||
```yaml
|
Refer to the chart documentation for more information on [configuring host kernel settings with an example](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/configure-kernel-settings/).
|
||||||
sysctlImage:
|
|
||||||
enabled: true
|
|
||||||
mountHostSys: true
|
|
||||||
command:
|
|
||||||
- /bin/sh
|
|
||||||
- -c
|
|
||||||
- |-
|
|
||||||
install_packages procps
|
|
||||||
sysctl -w net.core.somaxconn=10000
|
|
||||||
echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure `sysctls` for master and slave pods. Example:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
securityContext:
|
|
||||||
sysctls:
|
|
||||||
- name: net.core.somaxconn
|
|
||||||
value: "10000"
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that this will not disable transparent huge pages.
|
|
||||||
|
|
||||||
## Persistence
|
## Persistence
|
||||||
|
|
||||||
@@ -912,115 +784,13 @@ helm install my-release --set master.persistence.existingClaim=PVC_NAME oci://RE
|
|||||||
|
|
||||||
## Backup and restore
|
## Backup and restore
|
||||||
|
|
||||||
To backup and restore Redis deployments on Kubernetes, you will need to create a snapshot of the data in the source cluster, and later restore it in a new cluster with the new parameters. Follow the instructions below:
|
Refer to the chart documentation for more information on [backing up and restoring Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/backup-restore/).
|
||||||
|
|
||||||
### Step 1: Backup the deployment
|
|
||||||
|
|
||||||
- Connect to one of the nodes and start the Redis CLI tool. Then, run the commands below:
|
|
||||||
|
|
||||||
```text
|
|
||||||
$ kubectl exec -it my-release-master-0 bash
|
|
||||||
$ redis-cli
|
|
||||||
127.0.0.1:6379> auth your_current_redis_password
|
|
||||||
OK
|
|
||||||
127.0.0.1:6379> save
|
|
||||||
OK
|
|
||||||
```
|
|
||||||
|
|
||||||
- Copy the dump file from the Redis node:
|
|
||||||
|
|
||||||
```console
|
|
||||||
kubectl cp my-release-master-0:/data/dump.rdb dump.rdb -c redis
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Restore the data on the destination cluster
|
|
||||||
|
|
||||||
To restore the data in a new cluster, you will need to create a PVC and then upload the *dump.rdb* file to the new volume.
|
|
||||||
|
|
||||||
Follow these steps:
|
|
||||||
|
|
||||||
- In the [*values.yaml*](https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml) file set the *appendonly* parameter to *no*. You can skip this step if it is already configured as *no*
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
commonConfiguration: |-
|
|
||||||
# Enable AOF https://redis.io/topics/persistence#append-only-file
|
|
||||||
appendonly no
|
|
||||||
# Disable RDB persistence, AOF persistence already enabled.
|
|
||||||
save ""
|
|
||||||
```
|
|
||||||
|
|
||||||
> *Note that the `Enable AOF` comment belongs to the original config file and what you're actually doing is disabling it. This change will only be necessary for the temporary cluster you're creating to upload the dump.*
|
|
||||||
|
|
||||||
- Start the new cluster to create the PVCs. Use the command below as an example:
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3
|
|
||||||
```
|
|
||||||
|
|
||||||
- Now that the PVCs have been created, stop the cluster and copy the *dump.rdb* file onto the persisted data by using a helper pod.
|
|
||||||
|
|
||||||
```text
|
|
||||||
$ helm delete new-redis
|
|
||||||
|
|
||||||
$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides='
|
|
||||||
{
|
|
||||||
"apiVersion": "v1",
|
|
||||||
"kind": "Pod",
|
|
||||||
"metadata": {
|
|
||||||
"name": "redisvolpod"
|
|
||||||
},
|
|
||||||
"spec": {
|
|
||||||
"containers": [{
|
|
||||||
"command": [
|
|
||||||
"tail",
|
|
||||||
"-f",
|
|
||||||
"/dev/null"
|
|
||||||
],
|
|
||||||
"image": "bitnami/minideb",
|
|
||||||
"name": "mycontainer",
|
|
||||||
"volumeMounts": [{
|
|
||||||
"mountPath": "/mnt",
|
|
||||||
"name": "redisdata"
|
|
||||||
}]
|
|
||||||
}],
|
|
||||||
"restartPolicy": "Never",
|
|
||||||
"volumes": [{
|
|
||||||
"name": "redisdata",
|
|
||||||
"persistentVolumeClaim": {
|
|
||||||
"claimName": "redis-data-new-redis-master-0"
|
|
||||||
}
|
|
||||||
}]
|
|
||||||
}
|
|
||||||
}' --image="bitnami/minideb"
|
|
||||||
|
|
||||||
$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb
|
|
||||||
$ kubectl delete pod volpod
|
|
||||||
```
|
|
||||||
|
|
||||||
- Restart the cluster:
|
|
||||||
|
|
||||||
> **INFO:** The *appendonly* parameter can be safely restored to your desired value.
|
|
||||||
|
|
||||||
```console
|
|
||||||
helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3
|
|
||||||
```
|
|
||||||
|
|
||||||
## NetworkPolicy
|
## NetworkPolicy
|
||||||
|
|
||||||
To enable network policy for Redis®, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
|
To enable network policy for Redis®, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
|
||||||
|
|
||||||
With NetworkPolicy enabled, only pods with the generated client label will be able to connect to Redis. This label will be displayed in the output after a successful install.
|
Refer to the chart documentation for more information on [enabling the network policy in Redis® deployments](https://docs.bitnami.com/kubernetes/infrastructure/redis/administration/enable-network-policy/).
|
||||||
|
|
||||||
With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to Redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
networkPolicy:
|
|
||||||
enabled: true
|
|
||||||
ingressNSMatchLabels:
|
|
||||||
redis: external
|
|
||||||
ingressNSPodMatchLabels:
|
|
||||||
redis-client: true
|
|
||||||
```
|
|
||||||
|
|
||||||
### Setting Pod's affinity
|
### Setting Pod's affinity
|
||||||
|
|
||||||
@@ -1094,7 +864,7 @@ The Redis® sentinel exporter was removed in this version because the upstrea
|
|||||||
- `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones.
|
- `sentinel.metrics.*` parameters are deprecated in favor of `metrics.sentinel.*` ones.
|
||||||
- New parameters to add custom command, environment variables, sidecars, init containers, etc. were added.
|
- New parameters to add custom command, environment variables, sidecars, init containers, etc. were added.
|
||||||
- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
|
- Chart labels were adapted to follow the [Helm charts standard labels](https://helm.sh/docs/chart_best_practices/labels/#standard-labels).
|
||||||
- values.yaml metadata was adapted to follow the format supported by [Readme Generator for Helm](https://github.com/bitnami/readme-generator-for-helm).
|
- values.yaml metadata was adapted to follow the format supported by [Readme Generator for Helm](https://github.com/bitnami-labs/readme-generator-for-helm).
|
||||||
|
|
||||||
Consequences:
|
Consequences:
|
||||||
|
|
||||||
@@ -1234,7 +1004,7 @@ kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remo
|
|||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
Copyright © 2024 Broadcom. The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
|
Copyright © 2023 VMware, Inc.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user