Compare commits

..

1 Commit

Author: Andrei Kvapil
SHA1: 9ea27ef88e
Message: Update piraeus-operator and LINSTOR v2.4.1
Date: 2024-03-15 21:02:48 +01:00
370 changed files with 9512 additions and 120060 deletions

View File

@@ -33,7 +33,7 @@ You can use Cozystack as Kubernetes distribution for Bare Metal
## Documentation
The documentation is located on official [cozystack.io](https://cozystack.io) website.
The documentation is located on official [cozystack.io](cozystack.io) website.
Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
@@ -44,8 +44,6 @@ If you encounter any difficulties, start with the [troubleshooting guide](https:
Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.
- [Roadmap](https://github.com/orgs/aenix-io/projects/2)
## Contributions
Contributions are highly appreciated and very welcomed!

hack/prepare_release.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/bin/sh
set -e
if [ -z "$1" ]; then
echo "Please pass version in the first argument"
echo "Example: $0 v0.0.2"
exit 1
fi
version=$1
talos_version=$(awk '/^version:/ {print $2}' packages/core/installer/images/talos/profiles/installer.yaml)
set -x
sed -i "/^TAG / s|=.*|= ${version}|" \
packages/apps/http-cache/Makefile \
packages/apps/kubernetes/Makefile \
packages/core/installer/Makefile \
packages/system/dashboard/Makefile
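
For context, the awk call in this script extracts the version field from the Talos imager profile. Judging by the installer profile that appears later in this diff, that file carries a top-level line of the following shape (the value is taken from the v1.6.4 profile shown below and will differ between releases):

version: v1.6.4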

View File

@@ -15,6 +15,13 @@ metadata:
namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cozystack
namespace: cozy-system
---
# Source: cozy-installer/templates/cozystack.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -63,7 +70,7 @@ spec:
serviceAccountName: cozystack
containers:
- name: cozystack
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.3.1"
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.1.0"
env:
- name: KUBERNETES_SERVICE_HOST
value: localhost
@@ -82,7 +89,7 @@ spec:
fieldRef:
fieldPath: metadata.name
- name: darkhttpd
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.3.1"
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.1.0"
command:
- /usr/bin/darkhttpd
- /cozystack/assets
@@ -95,6 +102,3 @@ spec:
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoSchedule"
- key: "node.cilium.io/agent-not-ready"
operator: "Exists"
effect: "NoSchedule"

View File

@@ -1,25 +0,0 @@
apiVersion: v2
name: clickhouse
description: Managed ClickHouse service
icon: https://cdn.worldvectorlogo.com/logos/clickhouse.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -1,36 +0,0 @@
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallation"
metadata:
name: "{{ .Release.Name }}"
spec:
{{- with .Values.size }}
defaults:
templates:
dataVolumeClaimTemplate: data-volume-template
{{- end }}
configuration:
{{- with .Values.users }}
users:
{{- range $name, $u := . }}
{{ $name }}/password_sha256_hex: {{ sha256sum $u.password }}
{{ $name }}/profile: {{ ternary "readonly" "default" (index $u "readonly" | default false) }}
{{- end }}
{{- end }}
profiles:
readonly/readonly: "1"
clusters:
- name: "clickhouse"
layout:
shardsCount: 1
replicasCount: 2
{{- with .Values.size }}
templates:
volumeClaimTemplates:
- name: data-volume-template
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ . }}
{{- end }}

View File

@@ -1,8 +0,0 @@
size: 10Gi
users:
user1:
password: strongpassword
user2:
readonly: true
password: hackme
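
Given these default values, the ClickHouseInstallation template above would render the users section roughly as follows (hashes are illustrative placeholders, not computed values):

users:
  user1/password_sha256_hex: <sha256 of "strongpassword">
  user1/profile: default
  user2/password_sha256_hex: <sha256 of "hackme">
  user2/profile: readonly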

View File

@@ -1,20 +1,22 @@
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
NGINX_CACHE_TAG = v0.1.0
include ../../../scripts/common-envs.mk
TAG := v0.1.0
image: image-nginx
image-nginx:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
--provenance false \
--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
--tag $(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG) \
--tag $(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG)-$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG) \
--cache-to type=inline \
--metadata-file images/nginx-cache.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))" > images/nginx-cache.tag
echo "$(REGISTRY)/nginx-cache:$(NGINX_CACHE_TAG)" > images/nginx-cache.tag
update:
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/chrislim2888/IP2Location-C-Library | awk -F'[/^]' 'END{print $$3}') && \

View File

@@ -1,4 +1,4 @@
{
"containerimage.config.digest": "sha256:e406d5ac59cc06bbab51e16ae9a520143ad4f54952ef8f8cca982dc89454d616",
"containerimage.digest": "sha256:08e5063e65d2adc17278abee0ab43ce31cf37bc9bc7eb7988ef16f1f1c459862"
"containerimage.config.digest": "sha256:318fd8d0d6f6127387042f6ad150e87023d1961c7c5059dd5324188a54b0ab4e",
"containerimage.digest": "sha256:e3cf145238e6e45f7f13b9acaea445c94ff29f76a34ba9fa50828401a5a3cc68"
}

View File

@@ -1,25 +0,0 @@
apiVersion: v2
name: kafka
description: Managed Kafka service
icon: https://upload.wikimedia.org/wikipedia/commons/0/05/Apache_kafka.svg
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -1,53 +0,0 @@
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: {{ .Release.Name }}
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
kafka:
replicas: 3
listeners:
- name: plain
port: 9092
type: internal
tls: false
- name: tls
port: 9093
type: internal
tls: true
- name: external
port: 9094
{{- if .Values.external }}
type: loadbalancer
{{- else }}
type: internal
{{- end }}
tls: false
config:
offsets.topic.replication.factor: 3
transaction.state.log.replication.factor: 3
transaction.state.log.min.isr: 2
default.replication.factor: 3
min.insync.replicas: 2
storage:
type: jbod
volumes:
- id: 0
type: persistent-claim
{{- with .Values.kafka.size }}
size: {{ . }}
{{- end }}
deleteClaim: true
zookeeper:
replicas: 3
storage:
type: persistent-claim
{{- with .Values.zookeeper.size }}
size: {{ . }}
{{- end }}
deleteClaim: false
entityOperator:
topicOperator: {}
userOperator: {}

View File

@@ -1,17 +0,0 @@
{{- range $topic := .Values.topics }}
---
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
name: "{{ $.Release.Name }}-{{ kebabcase $topic.name }}"
labels:
strimzi.io/cluster: "{{ $.Release.Name }}"
spec:
topicName: "{{ $topic.name }}"
partitions: 10
replicas: 3
{{- with $topic.config }}
config:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,20 +0,0 @@
external: false
kafka:
size: 10Gi
zookeeper:
size: 5Gi
topics:
- name: Results
partitions: 1
replicas: 3
config:
min.insync.replicas: 2
- name: Orders
config:
cleanup.policy: compact
segment.ms: 3600000
max.compaction.lag.ms: 5400000
min.insync.replicas: 2
partitions: 1
replicationFactor: 3
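
For reference, with a hypothetical release named example, the KafkaTopic template above renders the Results entry roughly as shown below. Note that the template hardcodes partitions: 10 and replicas: 3, so the per-topic partitions, replicas, and replicationFactor keys in this values file are not consulted:

apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaTopic
metadata:
  name: "example-results"
  labels:
    strimzi.io/cluster: "example"
spec:
  topicName: "Results"
  partitions: 10
  replicas: 3
  config:
    min.insync.replicas: 2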

View File

@@ -1,17 +1,19 @@
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
TAG := v0.1.0
UBUNTU_CONTAINER_DISK_TAG = v1.29.1
include ../../../scripts/common-envs.mk
image: image-ubuntu-container-disk
image-ubuntu-container-disk:
docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/ubuntu-container-disk \
--provenance false \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)) \
--tag $(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:latest \
--tag $(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG) \
--tag $(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG)-$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG) \
--cache-to type=inline \
--metadata-file images/ubuntu-container-disk.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/ubuntu-container-disk:$(call settag,$(UBUNTU_CONTAINER_DISK_TAG))" > images/ubuntu-container-disk.tag
echo "$(REGISTRY)/ubuntu-container-disk:$(UBUNTU_CONTAINER_DISK_TAG)" > images/ubuntu-container-disk.tag

View File

@@ -1,4 +1,4 @@
{
"containerimage.config.digest": "sha256:62baab666445d76498fb14cc1d0865fc82e4bdd5cb1d7ba80475dc5024184622",
"containerimage.digest": "sha256:9363d717f966f4e7927da332eaaf17401b42203a2fcb493b428f94d096dae3a5"
"containerimage.config.digest": "sha256:ee8968be63c7c45621ec45f3687211e0875acb24e8d9784e8d2ebcbf46a3538c",
"containerimage.digest": "sha256:16c3c07e74212585786dc1f1ae31d3ab90a575014806193e8e37d1d7751cb084"
}

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.0
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -1,7 +1,7 @@
{{- range $name := .Values.databases }}
{{ $dnsName := replace "_" "-" $name }}
---
apiVersion: k8s.mariadb.com/v1alpha1
apiVersion: mariadb.mmontes.io/v1alpha1
kind: Database
metadata:
name: {{ $.Release.Name }}-{{ $dnsName }}

View File

@@ -1,5 +1,5 @@
---
apiVersion: k8s.mariadb.com/v1alpha1
apiVersion: mariadb.mmontes.io/v1alpha1
kind: MariaDB
metadata:
name: {{ .Release.Name }}
@@ -35,9 +35,8 @@ spec:
# automaticFailover: true
metrics:
enabled: true
exporter:
image: prom/mysqld-exporter:v0.15.1
image: prom/mysqld-exporter:v0.14.0
resources:
requests:
cpu: 50m
@@ -54,10 +53,14 @@ spec:
name: {{ .Release.Name }}-my-cnf
key: config
storage:
size: {{ .Values.size }}
resizeInUseVolumes: true
waitForVolumeResize: true
volumeClaimTemplate:
resources:
requests:
storage: {{ .Values.size }}
accessModes:
- ReadWriteOnce
{{- if .Values.external }}
primaryService:

View File

@@ -2,7 +2,7 @@
{{ if not (eq $name "root") }}
{{ $dnsName := replace "_" "-" $name }}
---
apiVersion: k8s.mariadb.com/v1alpha1
apiVersion: mariadb.mmontes.io/v1alpha1
kind: User
metadata:
name: {{ $.Release.Name }}-{{ $dnsName }}
@@ -15,7 +15,7 @@ spec:
key: {{ $name }}-password
maxUserConnections: {{ $u.maxUserConnections }}
---
apiVersion: k8s.mariadb.com/v1alpha1
apiVersion: mariadb.mmontes.io/v1alpha1
kind: Grant
metadata:
name: {{ $.Release.Name }}-{{ $dnsName }}

View File

@@ -1,9 +1,6 @@
clickhouse 0.1.0 HEAD
http-cache 0.1.0 HEAD
kafka 0.1.0 HEAD
kubernetes 0.1.0 HEAD
mysql 0.1.0 f642698
mysql 0.2.0 HEAD
mysql 0.1.0 HEAD
postgres 0.1.0 HEAD
rabbitmq 0.1.0 HEAD
redis 0.1.1 HEAD

packages/core/Makefile Normal file
View File

@@ -0,0 +1,4 @@
gen: fix-chartnames
fix-chartnames:
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do printf "name: cozy-%s\nversion: 1.0.0\n" "$$i" > "$$i/Chart.yaml"; done
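
For a chart directory named fluxcd, this fix-chartnames loop would overwrite fluxcd/Chart.yaml with exactly:

name: cozy-fluxcd
version: 1.0.0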

View File

@@ -1,3 +0,0 @@
apiVersion: v2
name: cozy-fluxcd
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process

View File

@@ -1,13 +0,0 @@
NAME=fluxcd
NAMESPACE=cozy-$(NAME)
API_VERSIONS_FLAGS=$(addprefix -a ,$(shell kubectl api-versions))
show:
helm template -n $(NAMESPACE) $(NAME) . --no-hooks --dry-run=server $(API_VERSIONS_FLAGS)
apply:
helm template -n $(NAMESPACE) $(NAME) . --no-hooks --dry-run=server $(API_VERSIONS_FLAGS) | kubectl apply -n $(NAMESPACE) -f-
diff:
helm template -n $(NAMESPACE) $(NAME) . --no-hooks --dry-run=server $(API_VERSIONS_FLAGS) | kubectl diff -n $(NAMESPACE) -f-

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-installer
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,10 +1,11 @@
NAMESPACE=cozy-installer
NAME=installer
NAMESPACE=cozy-system
PUSH := 1
LOAD := 0
REGISTRY := ghcr.io/aenix-io/cozystack
TAG := v0.1.0
TALOS_VERSION=$(shell awk '/^version:/ {print $$2}' images/talos/profiles/installer.yaml)
include ../../../scripts/common-envs.mk
show:
helm template -n $(NAMESPACE) $(NAME) .
@@ -20,40 +21,39 @@ update:
image: image-cozystack image-talos image-matchbox
image-cozystack:
make -C ../../.. repos
docker buildx build -f images/cozystack/Dockerfile ../../.. \
--provenance false \
--tag $(REGISTRY)/cozystack:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/cozystack:latest \
--tag $(REGISTRY)/cozystack:$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/cozystack:$(TAG) \
--cache-to type=inline \
--metadata-file images/cozystack.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/cozystack:$(call settag,$(TAG))" > images/cozystack.tag
echo "$(REGISTRY)/cozystack:$(TAG)" > images/cozystack.tag
image-talos:
test -f ../../../_out/assets/installer-amd64.tar || make talos-installer
docker load -i ../../../_out/assets/installer-amd64.tar
docker tag ghcr.io/siderolabs/installer:$(TALOS_VERSION) ghcr.io/aenix-io/cozystack/talos:$(call settag,$(TALOS_VERSION))
docker push ghcr.io/aenix-io/cozystack/talos:$(call settag,$(TALOS_VERSION))
docker tag ghcr.io/siderolabs/installer:$(TALOS_VERSION) ghcr.io/aenix-io/cozystack/talos:$(TALOS_VERSION)
docker push ghcr.io/aenix-io/cozystack/talos:$(TALOS_VERSION)
image-matchbox:
test -f ../../../_out/assets/kernel-amd64 || make talos-kernel
test -f ../../../_out/assets/initramfs-metal-amd64.xz || make talos-initramfs
docker buildx build -f images/matchbox/Dockerfile ../../.. \
--provenance false \
--tag $(REGISTRY)/matchbox:$(call settag,$(TAG)) \
--tag $(REGISTRY)/matchbox:$(call settag,$(TALOS_VERSION)-$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/matchbox:latest \
--tag $(REGISTRY)/matchbox:$(TAG) \
--tag $(REGISTRY)/matchbox:$(TALOS_VERSION)-$(TAG) \
--cache-from type=registry,ref=$(REGISTRY)/matchbox:$(TALOS_VERSION) \
--cache-to type=inline \
--metadata-file images/matchbox.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/matchbox:$(call settag,$(TALOS_VERSION))" > images/matchbox.tag
echo "$(REGISTRY)/matchbox:$(TALOS_VERSION)" > images/matchbox.tag
assets: talos-iso talos-nocloud
assets: talos-iso
talos-initramfs talos-kernel talos-installer talos-iso talos-nocloud:
talos-initramfs talos-kernel talos-installer talos-iso:
mkdir -p ../../../_out/assets
cat images/talos/profiles/$(subst talos-,,$@).yaml | \
docker run --rm -i -v /dev:/dev --privileged "ghcr.io/siderolabs/imager:$(TALOS_VERSION)" --tar-to-stdout - | \

View File

@@ -2,7 +2,7 @@
set -e
set -u
PROFILES="initramfs kernel iso installer nocloud"
PROFILES="initramfs kernel iso installer"
FIRMWARES="amd-ucode amdgpu-firmware bnx2-bnx2x i915-ucode intel-ice-firmware intel-ucode qlogic-firmware"
EXTENSIONS="drbd zfs"
@@ -32,14 +32,6 @@ done
for profile in $PROFILES; do
echo "writing profile images/talos/profiles/$profile.yaml"
if [ "$profile" = "nocloud" ]; then
image_options="{ diskSize: 1306525696, diskFormat: raw }"
out_format=".xz"
else
image_options="{}"
out_format="raw"
fi
cat > images/talos/profiles/$profile.yaml <<EOT
# this file generated by hack/gen-profiles.sh
# do not edit it
@@ -66,7 +58,6 @@ input:
- imageRef: ghcr.io/siderolabs/zfs:${ZFS_VERSION}
output:
kind: ${profile}
imageOptions: ${image_options}
outFormat: ${out_format}
outFormat: raw
EOT
done

View File

@@ -1,4 +1,4 @@
{
"containerimage.config.digest": "sha256:29b11ecbb92bae830f2e55cd4b6f7f3ada09b2f5514c0eeee395bd2dbd12fb81",
"containerimage.digest": "sha256:791df989ff37a76062c7c638dbfc93435df9ee0db48797f2045c80b6d6b937c0"
"containerimage.config.digest": "sha256:ec8a4983a663f06a1503507482667a206e83e0d8d3663dff60ced9221855d6b0",
"containerimage.digest": "sha256:abb7b2fbc1f143c922f2a35afc4423a74b2b63c0bddfe620750613ed835aa861"
}

View File

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/cozystack:v0.3.1
ghcr.io/aenix-io/cozystack/cozystack:v0.1.0

View File

@@ -1,4 +1,4 @@
{
"containerimage.config.digest": "sha256:d63ac434876b4e47c130e6b99f0c9657e718f9d97f522f5ccd878eab75844122",
"containerimage.digest": "sha256:9963580a02ac4ddccafb60f2411365910bcadd73f92d1c9187a278221306a4ed"
"containerimage.config.digest": "sha256:b869a6324f9c0e6d1dd48eee67cbe3842ee14efd59bdde477736ad2f90568ff7",
"containerimage.digest": "sha256:c30b237c5fa4fbbe47e1aba56e8f99569fe865620aa1953f31fc373794123cd7"
}

View File

@@ -1,27 +0,0 @@
# this file generated by hack/gen-profiles.sh
# do not edit it
arch: amd64
platform: metal
secureboot: false
version: v1.6.4
input:
kernel:
path: /usr/install/amd64/vmlinuz
initramfs:
path: /usr/install/amd64/initramfs.xz
baseInstaller:
imageRef: ghcr.io/siderolabs/installer:v1.6.4
systemExtensions:
- imageRef: ghcr.io/siderolabs/amd-ucode:20240115
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20240115
- imageRef: ghcr.io/siderolabs/bnx2-bnx2x:20240115
- imageRef: ghcr.io/siderolabs/i915-ucode:20240115
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20240115
- imageRef: ghcr.io/siderolabs/intel-ucode:20231114
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20240115
- imageRef: ghcr.io/siderolabs/drbd:9.2.6-v1.6.4
- imageRef: ghcr.io/siderolabs/zfs:2.1.14-v1.6.4
output:
kind: image
imageOptions: { diskSize: 1306525696, diskFormat: raw }
outFormat: .xz

View File

@@ -12,6 +12,12 @@ metadata:
name: cozystack
namespace: cozy-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cozystack
namespace: cozy-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@@ -76,9 +82,6 @@ spec:
- key: "node.kubernetes.io/not-ready"
operator: "Exists"
effect: "NoSchedule"
- key: "node.cilium.io/agent-not-ready"
operator: "Exists"
effect: "NoSchedule"
---
apiVersion: v1
kind: Service

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-platform
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,5 +1,5 @@
NAME=platform
NAMESPACE=cozy-system
NAME=platform
API_VERSIONS_FLAGS=$(addprefix -a ,$(shell kubectl api-versions))
@@ -13,7 +13,7 @@ namespaces-show:
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml
namespaces-apply:
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl apply -n $(NAMESPACE) -f-
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl apply -f-
diff:
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) | kubectl diff -f-
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl diff -f-

View File

@@ -1,114 +0,0 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: cilium
releaseName: cilium
chart: cozy-cilium
namespace: cozy-cilium
privileged: true
dependsOn: []
values:
cilium:
bpf:
masquerade: true
cni:
chainingMode: ~
customConf: false
configMap: ""
enableIPv4Masquerade: true
enableIdentityMark: true
ipv4NativeRoutingCIDR: "{{ index $cozyConfig.data "ipv4-pod-cidr" }}"
autoDirectNodeRoutes: true
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: [cilium]
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cilium,cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cilium,cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [cilium,victoria-metrics-operator]
- name: metallb
releaseName: metallb
chart: cozy-metallb
namespace: cozy-metallb
privileged: true
dependsOn: [cilium]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: [cilium]
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [cilium,cert-manager,victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cilium,cert-manager]
- name: kafka-operator
releaseName: kafka-operator
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
dependsOn: [cilium,kubeovn]
- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
dependsOn: [cilium,kubeovn]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: [cilium]
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: [cilium]
- name: piraeus-operator
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cilium,cert-manager]
- name: linstor
releaseName: linstor
chart: cozy-linstor
namespace: cozy-linstor
privileged: true
dependsOn: [piraeus-operator,cilium,cert-manager]
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: []

View File

@@ -1,75 +0,0 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: []
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [victoria-metrics-operator]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: []
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cert-manager]
- name: kafka-operator
releaseName: kafka-operator
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
dependsOn: [cilium,kubeovn]
- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
dependsOn: [cilium,kubeovn]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: []
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: []
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: []

View File

@@ -1,183 +0,0 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: cilium
releaseName: cilium
chart: cozy-cilium
namespace: cozy-cilium
privileged: true
dependsOn: []
- name: kubeovn
releaseName: kubeovn
chart: cozy-kubeovn
namespace: cozy-kubeovn
privileged: true
dependsOn: [cilium]
values:
cozystack:
nodesHash: {{ include "cozystack.master-node-ips" . | sha256sum }}
kube-ovn:
ipv4:
POD_CIDR: "{{ index $cozyConfig.data "ipv4-pod-cidr" }}"
POD_GATEWAY: "{{ index $cozyConfig.data "ipv4-pod-gateway" }}"
SVC_CIDR: "{{ index $cozyConfig.data "ipv4-svc-cidr" }}"
JOIN_CIDR: "{{ index $cozyConfig.data "ipv4-join-cidr" }}"
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: [cilium,kubeovn]
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cilium,kubeovn,cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cilium,kubeovn,cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
- name: kubevirt-operator
releaseName: kubevirt-operator
chart: cozy-kubevirt-operator
namespace: cozy-kubevirt
dependsOn: [cilium,kubeovn]
- name: kubevirt
releaseName: kubevirt
chart: cozy-kubevirt
namespace: cozy-kubevirt
privileged: true
dependsOn: [cilium,kubeovn,kubevirt-operator]
- name: kubevirt-cdi-operator
releaseName: kubevirt-cdi-operator
chart: cozy-kubevirt-cdi-operator
namespace: cozy-kubevirt-cdi
dependsOn: [cilium,kubeovn]
- name: kubevirt-cdi
releaseName: kubevirt-cdi
chart: cozy-kubevirt-cdi
namespace: cozy-kubevirt-cdi
dependsOn: [cilium,kubeovn,kubevirt-cdi-operator]
- name: metallb
releaseName: metallb
chart: cozy-metallb
namespace: cozy-metallb
privileged: true
dependsOn: [cilium,kubeovn]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: [cilium,kubeovn]
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [cilium,kubeovn,cert-manager,victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cilium,kubeovn,cert-manager]
- name: kafka-operator
releaseName: kafka-operator
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
dependsOn: [cilium,kubeovn]
- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
dependsOn: [cilium,kubeovn]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: [cilium,kubeovn]
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: [cilium,kubeovn]
- name: piraeus-operator
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cilium,kubeovn,cert-manager]
- name: linstor
releaseName: linstor
chart: cozy-linstor
namespace: cozy-linstor
privileged: true
dependsOn: [piraeus-operator,cilium,kubeovn,cert-manager]
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: [cilium,kubeovn]
- name: dashboard
releaseName: dashboard
chart: cozy-dashboard
namespace: cozy-dashboard
dependsOn: [cilium,kubeovn]
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
values:
kubeapps:
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
- name: kamaji
releaseName: kamaji
chart: cozy-kamaji
namespace: cozy-kamaji
dependsOn: [cilium,kubeovn,cert-manager]
- name: capi-operator
releaseName: capi-operator
chart: cozy-capi-operator
namespace: cozy-cluster-api
privileged: true
dependsOn: [cilium,kubeovn,cert-manager]
- name: capi-providers
releaseName: capi-providers
chart: cozy-capi-providers
namespace: cozy-cluster-api
privileged: true
dependsOn: [cilium,kubeovn,capi-operator]

View File

@@ -1,101 +0,0 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: []
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [victoria-metrics-operator]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: []
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [cert-manager,victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cert-manager]
- name: kafka-operator
releaseName: kafka-operator
chart: cozy-kafka-operator
namespace: cozy-kafka-operator
dependsOn: [cilium,kubeovn]
- name: clickhouse-operator
releaseName: clickhouse-operator
chart: cozy-clickhouse-operator
namespace: cozy-clickhouse-operator
dependsOn: [cilium,kubeovn]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: []
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: []
- name: piraeus-operator
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cert-manager]
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: []
- name: dashboard
releaseName: dashboard
chart: cozy-dashboard
namespace: cozy-dashboard
dependsOn: []
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
values:
kubeapps:
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,7 +1,7 @@
{{/*
Get IP-addresses of master nodes
*/}}
{{- define "cozystack.master-node-ips" -}}
{{- define "master.nodeIPs" -}}
{{- $nodes := lookup "v1" "Node" "" "" -}}
{{- $ips := list -}}
{{- range $node := $nodes.items -}}
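
The hunk cuts off mid-definition. A plausible completion of this helper, assumed from the prologue shown here rather than taken from the actual file, would filter control-plane nodes and join their internal addresses:

{{/* hypothetical continuation; the real tail of this file is truncated in the diff */}}
{{- range $node := $nodes.items -}}
{{- if hasKey $node.metadata.labels "node-role.kubernetes.io/control-plane" -}}
{{- range $addr := $node.status.addresses -}}
{{- if eq $addr.type "InternalIP" -}}
{{- $ips = append $ips $addr.address -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- join "," $ips -}}
{{- end -}}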

View File

@@ -1,10 +1,7 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
{{- $host := "example.org" }}
{{- $tenantRoot := list }}
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2beta2" }}
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2beta2" "HelmRelease" "tenant-root" "tenant-root" }}
{{- if .Capabilities.APIVersions.Has "helm.toolkit.fluxcd.io/v2beta1" }}
{{- $tenantRoot = lookup "helm.toolkit.fluxcd.io/v2beta1" "HelmRelease" "tenant-root" "tenant-root" }}
{{- end }}
{{- if and $tenantRoot $tenantRoot.spec $tenantRoot.spec.values $tenantRoot.spec.values.host }}
{{- $host = $tenantRoot.spec.values.host }}
@@ -22,7 +19,7 @@ metadata:
namespace.cozystack.io/host: "{{ $host }}"
name: tenant-root
---
apiVersion: helm.toolkit.fluxcd.io/v2beta2
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: tenant-root
@@ -48,9 +45,7 @@ spec:
values:
host: "{{ $host }}"
dependsOn:
{{- range $x := $bundle.releases }}
{{- if has $x.name (list "cilium" "kubeovn") }}
- name: {{ $x.name }}
namespace: {{ $x.namespace }}
{{- end }}
{{- end }}
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn

View File

@@ -1,27 +1,13 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
{{- $dependencyNamespaces := dict }}
{{- $disabledComponents := splitList "," ((index $cozyConfig.data "bundle-disable") | default "") }}
{{/* collect dependency namespaces from releases */}}
{{- range $x := $bundle.releases }}
{{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
{{- end }}
{{- range $x := $bundle.releases }}
{{- if not (has $x.name $disabledComponents) }}
---
apiVersion: helm.toolkit.fluxcd.io/v2beta2
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: {{ $x.name }}
namespace: {{ $x.namespace }}
name: cilium
namespace: cozy-cilium
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: {{ $x.releaseName | default $x.name }}
releaseName: cilium
install:
remediation:
retries: -1
@@ -30,31 +16,743 @@ spec:
retries: -1
chart:
spec:
chart: {{ $x.chart }}
chart: cozy-cilium
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubeovn
namespace: cozy-kubeovn
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubeovn
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubeovn
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
{{- $values := dict }}
{{- with $x.values }}
{{- $values = merge . $values }}
{{- end }}
{{- with index $cozyConfig.data (printf "values-%s" $x.name) }}
{{- $values = merge (fromYaml .) $values }}
{{- end }}
{{- with $values }}
values:
{{- toYaml . | nindent 4}}
{{- end }}
{{- with $x.dependsOn }}
cozystack:
configHash: {{ index (lookup "v1" "ConfigMap" "cozy-system" "cozystack") "data" | toJson | sha256sum }}
nodesHash: {{ include "master.nodeIPs" . | sha256sum }}
dependsOn:
{{- range $dep := . }}
{{- if not (has $dep $disabledComponents) }}
- name: {{ $dep }}
namespace: {{ index $dependencyNamespaces $dep }}
- name: cilium
namespace: cozy-cilium
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cozy-fluxcd
namespace: cozy-fluxcd
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: fluxcd
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-fluxcd
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager
namespace: cozy-cert-manager
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: cert-manager
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-cert-manager
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager-issuers
namespace: cozy-cert-manager
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: cert-manager-issuers
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-cert-manager-issuers
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: victoria-metrics-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-victoria-metrics-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: monitoring
namespace: cozy-monitoring
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: monitoring
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-monitoring
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt-operator
namespace: cozy-kubevirt
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt
namespace: cozy-kubevirt
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: kubevirt-operator
namespace: cozy-kubevirt
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt-cdi-operator
namespace: cozy-kubevirt-cdi
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt-cdi-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt-cdi-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt-cdi
namespace: cozy-kubevirt-cdi
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt-cdi
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt-cdi
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: kubevirt-cdi-operator
namespace: cozy-kubevirt-cdi
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metallb
namespace: cozy-metallb
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: metallb
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-metallb
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: grafana-operator
namespace: cozy-grafana-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: grafana-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-grafana-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: mariadb-operator
namespace: cozy-mariadb-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: mariadb-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-mariadb-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
- name: victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: postgres-operator
namespace: cozy-postgres-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: postgres-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-postgres-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: rabbitmq-operator
namespace: cozy-rabbitmq-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: rabbitmq-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-rabbitmq-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: redis-operator
namespace: cozy-redis-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: redis-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-redis-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: piraeus-operator
namespace: cozy-linstor
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: piraeus-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-piraeus-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: linstor
namespace: cozy-linstor
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: linstor
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-linstor
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: piraeus-operator
namespace: cozy-linstor
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: telepresence
namespace: cozy-telepresence
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: traffic-manager
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-telepresence
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: dashboard
namespace: cozy-dashboard
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: dashboard
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-dashboard
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
values:
kubeapps:
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kamaji
namespace: cozy-kamaji
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kamaji
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kamaji
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: capi-operator
namespace: cozy-cluster-api
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: capi-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-capi-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: capi-providers
namespace: cozy-cluster-api
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: capi-providers
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-capi-providers
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: capi-operator
namespace: cozy-cluster-api
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn

View File

@@ -1,33 +1,13 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
{{- $namespaces := dict }}
{{/* collect namespaces from releases */}}
{{- range $x := $bundle.releases }}
{{- if not (hasKey $namespaces $x.namespace) }}
{{- $_ := set $namespaces $x.namespace false }}
{{- end }}
{{/* if at least one release requires a privileged namespace, then it should be privileged */}}
{{- if or $x.privileged (index $namespaces $x.namespace) }}
{{- $_ := set $namespaces $x.namespace true }}
{{- end }}
{{- end }}
{{/* Add extra namespaces */}}
{{- $_ := set $namespaces "cozy-public" false }}
{{- $_ := set $namespaces "cozy-fluxcd" false }}
{{- range $namespace, $privileged := $namespaces }}
{{- range $ns := .Values.namespaces }}
---
apiVersion: v1
kind: Namespace
metadata:
annotations:
"helm.sh/resource-policy": keep
{{- if $privileged }}
{{- if $ns.privileged }}
labels:
pod-security.kubernetes.io/enforce: privileged
{{- end }}
name: {{ $namespace }}
name: {{ $ns.name }}
{{- end }}

View File

@@ -0,0 +1,30 @@
namespaces:
- name: cozy-public
- name: cozy-system
privileged: true
- name: cozy-cert-manager
- name: cozy-cilium
privileged: true
- name: cozy-fluxcd
- name: cozy-grafana-operator
- name: cozy-kamaji
- name: cozy-cluster-api
privileged: true # for capk only
- name: cozy-dashboard
- name: cozy-kubeovn
privileged: true
- name: cozy-kubevirt
privileged: true
- name: cozy-kubevirt-cdi
- name: cozy-linstor
privileged: true
- name: cozy-mariadb-operator
- name: cozy-metallb
privileged: true
- name: cozy-monitoring
privileged: true
- name: cozy-postgres-operator
- name: cozy-rabbitmq-operator
- name: cozy-redis-operator
- name: cozy-telepresence
- name: cozy-victoria-metrics-operator

View File

@@ -67,7 +67,7 @@ spec:
ingress:
metadata:
annotations:
acme.cert-manager.io/http01-ingress-class: "{{ $ingress }}"
kubernetes.io/ingress.class: "{{ $ingress }}"
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
ingressClassName: "{{ $ingress }}"

View File

@@ -1,12 +1,12 @@
OUT=../../_out/repos/system
include ../../scripts/common-envs.mk
gen: fix-chartnames
repo:
repo: fix-chartnames
rm -rf "$(OUT)"
mkdir -p "$(OUT)"
helm package -d "$(OUT)" $$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")') --version $(VERSION)
helm package -d "$(OUT)" $$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')
cd "$(OUT)" && helm repo index .
fix-chartnames:
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: cozy-$$i/" "$$i/Chart.yaml"; done
find . -name Chart.yaml -maxdepth 2 | awk -F/ '{print $$2}' | while read i; do printf "name: cozy-%s\nversion: 1.0.0\n" "$$i" > "$$i/Chart.yaml"; done

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-capi-operator
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,7 +1,14 @@
NAME=capi-operator
NAMESPACE=cozy-cluster-api
include ../../../scripts/package-system.mk
show:
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
apply:
helm upgrade -i -n $(NAMESPACE) $(NAME) .
diff:
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .
update:
rm -rf charts

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-capi-providers
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,4 +1,11 @@
NAME=capi-providers
NAMESPACE=cozy-cluster-api
include ../../../scripts/package-system.mk
show:
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
apply:
helm upgrade -i -n $(NAMESPACE) $(NAME) .
diff:
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-cert-manager-issuers
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,4 +1,11 @@
NAME=cert-manager-issuers
NAMESPACE=cozy-cert-manager
include ../../../scripts/package-system.mk
show:
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
apply:
helm upgrade -i -n $(NAMESPACE) $(NAME) .
diff:
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-cert-manager
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,7 +1,14 @@
NAME=cert-manager
NAMESPACE=cozy-$(NAME)
NAMESPACE=cozy-cert-manager
include ../../../scripts/package-system.mk
show:
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
apply:
helm upgrade -i -n $(NAMESPACE) $(NAME) .
diff:
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .
update:
rm -rf charts

View File

@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-cilium
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0

View File

@@ -1,12 +1,19 @@
NAMESPACE=cozy-cilium
NAME=cilium
NAMESPACE=cozy-$(NAME)
include ../../../scripts/package-system.mk
show:
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
apply:
helm upgrade -i -n $(NAMESPACE) $(NAME) .
diff:
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .
update:
rm -rf charts
helm repo add cilium https://helm.cilium.io/
helm repo update cilium
helm pull cilium/cilium --untar --untardir charts --version 1.14
helm pull cilium/cilium --untar --untardir charts
sed -i -e '/Used in iptables/d' -e '/SYS_MODULE/d' charts/cilium/values.yaml
patch -p3 --no-backup-if-mismatch < patches/fix-cgroups.patch
patch -p3 < patches/fix-cgroups.patch

View File

@@ -122,7 +122,7 @@ annotations:
description: |
CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. the multi-pool IPAM mode).
apiVersion: v2
appVersion: 1.14.9
appVersion: 1.14.5
description: eBPF-based Networking, Security, and Observability
home: https://cilium.io/
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.14/Documentation/images/logo-solo.svg
@@ -138,4 +138,4 @@ kubeVersion: '>= 1.16.0-0'
name: cilium
sources:
- https://github.com/cilium/cilium
version: 1.14.9
version: 1.14.5

View File

@@ -1,6 +1,6 @@
# cilium
![Version: 1.14.9](https://img.shields.io/badge/Version-1.14.9-informational?style=flat-square) ![AppVersion: 1.14.9](https://img.shields.io/badge/AppVersion-1.14.9-informational?style=flat-square)
![Version: 1.14.5](https://img.shields.io/badge/Version-1.14.5-informational?style=flat-square) ![AppVersion: 1.14.5](https://img.shields.io/badge/AppVersion-1.14.5-informational?style=flat-square)
Cilium is open source software for providing and transparently securing
network connectivity and loadbalancing between application workloads such as
@@ -76,7 +76,7 @@ contributors across the globe, there is almost always someone available to help.
| authentication.mutual.spire.install.agent.securityContext | object | `{}` | Security context to be added to spire agent containers. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container |
| authentication.mutual.spire.install.agent.serviceAccount | object | `{"create":true,"name":"spire-agent"}` | SPIRE agent service account |
| authentication.mutual.spire.install.agent.skipKubeletVerification | bool | `true` | SPIRE Workload Attestor kubelet verification. |
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| authentication.mutual.spire.install.agent.tolerations | list | `[]` | SPIRE agent tolerations configuration ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
@@ -155,12 +155,12 @@ contributors across the globe, there is almost always someone available to help.
| clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. |
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
| clustermesh.apiserver.image | object | `{"digest":"sha256:5c16f8b8e22ce41e11998e70846fbcecea3a6b683a38253809ead8d871f6d8a3","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.14.9","useDigest":true}` | Clustermesh API server image. |
| clustermesh.apiserver.image | object | `{"digest":"sha256:7eaa35cf5452c43b1f7d0cde0d707823ae7e49965bcb54c053e31ea4e04c3d96","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.14.5","useDigest":true}` | Clustermesh API server image. |
| clustermesh.apiserver.kvstoremesh.enabled | bool | `false` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
| clustermesh.apiserver.kvstoremesh.extraVolumeMounts | list | `[]` | Additional KVStoreMesh volumeMounts. |
| clustermesh.apiserver.kvstoremesh.image | object | `{"digest":"sha256:9d9efb25806660f3663b9cd803fb8679f2b115763470002a9770e2c1eb1e5b22","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/kvstoremesh","tag":"v1.14.9","useDigest":true}` | KVStoreMesh image. |
| clustermesh.apiserver.kvstoremesh.image | object | `{"digest":"sha256:d7137edd0efa2b1407b20088af3980a9993bb616d85bf9b55ea2891d1b99023a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/kvstoremesh","tag":"v1.14.5","useDigest":true}` | KVStoreMesh image. |
| clustermesh.apiserver.kvstoremesh.resources | object | `{}` | Resource requests and limits for the KVStoreMesh container |
| clustermesh.apiserver.kvstoremesh.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}` | KVStoreMesh Security context |
| clustermesh.apiserver.metrics.enabled | bool | `true` | Enables exporting apiserver metrics in OpenMetrics format. |
@@ -300,7 +300,7 @@ contributors across the globe, there is almost always someone available to help.
| eni.subnetIDsFilter | list | `[]` | Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. |
| eni.subnetTagsFilter | list | `[]` | Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs Important note: This requires that each instance has an ENI with a matching subnet attached when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, use the CNI configuration file settings (cni.customConf) instead. |
| eni.updateEC2AdapterLimitViaAPI | bool | `true` | Update ENI Adapter limits from the EC2 API |
| envoy.affinity | object | `{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"cilium.io/no-schedule","operator":"NotIn","values":["true"]}]}]}},"podAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium"}},"topologyKey":"kubernetes.io/hostname"}]},"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. |
| envoy.affinity | object | `{"podAntiAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":[{"labelSelector":{"matchLabels":{"k8s-app":"cilium-envoy"}},"topologyKey":"kubernetes.io/hostname"}]}}` | Affinity for cilium-envoy. |
| envoy.connectTimeoutSeconds | int | `2` | Time in seconds after which a TCP connection attempt times out |
| envoy.dnsPolicy | string | `nil` | DNS policy for Cilium envoy pods. Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy |
| envoy.enabled | bool | `false` | Enable Envoy Proxy in standalone DaemonSet. |
@@ -312,7 +312,7 @@ contributors across the globe, there is almost always someone available to help.
| envoy.extraVolumes | list | `[]` | Additional envoy volumes. |
| envoy.healthPort | int | `9878` | TCP port for the health API. |
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
| envoy.image | object | `{"digest":"sha256:39b75548447978230dedcf25da8940e4d3540c741045ef391a8e74dbb9661a86","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.26.7-bbde4095997ea57ead209f56158790d47224a0f5","useDigest":true}` | Envoy container image. |
| envoy.image | object | `{"digest":"sha256:992998398dadfff7117bfa9fdb7c9474fefab7f0237263f7c8114e106c67baca","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.26.6-ad82c7c56e88989992fd25d8d67747de865c823b","useDigest":true}` | Envoy container image. |
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
| envoy.log.format | string | `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` | The format string to use for laying out the log message metadata of Envoy. |
@@ -324,15 +324,14 @@ contributors across the globe, there is almost always someone available to help.
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
| envoy.podSecurityContext | object | `{}` | Security Context for cilium-envoy pods. |
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. |
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
| envoy.prometheus.port | string | `"9964"` | Serve prometheus metrics for cilium-envoy on the configured port |
| envoy.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) Note that this setting applies to both cilium-envoy _and_ cilium-agent with Envoy enabled. |
| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) |
| envoy.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. |
| envoy.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. |
| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. |
| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy |
| envoy.readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe |
| envoy.readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe |
| envoy.resources | object | `{}` | Envoy resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ |
@@ -419,7 +418,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
| hubble.relay.image | object | `{"digest":"sha256:f506f3c6e0a979437cde79eb781654fda4f10ddb5642cebc4dc81254cfb7eeaa","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.14.9","useDigest":true}` | Hubble-relay container image. |
| hubble.relay.image | object | `{"digest":"sha256:dbef89f924a927043d02b40c18e417c1ea0e8f58b44523b80fef7e3652db24d4","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.14.5","useDigest":true}` | Hubble-relay container image. |
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
@@ -476,7 +475,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
| hubble.ui.backend.image | object | `{"digest":"sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.13.0","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.image | object | `{"digest":"sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.12.1","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
| hubble.ui.backend.securityContext | object | `{}` | Hubble-ui backend security context. |
| hubble.ui.baseUrl | string | `"/"` | Defines base url prefix for all hubble-ui http requests. It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. Trailing `/` is required for custom path, ex. `/service-map/` |
@@ -484,7 +483,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
| hubble.ui.frontend.image | object | `{"digest":"sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.13.0","useDigest":true}` | Hubble-ui frontend image. |
| hubble.ui.frontend.image | object | `{"digest":"sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.12.1","useDigest":true}` | Hubble-ui frontend image. |
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
@@ -511,7 +510,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). |
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
| image | object | `{"digest":"sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.9","useDigest":true}` | Agent container image. |
| image | object | `{"digest":"sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.5","useDigest":true}` | Agent container image. |
| imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images |
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
@@ -619,7 +618,7 @@ contributors across the globe, there is almost always someone available to help.
| operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. |
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
| operator.image | object | `{"alibabacloudDigest":"sha256:765314779093b54750f83280f009229f20fe1f28466a633d9bb4143d2ad669c5","awsDigest":"sha256:041ad5b49ae63ba0f1974e1a1d9ebf9f52541cd2813088fa687f9d544125a1ec","azureDigest":"sha256:2d3b9d868eb03fa9256d34192a734a2abab283f527a9c97b7cefcd3401649d17","genericDigest":"sha256:1552d653870dd8ebbd16ee985a5497dd78a2097370978b0cfbd2da2072f30712","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.14.9","useDigest":true}` | cilium-operator image. |
| operator.image | object | `{"alibabacloudDigest":"sha256:e0152c498ba73c56a82eee2a706c8f400e9a6999c665af31a935bdf08e659bc3","awsDigest":"sha256:785ccf1267d0ed3ba9e4bd8166577cb4f9e4ce996af26b27c9d5c554a0d5b09a","azureDigest":"sha256:9203f5583aa34e716d7a6588ebd144e43ce3b77873f578fc12b2679e33591353","genericDigest":"sha256:303f9076bdc73b3fc32aaedee64a14f6f44c8bb08ee9e3956d443021103ebe7a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.14.5","useDigest":true}` | cilium-operator image. |
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
@@ -666,7 +665,7 @@ contributors across the globe, there is almost always someone available to help.
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
| preflight.image | object | `{"digest":"sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.9","useDigest":true}` | Cilium pre-flight image. |
| preflight.image | object | `{"digest":"sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.14.5","useDigest":true}` | Cilium pre-flight image. |
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
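
The image objects in this table pair a tag with a pinned digest. As a sketch (release name and namespace are illustrative), the same keys can be overridden at install time, for example to pin the agent image shown above:

```bash
helm upgrade -i cilium cilium/cilium -n kube-system \
  --set image.tag=v1.14.5 \
  --set image.useDigest=true \
  --set image.digest=sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b
```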

View File

@@ -11,9 +11,9 @@ set -o nounset
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
if [[ "$(iptables-save | grep -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
iptables-save | grep -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
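
The two variants of this check differ only in `-E`. In a POSIX basic regular expression an unescaped `|` is a literal character, so without `-E` the alternation never matches either chain name; a quick shell sanity check:

```bash
# Simulated iptables-save line containing an AWS chain reference
line=':AWS-SNAT-CHAIN-0 - [0:0]'
echo "$line" | grep -c  'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN'   # prints 0: '|' is literal in a BRE
echo "$line" | grep -Ec 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN'   # prints 1: '|' is alternation in an ERE
```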

View File

@@ -100,7 +100,7 @@ then
# Since that version containerd no longer allows missing configuration for the CNI,
# not even for pods with hostNetwork set to true. Thus, we add a temporary one.
# This will be replaced with the real config by the agent pod.
echo -e '{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}' > /etc/cni/net.d/05-cilium.conf
echo -e "{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}" > /etc/cni/net.d/05-cilium.conf
fi
# Start containerd. It won't create its CNI configuration file anymore.
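
The quoting difference shown above is the whole bug: inside an outer double-quoted string the inner `"` are consumed by the shell, so the written file loses its JSON quoting. A minimal sketch of the two behaviors:

```bash
echo -e "{\n\t"cniVersion": "0.3.1"}"   # double quotes: the shell eats the inner quotes, output is not valid JSON
echo -e '{\n\t"cniVersion": "0.3.1"}'   # single quotes: the inner quotes survive, output is valid JSON
```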

View File

@@ -447,9 +447,6 @@ spec:
volumeMounts:
- name: tmp
mountPath: /tmp
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
terminationMessagePolicy: FallbackToLogsOnError
{{- if .Values.cgroup.autoMount.enabled }}
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.

View File

@@ -34,20 +34,6 @@ spec:
metricRelabelings:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.envoy.prometheus.serviceMonitor.enabled }}
- port: envoy-metrics
interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }}
honorLabels: true
path: /metrics
{{- with .Values.envoy.prometheus.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.envoy.prometheus.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
targetLabels:
- k8s-app
{{- end }}
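
This template block only renders when the corresponding values are set. A hedged example of turning the Envoy scrape endpoint on (release name and namespace are illustrative; the keys appear in the values table above):

```bash
helm upgrade -i cilium cilium/cilium -n kube-system \
  --set envoy.prometheus.enabled=true \
  --set envoy.prometheus.serviceMonitor.enabled=true
```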

View File

@@ -13,7 +13,6 @@
{{- $fragmentTracking := "true" -}}
{{- $defaultKubeProxyReplacement := "false" -}}
{{- $azureUsePrimaryAddress := "true" -}}
{{- $defaultDNSProxyEnableTransparentMode := "false" -}}
{{- /* Default values when 1.8 was initially deployed */ -}}
{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}}
@@ -49,7 +48,6 @@
{{- $azureUsePrimaryAddress = "false" -}}
{{- end }}
{{- $defaultKubeProxyReplacement = "disabled" -}}
{{- $defaultDNSProxyEnableTransparentMode = "true" -}}
{{- end -}}
{{- /* Default values when 1.14 was initially deployed */ -}}
@@ -432,16 +430,10 @@ data:
# - vxlan (default)
# - geneve
{{- if .Values.gke.enabled }}
{{- if ne (.Values.routingMode | default "native") "native" }}
{{- fail (printf "RoutingMode must be set to native when gke.enabled=true" )}}
{{- end }}
routing-mode: "native"
enable-endpoint-routes: "true"
enable-local-node-route: "false"
{{- else if .Values.aksbyocni.enabled }}
{{- if ne (.Values.routingMode | default "tunnel") "tunnel" }}
{{- fail (printf "RoutingMode must be set to tunnel when aksbyocni.enabled=true" )}}
{{- end }}
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
{{- else if .Values.routingMode }}
@@ -1100,13 +1092,6 @@ data:
{{- end }}
{{- if .Values.dnsProxy }}
{{- if hasKey .Values.dnsProxy "enableTransparentMode" }}
# explicit setting gets precedence
dnsproxy-enable-transparent-mode: {{ .Values.dnsProxy.enableTransparentMode | quote }}
{{- else if eq $cniChainingMode "none" }}
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: {{ $defaultDNSProxyEnableTransparentMode | quote }}
{{- end }}
{{- if .Values.dnsProxy.dnsRejectResponseCode }}
tofqdns-dns-reject-response-code: {{ .Values.dnsProxy.dnsRejectResponseCode | quote }}
{{- end }}
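
Per the template logic above, an explicit `dnsProxy.enableTransparentMode` always takes precedence over the version-dependent default. A sketch of forcing it explicitly (release name and namespace are illustrative):

```bash
# Renders `dnsproxy-enable-transparent-mode: "true"` into the ConfigMap
helm upgrade -i cilium cilium/cilium -n kube-system \
  --set dnsProxy.enableTransparentMode=true
```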

View File

@@ -82,7 +82,7 @@ spec:
{{- if semverCompare ">=1.20-0" .Capabilities.KubeVersion.Version }}
startupProbe:
httpGet:
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
host: "localhost"
path: /healthz
port: {{ .Values.envoy.healthPort }}
scheme: HTTP
@@ -92,7 +92,7 @@ spec:
{{- end }}
livenessProbe:
httpGet:
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
host: "localhost"
path: /healthz
port: {{ .Values.envoy.healthPort }}
scheme: HTTP
@@ -110,7 +110,7 @@ spec:
timeoutSeconds: 5
readinessProbe:
httpGet:
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
host: "localhost"
path: /healthz
port: {{ .Values.envoy.healthPort }}
scheme: HTTP
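
The replaced `ternary` pipeline picks the IPv4 loopback when `ipv4.enabled` is true and the IPv6 loopback otherwise. A throwaway chart makes the behavior easy to verify (paths and names are illustrative):

```bash
helm create /tmp/tdemo >/dev/null
cat > /tmp/tdemo/templates/host.yaml <<'EOF'
host: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
EOF
helm template /tmp/tdemo --set ipv4.enabled=true  --show-only templates/host.yaml   # host: "127.0.0.1"
helm template /tmp/tdemo --set ipv4.enabled=false --show-only templates/host.yaml   # host: "::1"
```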

View File

@@ -7,7 +7,6 @@ metadata:
namespace: {{ .Values.envoy.prometheus.serviceMonitor.namespace | default .Release.Namespace }}
labels:
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
{{- with .Values.envoy.prometheus.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
@@ -23,7 +22,7 @@ spec:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: envoy-metrics
- port: metrics
interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }}
honorLabels: true
path: /metrics

View File

@@ -66,13 +66,8 @@ spec:
- /tmp/ready
initialDelaySeconds: 5
periodSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
{{- with .Values.preflight.extraEnv }}
env:
{{- toYaml . | trim | nindent 12 }}
{{- end }}
volumeMounts:

View File

@@ -88,12 +88,10 @@ spec:
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.authentication.mutual.spire.install.agent.tolerations }}
tolerations:
{{- with .Values.authentication.mutual.spire.install.agent.tolerations }}
{{- toYaml . | trim | nindent 8 }}
{{- end }}
- key: {{ .Values.agentNotReadyTaintKey | default "node.cilium.io/agent-not-ready" }}
effect: NoSchedule
{{- toYaml . | trim | nindent 8 }}
{{- end }}
volumes:
- name: spire-config
configMap:
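
Moving the `with` block changes the scope: tolerations are now rendered only when the value is non-empty, and the unconditional agent-not-ready toleration is gone. A hedged values snippet restoring custom SPIRE agent tolerations (the file path is illustrative; the key path comes from the values table above):

```bash
cat > /tmp/spire-agent-tolerations.yaml <<'EOF'
authentication:
  mutual:
    spire:
      install:
        agent:
          tolerations:
            - key: node.kubernetes.io/not-ready
              effect: NoSchedule
EOF
helm upgrade -i cilium cilium/cilium -n kube-system -f /tmp/spire-agent-tolerations.yaml
```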

View File

@@ -143,10 +143,10 @@ rollOutCiliumPods: false
image:
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.14.9"
tag: "v1.14.5"
pullPolicy: "IfNotPresent"
# cilium-digest
digest: "sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301"
digest: "sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b"
useDigest: true
# -- Affinity for cilium-agent.
@@ -1109,9 +1109,9 @@ hubble:
image:
override: ~
repository: "quay.io/cilium/hubble-relay"
tag: "v1.14.9"
tag: "v1.14.5"
# hubble-relay-digest
digest: "sha256:f506f3c6e0a979437cde79eb781654fda4f10ddb5642cebc4dc81254cfb7eeaa"
digest: "sha256:dbef89f924a927043d02b40c18e417c1ea0e8f58b44523b80fef7e3652db24d4"
useDigest: true
pullPolicy: "IfNotPresent"
@@ -1337,8 +1337,8 @@ hubble:
image:
override: ~
repository: "quay.io/cilium/hubble-ui-backend"
tag: "v0.13.0"
digest: "sha256:1e7657d997c5a48253bb8dc91ecee75b63018d16ff5e5797e5af367336bc8803"
tag: "v0.12.1"
digest: "sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe"
useDigest: true
pullPolicy: "IfNotPresent"
@@ -1368,8 +1368,8 @@ hubble:
image:
override: ~
repository: "quay.io/cilium/hubble-ui"
tag: "v0.13.0"
digest: "sha256:7d663dc16538dd6e29061abd1047013a645e6e69c115e008bee9ea9fef9a6666"
tag: "v0.12.1"
digest: "sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267"
useDigest: true
pullPolicy: "IfNotPresent"
@@ -1853,9 +1853,9 @@ envoy:
image:
override: ~
repository: "quay.io/cilium/cilium-envoy"
tag: "v1.26.7-bbde4095997ea57ead209f56158790d47224a0f5"
tag: "v1.26.6-ad82c7c56e88989992fd25d8d67747de865c823b"
pullPolicy: "IfNotPresent"
digest: "sha256:39b75548447978230dedcf25da8940e4d3540c741045ef391a8e74dbb9661a86"
digest: "sha256:992998398dadfff7117bfa9fdb7c9474fefab7f0237263f7c8114e106c67baca"
useDigest: true
# -- Additional containers added to the cilium Envoy DaemonSet.
@@ -1968,20 +1968,7 @@ envoy:
labelSelector:
matchLabels:
k8s-app: cilium-envoy
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
k8s-app: cilium
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
# -- Node selector for cilium-envoy.
nodeSelector:
kubernetes.io/os: linux
@@ -2002,16 +1989,12 @@ envoy:
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ~
# -- Configure Cilium Envoy Prometheus options.
# Note that some of these apply to either cilium-agent or cilium-envoy.
prometheus:
# -- Enable prometheus metrics for cilium-envoy
enabled: true
serviceMonitor:
# -- Enable service monitors.
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
# Note that this setting applies to both cilium-envoy _and_ cilium-agent
# with Envoy enabled.
enabled: false
# -- Labels to add to ServiceMonitor cilium-envoy
labels: {}
@@ -2023,14 +2006,12 @@ envoy:
# service monitors configured.
# namespace: ""
# -- Relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
metricRelabelings: ~
# -- Serve prometheus metrics for cilium-envoy on the configured port
port: "9964"
@@ -2269,15 +2250,15 @@ operator:
image:
override: ~
repository: "quay.io/cilium/operator"
tag: "v1.14.9"
tag: "v1.14.5"
# operator-generic-digest
genericDigest: "sha256:1552d653870dd8ebbd16ee985a5497dd78a2097370978b0cfbd2da2072f30712"
genericDigest: "sha256:303f9076bdc73b3fc32aaedee64a14f6f44c8bb08ee9e3956d443021103ebe7a"
# operator-azure-digest
azureDigest: "sha256:2d3b9d868eb03fa9256d34192a734a2abab283f527a9c97b7cefcd3401649d17"
azureDigest: "sha256:9203f5583aa34e716d7a6588ebd144e43ce3b77873f578fc12b2679e33591353"
# operator-aws-digest
awsDigest: "sha256:041ad5b49ae63ba0f1974e1a1d9ebf9f52541cd2813088fa687f9d544125a1ec"
awsDigest: "sha256:785ccf1267d0ed3ba9e4bd8166577cb4f9e4ce996af26b27c9d5c554a0d5b09a"
# operator-alibabacloud-digest
alibabacloudDigest: "sha256:765314779093b54750f83280f009229f20fe1f28466a633d9bb4143d2ad669c5"
alibabacloudDigest: "sha256:e0152c498ba73c56a82eee2a706c8f400e9a6999c665af31a935bdf08e659bc3"
useDigest: true
pullPolicy: "IfNotPresent"
suffix: ""
@@ -2554,9 +2535,9 @@ preflight:
image:
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.14.9"
tag: "v1.14.5"
# cilium-digest
digest: "sha256:4ef1eb7a3bc39d0fefe14685e6c0d4e01301c40df2a89bc93ffca9a1ab927301"
digest: "sha256:d3b287029755b6a47dee01420e2ea469469f1b174a2089c10af7e5e9289ef05b"
useDigest: true
pullPolicy: "IfNotPresent"
@@ -2704,9 +2685,9 @@ clustermesh:
image:
override: ~
repository: "quay.io/cilium/clustermesh-apiserver"
tag: "v1.14.9"
tag: "v1.14.5"
# clustermesh-apiserver-digest
digest: "sha256:5c16f8b8e22ce41e11998e70846fbcecea3a6b683a38253809ead8d871f6d8a3"
digest: "sha256:7eaa35cf5452c43b1f7d0cde0d707823ae7e49965bcb54c053e31ea4e04c3d96"
useDigest: true
pullPolicy: "IfNotPresent"
@@ -2751,9 +2732,9 @@ clustermesh:
image:
override: ~
repository: "quay.io/cilium/kvstoremesh"
tag: "v1.14.9"
tag: "v1.14.5"
# kvstoremesh-digest
digest: "sha256:9d9efb25806660f3663b9cd803fb8679f2b115763470002a9770e2c1eb1e5b22"
digest: "sha256:d7137edd0efa2b1407b20088af3980a9993bb616d85bf9b55ea2891d1b99023a"
useDigest: true
pullPolicy: "IfNotPresent"
@@ -3105,8 +3086,6 @@ dnsProxy:
proxyPort: 0
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
proxyResponseMaxDelay: 100ms
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
# enableTransparentMode: true
# -- SCTP Configuration Values
sctp:
@@ -3157,21 +3136,8 @@ authentication:
# -- SPIRE Workload Attestor kubelet verification.
skipKubeletVerification: true
# -- SPIRE agent tolerations configuration
# By default it follows the same tolerations as the agent itself
# to allow the Cilium agent on this node to connect to SPIRE.
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
effect: NoSchedule
value: "true"
- key: CriticalAddonsOnly
operator: "Exists"
tolerations: []
# -- SPIRE agent affinity configuration
affinity: {}
# -- SPIRE agent nodeSelector configuration

View File

@@ -1854,9 +1854,9 @@ envoy:
image:
override: ~
repository: "quay.io/cilium/cilium-envoy"
tag: "v1.26.7-bbde4095997ea57ead209f56158790d47224a0f5"
tag: "v1.26.6-ad82c7c56e88989992fd25d8d67747de865c823b"
pullPolicy: "${PULL_POLICY}"
digest: "sha256:39b75548447978230dedcf25da8940e4d3540c741045ef391a8e74dbb9661a86"
digest: "sha256:992998398dadfff7117bfa9fdb7c9474fefab7f0237263f7c8114e106c67baca"
useDigest: true
# -- Additional containers added to the cilium Envoy DaemonSet.
@@ -1969,20 +1969,7 @@ envoy:
labelSelector:
matchLabels:
k8s-app: cilium-envoy
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchLabels:
k8s-app: cilium
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
# -- Node selector for cilium-envoy.
nodeSelector:
kubernetes.io/os: linux
@@ -2003,16 +1990,12 @@ envoy:
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ~
# -- Configure Cilium Envoy Prometheus options.
# Note that some of these apply to either cilium-agent or cilium-envoy.
prometheus:
# -- Enable prometheus metrics for cilium-envoy
enabled: true
serviceMonitor:
# -- Enable service monitors.
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
# Note that this setting applies to both cilium-envoy _and_ cilium-agent
# with Envoy enabled.
enabled: false
# -- Labels to add to ServiceMonitor cilium-envoy
labels: {}
@@ -2024,14 +2007,12 @@ envoy:
# service monitors configured.
# namespace: ""
# -- Relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
metricRelabelings: ~
# -- Serve prometheus metrics for cilium-envoy on the configured port
port: "9964"
@@ -3108,8 +3089,6 @@ dnsProxy:
proxyPort: 0
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
proxyResponseMaxDelay: 100ms
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
# enableTransparentMode: true
# -- SCTP Configuration Values
sctp:
@@ -3160,21 +3139,8 @@ authentication:
# -- SPIRE Workload Attestor kubelet verification.
skipKubeletVerification: true
# -- SPIRE agent tolerations configuration
# By default it follows the same tolerations as the agent itself
# to allow the Cilium agent on this node to connect to SPIRE.
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
effect: NoSchedule
value: "true"
- key: CriticalAddonsOnly
operator: "Exists"
tolerations: []
# -- SPIRE agent affinity configuration
affinity: {}
# -- SPIRE agent nodeSelector configuration

View File

@@ -3,10 +3,11 @@ cilium:
enabled: false
externalIPs:
enabled: true
tunnel: disabled
autoDirectNodeRoutes: false
kubeProxyReplacement: strict
bpf:
masquerade: false
masquerade: true
loadBalancer:
algorithm: maglev
cgroup:
@@ -24,4 +25,3 @@ cilium:
configMap: cni-configuration
routingMode: native
enableIPv4Masquerade: false
enableIdentityMark: false
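
After flipping `kubeProxyReplacement` or `bpf.masquerade`, the running agent's view can be confirmed from any cilium pod. A hedged check (the namespace comes from the Makefile earlier in this diff; the DaemonSet name is assumed):

```bash
kubectl -n cozy-cilium exec ds/cilium -- \
  cilium status --verbose | grep -E 'KubeProxyReplacement|Masquerading'
```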

View File

@@ -1,3 +0,0 @@
apiVersion: v2
name: cozy-clickhouse-operator
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process

View File

@@ -1,10 +0,0 @@
NAME=clickhouse-operator
NAMESPACE=cozy-clickhouse-operator
include ../../../scripts/package-system.mk
update:
rm -rf charts
helm repo add clickhouse-operator https://docs.altinity.com/clickhouse-operator/
helm repo update clickhouse-operator
helm pull clickhouse-operator/altinity-clickhouse-operator --untar --untardir charts

View File

@@ -1,17 +0,0 @@
apiVersion: v2
appVersion: 0.23.4
description: 'Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator). The
ClickHouse Operator creates, configures and manages ClickHouse clusters running
on Kubernetes. For upgrade please install CRDs separately: ```bash kubectl apply
-f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml kubectl
apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml kubectl
apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
```'
home: https://github.com/Altinity/clickhouse-operator
icon: https://logosandtypes.com/wp-content/uploads/2020/12/altinity.svg
maintainers:
- email: support@altinity.com
name: altinity
name: altinity-clickhouse-operator
type: application
version: 0.23.4

View File

@@ -1,65 +0,0 @@
# altinity-clickhouse-operator
![Version: 0.23.4](https://img.shields.io/badge/Version-0.23.4-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.23.4](https://img.shields.io/badge/AppVersion-0.23.4-informational?style=flat-square)
Helm chart to deploy [altinity-clickhouse-operator](https://github.com/Altinity/clickhouse-operator).
The ClickHouse Operator creates, configures and manages ClickHouse clusters running on Kubernetes.
For upgrade please install CRDs separately:
```bash
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallations.clickhouse.altinity.com.yaml
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseinstallationtemplates.clickhouse.altinity.com.yaml
kubectl apply -f https://github.com/Altinity/clickhouse-operator/raw/master/deploy/helm/crds/CustomResourceDefinition-clickhouseoperatorconfigurations.clickhouse.altinity.com.yaml
```
**Homepage:** <https://github.com/Altinity/clickhouse-operator>
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| altinity | <support@altinity.com> | |
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| additionalResources | list | `[]` | list of additional resources to create (processed via the `tpl` function), useful for creating ClickHouse clusters together with clickhouse-operator, look `kubectl explain chi` for details |
| affinity | object | `{}` | affinity for scheduler pod assignment, look `kubectl explain pod.spec.affinity` for details |
| configs | object | check the values.yaml file for the config content, auto-generated from latest operator release | clickhouse-operator configs |
| dashboards.additionalLabels | object | `{"grafana_dashboard":""}` | labels to add to a secret with dashboards |
| dashboards.annotations | object | `{}` | annotations to add to a secret with dashboards |
| dashboards.enabled | bool | `false` | provision grafana dashboards as secrets (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-6.33.1/charts/grafana/values.yaml#L679 ) |
| dashboards.grafana_folder | string | `"clickhouse"` | |
| fullnameOverride | string | `""` | full name of the chart. |
| imagePullSecrets | list | `[]` | image pull secret for private images in clickhouse-operator pod possible value format [{"name":"your-secret-name"}] look `kubectl explain pod.spec.imagePullSecrets` for details |
| metrics.containerSecurityContext | object | `{}` | |
| metrics.enabled | bool | `true` | |
| metrics.env | list | `[]` | additional environment variables for the deployment of metrics-exporter containers possible format value [{"name": "SAMPLE", "value": "text"}] |
| metrics.image.pullPolicy | string | `"IfNotPresent"` | image pull policy |
| metrics.image.repository | string | `"altinity/metrics-exporter"` | image repository |
| metrics.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
| metrics.resources | object | `{}` | custom resource configuration |
| nameOverride | string | `""` | override name of the chart |
| nodeSelector | object | `{}` | node for scheduler pod assignment, look `kubectl explain pod.spec.nodeSelector` for details |
| operator.containerSecurityContext | object | `{}` | |
| operator.env | list | `[]` | additional environment variables for the clickhouse-operator container in deployment possible format value [{"name": "SAMPLE", "value": "text"}] |
| operator.image.pullPolicy | string | `"IfNotPresent"` | image pull policy |
| operator.image.repository | string | `"altinity/clickhouse-operator"` | image repository |
| operator.image.tag | string | `""` | image tag (chart's appVersion value will be used if not set) |
| operator.resources | object | `{}` | custom resource configuration, look `kubectl explain pod.spec.containers.resources` for details |
| podAnnotations | object | `{"clickhouse-operator-metrics/port":"9999","clickhouse-operator-metrics/scrape":"true","prometheus.io/port":"8888","prometheus.io/scrape":"true"}` | annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details |
| podLabels | object | `{}` | labels to add to the clickhouse-operator pod |
| podSecurityContext | object | `{}` | |
| rbac.create | bool | `true` | specifies whether cluster roles and cluster role bindings should be created |
| secret.create | bool | `true` | create a secret with operator credentials |
| secret.password | string | `"clickhouse_operator_password"` | operator credentials password |
| secret.username | string | `"clickhouse_operator"` | operator credentials username |
| serviceAccount.annotations | object | `{}` | annotations to add to the service account |
| serviceAccount.create | bool | `true` | specifies whether a service account should be created |
| serviceAccount.name | string | `nil` | the name of the service account to use; if not set and create is true, a name is generated using the fullname template |
| serviceMonitor.additionalLabels | object | `{}` | additional labels for service monitor |
| serviceMonitor.enabled | bool | `false` | ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) |
| tolerations | list | `[]` | tolerations for scheduler pod assignment, look `kubectl explain pod.spec.tolerations` for details |
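
Putting this README together with the Makefile above, a minimal install sketch (release name and namespace mirror the Makefile; on upgrades, apply the CRDs first as the README warns):

```bash
helm repo add clickhouse-operator https://docs.altinity.com/clickhouse-operator/
helm repo update clickhouse-operator
helm upgrade -i clickhouse-operator clickhouse-operator/altinity-clickhouse-operator \
  -n cozy-clickhouse-operator --create-namespace
```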

View File

@@ -1,263 +0,0 @@
# Template Parameters:
#
# OPERATOR_VERSION=0.23.4
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
labels:
clickhouse-keeper.altinity.com/chop: 0.23.4
spec:
group: clickhouse-keeper.altinity.com
scope: Namespaced
names:
kind: ClickHouseKeeperInstallation
singular: clickhousekeeperinstallation
plural: clickhousekeeperinstallations
shortNames:
- chk
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- name: status
type: string
description: CHK status
jsonPath: .status.status
- name: replicas
type: integer
description: Replica count
priority: 1 # show in wide view
jsonPath: .status.replicas
- name: age
type: date
description: Age of the resource
# Displayed in all priorities
jsonPath: .metadata.creationTimestamp
subresources:
status: {}
schema:
openAPIV3Schema:
type: object
required:
- spec
description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one ClickHouse Keeper cluster"
properties:
apiVersion:
type: string
description: |
APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
kind:
type: string
description: |
Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metadata:
type: object
status:
type: object
description: |
Current ClickHouseKeeperInstallation status, contains many fields like overall status, desired replicas and ready replica list with their endpoints
properties:
chop-version:
type: string
description: "ClickHouse operator version"
chop-commit:
type: string
description: "ClickHouse operator git commit SHA"
chop-date:
type: string
description: "ClickHouse operator build date"
chop-ip:
type: string
description: "IP address of the operator's pod which managed this CHI"
status:
type: string
description: "Status"
replicas:
type: integer
format: int32
description: Replicas is the number of desired replicas in the cluster
readyReplicas:
type: array
description: ReadyReplicas is the array of endpoints of those ready replicas in the cluster
items:
type: object
properties:
host:
type: string
description: DNS name or IP address of the Keeper node
port:
type: integer
minimum: 0
maximum: 65535
description: TCP port used to connect to the Keeper node
secure:
type: string
description: if a secure connection to Keeper is required
normalized:
type: object
description: "Normalized CHK requested"
x-kubernetes-preserve-unknown-fields: true
normalizedCompleted:
type: object
description: "Normalized CHK completed"
x-kubernetes-preserve-unknown-fields: true
spec:
type: object
description: KeeperSpec defines the desired state of a Keeper cluster
properties:
namespaceDomainPattern:
type: string
description: |
Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
Typical use scenario - custom cluster domain in Kubernetes cluster
Example: %s.svc.my.test
replicas:
type: integer
format: int32
description: |
Replicas is the expected size of the keeper cluster.
The valid range of size is from 1 to 7.
minimum: 1
maximum: 7
configuration:
type: object
description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
# nullable: true
properties:
settings:
type: object
description: "allows configure multiple aspects and behavior for `clickhouse-keeper` instance"
x-kubernetes-preserve-unknown-fields: true
clusters:
type: array
description: |
describes the ClickHouseKeeper cluster layout and allows changing settings at the cluster and replica level
# nullable: true
items:
type: object
#required:
# - name
properties:
name:
type: string
description: "cluster name, used to identify set of ClickHouseKeeper servers and wide used during generate names of related Kubernetes resources"
minLength: 1
# See namePartClusterMaxLen const
maxLength: 15
pattern: "^[a-zA-Z0-9-]{0,15}$"
layout:
type: object
description: |
describes the current cluster layout, i.e. how many replicas
# nullable: true
properties:
replicasCount:
type: integer
description: "how many replicas in ClickHouseKeeper cluster"
templates:
type: object
description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
# nullable: true
properties:
podTemplates:
type: array
description: |
podTemplate is used when rendering the `Pod` inside `StatefulSet.spec` and allows defining the rendered `Pod.spec`, pod scheduling distribution and pod zone
More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
# nullable: true
items:
type: object
#required:
# - name
properties:
name:
type: string
description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
metadata:
type: object
description: |
allows passing standard object metadata from the template to the Pod
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
# nullable: true
x-kubernetes-preserve-unknown-fields: true
spec:
# TODO specify PodSpec
type: object
description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
# nullable: true
x-kubernetes-preserve-unknown-fields: true
volumeClaimTemplates:
type: array
description: "allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else"
# nullable: true
items:
type: object
#required:
# - name
# - spec
properties:
name:
type: string
description: |
template name, can be used to link inside
top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
metadata:
type: object
description: |
allows passing standard object metadata from the template to the PVC
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
# nullable: true
x-kubernetes-preserve-unknown-fields: true
spec:
type: object
description: |
allows defining all aspects of the `PVC` resource
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
# nullable: true
x-kubernetes-preserve-unknown-fields: true
serviceTemplates:
type: array
description: |
allows defining a template for rendering a `Service` which gets its endpoints from Pods scoped at the chi-wide, cluster-wide, shard-wide or replica-wide level
# nullable: true
items:
type: object
#required:
# - name
# - spec
properties:
name:
type: string
description: |
template name, can be used to link inside
chi-level `chi.spec.defaults.templates.serviceTemplate`
cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
metadata:
# TODO specify ObjectMeta
type: object
description: |
allows passing standard object metadata from the template to the Service.
Can be used to define Cloud Provider specific metadata which impacts the behavior of the Service.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
# nullable: true
x-kubernetes-preserve-unknown-fields: true
spec:
# TODO specify ServiceSpec
type: object
description: |
describes the behavior of the generated Service
More info: https://kubernetes.io/docs/concepts/services-networking/service/
# nullable: true
x-kubernetes-preserve-unknown-fields: true
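
For reference, a minimal resource satisfying this (now removed) CRD schema, sketched purely from the fields above; all names are illustrative and `replicas` must stay within the 1..7 range:

```bash
kubectl apply -f - <<'EOF'
apiVersion: clickhouse-keeper.altinity.com/v1
kind: ClickHouseKeeperInstallation
metadata:
  name: keeper-demo
spec:
  replicas: 3
  configuration:
    clusters:
      - name: demo
        layout:
          replicasCount: 3
EOF
kubectl get chk keeper-demo   # `chk` is the short name declared by the CRD
```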

View File

@@ -1,415 +0,0 @@
# Template Parameters:
#
# NONE
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: clickhouseoperatorconfigurations.clickhouse.altinity.com
labels:
clickhouse.altinity.com/chop: 0.23.4
spec:
group: clickhouse.altinity.com
scope: Namespaced
names:
kind: ClickHouseOperatorConfiguration
singular: clickhouseoperatorconfiguration
plural: clickhouseoperatorconfigurations
shortNames:
- chopconf
versions:
- name: v1
served: true
storage: true
additionalPrinterColumns:
- name: namespaces
type: string
description: Watch namespaces
jsonPath: .status
- name: age
type: date
description: Age of the resource
# Displayed in all priorities
jsonPath: .metadata.creationTimestamp
schema:
openAPIV3Schema:
type: object
description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
x-kubernetes-preserve-unknown-fields: true
properties:
status:
type: object
x-kubernetes-preserve-unknown-fields: true
spec:
type: object
description: |
Allows defining settings of the clickhouse-operator.
More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
Check the etc-clickhouse-operator* ConfigMaps if you need more control
x-kubernetes-preserve-unknown-fields: true
properties:
watch:
type: object
description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
properties:
namespaces:
type: array
description: "List of namespaces where clickhouse-operator watches for events."
items:
type: string
clickhouse:
type: object
description: "Clickhouse related parameters used by clickhouse-operator"
properties:
configuration:
type: object
properties:
file:
type: object
properties:
path:
type: object
description: |
Each 'path' can be either absolute or relative.
If the path is absolute, it is used as is.
If the path is relative, it is resolved relative to the folder where the configuration file you are reading right now is located.
properties:
common:
type: string
description: |
Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
Default value - config.d
host:
type: string
description: |
Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
Default value - conf.d
user:
type: string
description: |
Path to the folder where ClickHouse configuration files with users settings are located.
Files are common for all instances within a CHI.
Default value - users.d
user:
type: object
description: "Default parameters for any user which will create"
properties:
default:
type: object
properties:
profile:
type: string
description: "ClickHouse server configuration `<profile>...</profile>` for any <user>"
quota:
type: string
description: "ClickHouse server configuration `<quota>...</quota>` for any <user>"
networksIP:
type: array
description: "ClickHouse server configuration `<networks><ip>...</ip></networks>` for any <user>"
items:
type: string
password:
type: string
description: "ClickHouse server configuration `<password>...</password>` for any <user>"
network:
type: object
description: "Default network parameters for any user which will create"
properties:
hostRegexpTemplate:
type: string
description: "ClickHouse server configuration `<host_regexp>...</host_regexp>` for any <user>"
configurationRestartPolicy:
type: object
description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
properties:
rules:
type: array
description: "Array of set of rules per specified ClickHouse versions"
items:
type: object
properties:
version:
type: string
description: "ClickHouse version expression"
rules:
type: array
description: "Set of configuration rules for specified ClickHouse version"
items:
type: object
description: "setting: value pairs for configuration restart policy"
access:
type: object
description: "parameters which use for connect to clickhouse from clickhouse-operator deployment"
properties:
scheme:
type: string
description: "The scheme to user for connecting to ClickHouse. Possible values: http, https, auto"
username:
type: string
description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
password:
type: string
description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
rootCA:
type: string
description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
secret:
type: object
properties:
namespace:
type: string
description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
name:
type: string
description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
port:
type: integer
minimum: 1
maximum: 65535
description: "Port to be used by operator to connect to ClickHouse instances"
timeouts:
type: object
description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
properties:
connect:
type: integer
minimum: 1
maximum: 10
description: "Timout to setup connection from the operator to ClickHouse instances. In seconds."
query:
type: integer
minimum: 1
maximum: 600
description: "Timout to perform SQL query from the operator to ClickHouse instances. In seconds."
metrics:
type: object
description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator"
properties:
timeouts:
type: object
description: |
Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
Specified in seconds.
properties:
collect:
type: integer
minimum: 1
maximum: 600
description: |
Timeout used to limit metrics collection request. In seconds.
Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
All collected metrics are returned.
template:
type: object
description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
properties:
chi:
type: object
properties:
policy:
type: string
description: |
CHI template updates handling policy
Possible policy values:
- ReadOnStart. Accept CHIT updates on the operator's start only.
- ApplyOnNextReconcile. Accept CHIT updates at any time. Apply new CHITs on the next regular reconcile of the CHI
enum:
- ""
- "ReadOnStart"
- "ApplyOnNextReconcile"
path:
type: string
description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
reconcile:
type: object
description: "allow tuning reconciling process"
properties:
runtime:
type: object
description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
properties:
reconcileCHIsThreadsNumber:
type: integer
minimum: 1
maximum: 65535
description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
reconcileShardsThreadsNumber:
type: integer
minimum: 1
maximum: 65535
description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
reconcileShardsMaxConcurrencyPercent:
type: integer
minimum: 0
maximum: 100
description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
statefulSet:
type: object
description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
properties:
create:
type: object
description: "Behavior during create StatefulSet"
properties:
onFailure:
type: string
description: |
What to do in case the created StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
Possible options:
1. abort - do nothing, just break the process and wait for admin.
2. delete - delete newly created problematic StatefulSet.
3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
update:
type: object
description: "Behavior during update StatefulSet"
properties:
timeout:
type: integer
description: "How many seconds to wait for created/updated StatefulSet to be Ready"
pollInterval:
type: integer
description: "How many seconds to wait between checks for created/updated StatefulSet status"
onFailure:
type: string
description: |
What to do in case the updated StatefulSet is not Ready after `statefulSetUpdateTimeout` seconds
Possible options:
1. abort - do nothing, just break the process and wait for admin.
2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
host:
type: object
description: |
Whether the operator during reconcile procedure should wait for a ClickHouse host:
- to be excluded from a ClickHouse cluster
- to complete all running queries
- to be included into a ClickHouse cluster
respectively, before moving forward
properties:
wait:
type: object
properties:
exclude: &TypeStringBool
type: string
description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
enum:
# List StringBoolXXX constants from model
- ""
- "0"
- "1"
- "False"
- "false"
- "True"
- "true"
- "No"
- "no"
- "Yes"
- "yes"
- "Off"
- "off"
- "On"
- "on"
- "Disable"
- "disable"
- "Enable"
- "enable"
- "Disabled"
- "disabled"
- "Enabled"
- "enabled"
queries:
!!merge <<: *TypeStringBool
description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
include:
!!merge <<: *TypeStringBool
description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
annotation:
type: object
description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
properties:
include:
type: array
description: |
When propagating annotations from the CHI's `metadata.annotations` section to child objects' `metadata.annotations`,
include annotations with names from the following list
items:
type: string
exclude:
type: array
description: |
When propagating annotations from the CHI's `metadata.annotations` section to child objects' `metadata.annotations`,
exclude annotations with names from the following list
items:
type: string
label:
type: object
description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
properties:
include:
type: array
description: |
When propagating labels from the CHI's `metadata.labels` section to child objects' `metadata.labels`,
include labels from the following list
items:
type: string
exclude:
type: array
items:
type: string
description: |
When propagating labels from the CHI's `metadata.labels` section to child objects' `metadata.labels`,
exclude labels from the following list
appendScope:
!!merge <<: *TypeStringBool
description: |
Whether to append *Scope* labels to StatefulSet and Pod
- "LabelShardScopeIndex"
- "LabelReplicaScopeIndex"
- "LabelCHIScopeIndex"
- "LabelCHIScopeCycleSize"
- "LabelCHIScopeCycleIndex"
- "LabelCHIScopeCycleOffset"
- "LabelClusterScopeIndex"
- "LabelClusterScopeCycleSize"
- "LabelClusterScopeCycleIndex"
- "LabelClusterScopeCycleOffset"
statefulSet:
type: object
description: "define StatefulSet-specific parameters"
properties:
revisionHistoryLimit:
type: integer
description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
pod:
type: object
description: "define pod specific parameters"
properties:
terminationGracePeriod:
type: integer
description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
logger:
type: object
description: "allow setup clickhouse-operator logger behavior"
properties:
logtostderr:
type: string
description: "boolean, allows logs to stderr"
alsologtostderr:
type: string
description: "boolean allows logs to stderr and files both"
v:
type: string
description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
stderrthreshold:
type: string
vmodule:
type: string
description: |
Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
log_backtrace_at:
type: string
description: |
It can be set to a file and line number with a logging line.
Ex.: file.go:123
Each time this line is executed, a stack trace will be written to the Info log.
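The schema above mirrors the operator's config.yaml and validates ClickHouseOperatorConfiguration resources. As a rough sketch, a minimal resource exercising a few of the documented fields could look like the following; the metadata values are placeholders, not names used by this chart:

apiVersion: clickhouse.altinity.com/v1
kind: ClickHouseOperatorConfiguration
metadata:
  name: chop-config            # hypothetical name
  namespace: kube-system       # assumed: whichever namespace the operator runs in
spec:
  clickhouse:
    access:
      scheme: auto
      port: 8123
      timeouts:
        connect: 2             # seconds; the schema allows 1..10
        query: 120             # seconds; the schema allows 1..600
  reconcile:
    statefulSet:
      update:
        timeout: 300
        pollInterval: 5
        onFailure: rollback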
@@ -1,102 +0,0 @@
{{/* vim: set filetype=go-template: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "altinity-clickhouse-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "altinity-clickhouse-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "altinity-clickhouse-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "altinity-clickhouse-operator.labels" -}}
helm.sh/chart: {{ include "altinity-clickhouse-operator.chart" . }}
{{ include "altinity-clickhouse-operator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
{{- if .Values.podLabels }}
{{ toYaml .Values.podLabels }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Selector labels
*/}}
{{- define "altinity-clickhouse-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "altinity-clickhouse-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "altinity-clickhouse-operator.serviceAccountName" -}}
{{ default (include "altinity-clickhouse-operator.fullname" .) .Values.serviceAccount.name }}
{{- end -}}
{{/*
Create the tag for the docker image to use
*/}}
{{- define "altinity-clickhouse-operator.operator.tag" -}}
{{- .Values.operator.image.tag | default .Chart.AppVersion -}}
{{- end -}}
{{/*
Create the tag for the docker image to use
*/}}
{{- define "altinity-clickhouse-operator.metrics.tag" -}}
{{- .Values.metrics.image.tag | default .Chart.AppVersion -}}
{{- end -}}
{{/*
altinity-clickhouse-operator.rawResource will create a resource template that can be
merged with each item in `.Values.additionalResources`.
*/}}
{{- define "altinity-clickhouse-operator.rawResource" -}}
metadata:
labels:
{{- include "altinity-clickhouse-operator.labels" . | nindent 4 }}
{{- end }}
{{/*
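altinity-clickhouse-operator.configmap-data renders ConfigMap data entries from a values map:
non-string values are serialized with toYaml first, then every key/value pair is passed
through tpl against the root context, so entries may themselves contain template expressions.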
*/}}
{{- define "altinity-clickhouse-operator.configmap-data" }}
{{- $root := index . 0 }}
{{- $data := index . 1 }}
{{- if not $data -}}
null
{{ end }}
{{- range $k, $v := $data }}
{{- if not (kindIs "string" $v) }}
{{- $v = toYaml $v }}
{{- end }}
{{- tpl (toYaml (dict $k $v)) $root }}
{{ end }}
{{- end }}
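For illustration, here is roughly what that helper renders when given a map with one plain string entry and one structured entry (a sketch, not output taken from the chart):

# input values:
#   a.xml: "<yandex/>"
#   config.yaml:
#     watch:
#       namespaces: []
# rendered data:
a.xml: <yandex/>
config.yaml: |
  watch:
    namespaces: []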


@@ -1,5 +0,0 @@
{{- $template := fromYaml (include "altinity-clickhouse-operator.rawResource" .) -}}
{{- range $i, $t := .Values.additionalResources }}
---
{{ toYaml (merge (tpl $t $ | fromYaml) $template) -}}
{{- end }}


@@ -1,21 +0,0 @@
{{- if .Values.dashboards.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-dashboards
namespace: {{ .Release.Namespace }}
labels:
{{- include "altinity-clickhouse-operator.labels" . | nindent 4 }}
{{- if .Values.dashboards.additionalLabels }}
{{- toYaml .Values.dashboards.additionalLabels | nindent 4 }}
{{- end }}
{{- with .Values.dashboards.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
data:
{{- range $path, $_ := .Files.Glob "files/*.json" }}
{{ $path | trimPrefix "files/" }}: {{ $.Files.Get $path | b64enc -}}
{{ end }}
{{- end }}
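For these dashboards to be picked up by the Grafana dashboards sidecar referenced in values.yaml, the Secret needs the label the sidecar watches for. A sketch of the values to set, where the label value "1" is an assumption that must match your sidecar configuration:

dashboards:
  enabled: true
  additionalLabels:
    grafana_dashboard: "1"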


@@ -1,211 +0,0 @@
{{- if .Values.rbac.create -}}
# Specifies either
# ClusterRole
# or
# Role
# to be bound to ServiceAccount.
# ClusterRole is namespace-less and must have unique name
# Role is namespace-bound
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "altinity-clickhouse-operator.fullname" . }}
#namespace: kube-system
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
rules:
#
# Core API group
#
- apiGroups:
- ""
resources:
- configmaps
- services
- persistentvolumeclaims
- secrets
verbs:
- get
- list
- patch
- update
- watch
- create
- delete
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- patch
- update
- watch
- delete
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
#
# apps.* resources
#
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- patch
- update
- watch
- create
- delete
- apiGroups:
- apps
resources:
- replicasets
verbs:
- get
- patch
- update
- delete
# The operator deployment personally, identified by name
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- {{ include "altinity-clickhouse-operator.fullname" . }}
verbs:
- get
- patch
- update
- delete
#
# policy.* resources
#
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- patch
- update
- watch
- create
- delete
#
# apiextensions
#
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
# clickhouse - related resources
- apiGroups:
- clickhouse.altinity.com
#
# The operators specific Custom Resources
#
resources:
- clickhouseinstallations
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallationtemplates
- clickhouseoperatorconfigurations
verbs:
- get
- list
- watch
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations/finalizers
- clickhouseinstallationtemplates/finalizers
- clickhouseoperatorconfigurations/finalizers
verbs:
- update
- apiGroups:
- clickhouse.altinity.com
resources:
- clickhouseinstallations/status
- clickhouseinstallationtemplates/status
- clickhouseoperatorconfigurations/status
verbs:
- get
- update
- patch
- create
- delete
# clickhouse-keeper - related resources
- apiGroups:
- clickhouse-keeper.altinity.com
resources:
- clickhousekeeperinstallations
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- clickhouse-keeper.altinity.com
resources:
- clickhousekeeperinstallations/finalizers
verbs:
- update
- apiGroups:
- clickhouse-keeper.altinity.com
resources:
- clickhousekeeperinstallations/status
verbs:
- get
- update
- patch
- create
- delete
{{- end }}
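Once the chart is installed, the binding can be sanity-checked with kubectl; the release name clickhouse-operator and namespace cozy-clickhouse-operator below are assumptions:

kubectl auth can-i list clickhouseinstallations.clickhouse.altinity.com \
  --as=system:serviceaccount:cozy-clickhouse-operator:clickhouse-operator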


@@ -1,23 +0,0 @@
{{- if .Values.rbac.create -}}
# Specifies either
# ClusterRoleBinding between ClusterRole and ServiceAccount.
# or
# RoleBinding between Role and ServiceAccount.
# ClusterRoleBinding is namespace-less and must have unique name
# RoleBinding is namespace-bound
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "altinity-clickhouse-operator.fullname" . }}
#namespace: kube-system
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "altinity-clickhouse-operator.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "altinity-clickhouse-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}


@@ -1,13 +0,0 @@
# Template Parameters:
#
# NAME=etc-clickhouse-operator-confd-files
# NAMESPACE=kube-system
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-confd-files" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.confdFiles) | nindent 2 }}


@@ -1,13 +0,0 @@
# Template Parameters:
#
# NAME=etc-clickhouse-operator-configd-files
# NAMESPACE=kube-system
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-configd-files" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.configdFiles) | nindent 2 }}


@@ -1,13 +0,0 @@
# Template Parameters:
#
# NAME=etc-clickhouse-operator-files
# NAMESPACE=kube-system
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-files" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.files) | nindent 2 }}


@@ -1,13 +0,0 @@
# Template Parameters:
#
# NAME=etc-clickhouse-operator-templatesd-files
# NAMESPACE=kube-system
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-templatesd-files" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.templatesdFiles) | nindent 2 }}


@@ -1,13 +0,0 @@
# Template Parameters:
#
# NAME=etc-clickhouse-operator-usersd-files
# NAMESPACE=kube-system
# COMMENT=
#
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s-usersd-files" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
data: {{ include "altinity-clickhouse-operator.configmap-data" (list . .Values.configs.usersdFiles) | nindent 2 }}


@@ -1,195 +0,0 @@
# Template Parameters:
#
# NAMESPACE=kube-system
# COMMENT=
# OPERATOR_IMAGE=altinity/clickhouse-operator:0.23.4
# OPERATOR_IMAGE_PULL_POLICY=Always
# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.23.4
# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
#
# Setup Deployment for clickhouse-operator
# Deployment would be created in kubectl-specified namespace
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ include "altinity-clickhouse-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels: {{ include "altinity-clickhouse-operator.selectorLabels" . | nindent 6 }}
template:
metadata:
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 8 }}
annotations:
{{ toYaml .Values.podAnnotations | nindent 8 }}
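{{- /* The checksum annotations below force a rolling restart of the operator Pod whenever any of the rendered ConfigMaps change */}}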
checksum/files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-files.yaml") . | sha256sum }}
checksum/confd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-confd-files.yaml") . | sha256sum }}
checksum/configd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-configd-files.yaml") . | sha256sum }}
checksum/templatesd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-templatesd-files.yaml") . | sha256sum }}
checksum/usersd-files: {{ include (print $.Template.BasePath "/generated/ConfigMap-etc-clickhouse-operator-usersd-files.yaml") . | sha256sum }}
spec:
serviceAccountName: {{ include "altinity-clickhouse-operator.serviceAccountName" . }}
volumes:
- name: etc-clickhouse-operator-folder
configMap:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-files
- name: etc-clickhouse-operator-confd-folder
configMap:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-confd-files
- name: etc-clickhouse-operator-configd-folder
configMap:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-configd-files
- name: etc-clickhouse-operator-templatesd-folder
configMap:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-templatesd-files
- name: etc-clickhouse-operator-usersd-folder
configMap:
name: {{ include "altinity-clickhouse-operator.fullname" . }}-usersd-files
containers:
- name: {{ .Chart.Name }}
image: {{ .Values.operator.image.repository }}:{{ include "altinity-clickhouse-operator.operator.tag" . }}
imagePullPolicy: {{ .Values.operator.image.pullPolicy }}
volumeMounts:
- name: etc-clickhouse-operator-folder
mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-confd-folder
mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-configd-folder
mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-templatesd-folder
mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-usersd-folder
mountPath: /etc/clickhouse-operator/users.d
env:
# Pod-specific
# spec.nodeName: ip-172-20-52-62.ec2.internal
- name: OPERATOR_POD_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# metadata.name: clickhouse-operator-6f87589dbb-ftcsf
- name: OPERATOR_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# metadata.namespace: kube-system
- name: OPERATOR_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# status.podIP: 100.96.3.2
- name: OPERATOR_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
# spec.serviceAccount: clickhouse-operator
# spec.serviceAccountName: clickhouse-operator
- name: OPERATOR_POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# Container-specific
- name: OPERATOR_CONTAINER_CPU_REQUEST
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: requests.cpu
- name: OPERATOR_CONTAINER_CPU_LIMIT
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: limits.cpu
- name: OPERATOR_CONTAINER_MEM_REQUEST
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: requests.memory
- name: OPERATOR_CONTAINER_MEM_LIMIT
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: limits.memory
{{ with .Values.operator.env }}{{ toYaml . | nindent 12 }}{{ end }}
ports:
- containerPort: 9999
name: metrics
resources: {{ toYaml .Values.operator.resources | nindent 12 }}
securityContext: {{ toYaml .Values.operator.containerSecurityContext | nindent 12 }}
{{ if .Values.metrics.enabled }}
- name: metrics-exporter
image: {{ .Values.metrics.image.repository }}:{{ include "altinity-clickhouse-operator.metrics.tag" . }}
imagePullPolicy: {{ .Values.metrics.image.pullPolicy }}
volumeMounts:
- name: etc-clickhouse-operator-folder
mountPath: /etc/clickhouse-operator
- name: etc-clickhouse-operator-confd-folder
mountPath: /etc/clickhouse-operator/conf.d
- name: etc-clickhouse-operator-configd-folder
mountPath: /etc/clickhouse-operator/config.d
- name: etc-clickhouse-operator-templatesd-folder
mountPath: /etc/clickhouse-operator/templates.d
- name: etc-clickhouse-operator-usersd-folder
mountPath: /etc/clickhouse-operator/users.d
env:
# Pod-specific
# spec.nodeName: ip-172-20-52-62.ec2.internal
- name: OPERATOR_POD_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# metadata.name: clickhouse-operator-6f87589dbb-ftcsf
- name: OPERATOR_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
# metadata.namespace: kube-system
- name: OPERATOR_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
# status.podIP: 100.96.3.2
- name: OPERATOR_POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
# spec.serviceAccount: clickhouse-operator
# spec.serviceAccountName: clickhouse-operator
- name: OPERATOR_POD_SERVICE_ACCOUNT
valueFrom:
fieldRef:
fieldPath: spec.serviceAccountName
# Container-specific
- name: OPERATOR_CONTAINER_CPU_REQUEST
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: requests.cpu
- name: OPERATOR_CONTAINER_CPU_LIMIT
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: limits.cpu
- name: OPERATOR_CONTAINER_MEM_REQUEST
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: requests.memory
- name: OPERATOR_CONTAINER_MEM_LIMIT
valueFrom:
resourceFieldRef:
containerName: {{ .Chart.Name }}
resource: limits.memory
{{ with .Values.metrics.env }}{{ toYaml . | nindent 12 }}{{ end }}
ports:
- containerPort: 8888
name: metrics
resources: {{ toYaml .Values.metrics.resources | nindent 12 }}
securityContext: {{ toYaml .Values.metrics.containerSecurityContext | nindent 12 }}
{{ end }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
nodeSelector: {{ toYaml .Values.nodeSelector | nindent 8 }}
affinity: {{ toYaml .Values.affinity | nindent 8 }}
tolerations: {{ toYaml .Values.tolerations | nindent 8 }}
securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}

View File

@@ -1,20 +0,0 @@
{{- if .Values.secret.create -}}
#
# Template parameters available:
# NAMESPACE=kube-system
# COMMENT=
# OPERATOR_VERSION=0.23.4
# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
#
apiVersion: v1
kind: Secret
metadata:
name: {{ include "altinity-clickhouse-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
type: Opaque
data:
username: {{ .Values.secret.username | b64enc }}
password: {{ .Values.secret.password | b64enc }}
{{- end -}}
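A sketch of overriding the rendered credentials via values; the password below is a placeholder, and real credentials should come from a values file kept out of version control, or from a pre-existing Secret with secret.create disabled:

secret:
  create: true
  username: clickhouse_operator
  password: "<strong-password>"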


@@ -1,23 +0,0 @@
# Template Parameters:
#
# NAMESPACE=kube-system
# COMMENT=
#
# Setup ClusterIP Service to provide monitoring metrics for Prometheus
# Service would be created in kubectl-specified namespace
# In order to get access outside of k8s it should be exposed as:
# kubectl --namespace prometheus port-forward service/prometheus 9090
# and point browser to localhost:9090
kind: Service
apiVersion: v1
metadata:
name: {{ printf "%s-metrics" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
spec:
ports:
- port: 8888
name: clickhouse-metrics
- port: 9999
name: operator-metrics
selector: {{ include "altinity-clickhouse-operator.selectorLabels" . | nindent 4 }}
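Following the port-forward pattern from the comment above, both metrics endpoints can be reached locally; the namespace and service name below are assumptions derived from the fullname template:

kubectl -n cozy-clickhouse-operator port-forward \
  service/clickhouse-operator-metrics 8888 9999
curl -s http://localhost:8888/metrics | head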


@@ -1,26 +0,0 @@
{{- if .Values.serviceAccount.create -}}
# Template Parameters:
#
# COMMENT=
# NAMESPACE=kube-system
# NAME=clickhouse-operator
#
# Setup ServiceAccount
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "altinity-clickhouse-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels: {{ include "altinity-clickhouse-operator.labels" . | nindent 4 }}
annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }}
# Template Parameters:
#
# NAMESPACE=kube-system
# COMMENT=#
# ROLE_KIND=ClusterRole
# ROLE_NAME=clickhouse-operator-kube-system
# ROLE_BINDING_KIND=ClusterRoleBinding
# ROLE_BINDING_NAME=clickhouse-operator-kube-system
#
{{- end -}}


@@ -1,19 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ printf "%s-clickhouse-metrics" (include "altinity-clickhouse-operator.fullname" .) }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "altinity-clickhouse-operator.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
endpoints:
- port: clickhouse-metrics # 8888
- port: operator-metrics # 9999
selector:
matchLabels:
{{- include "altinity-clickhouse-operator.selectorLabels" . | nindent 6 }}
{{- end }}
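A sketch of enabling it, assuming a prometheus-operator stack that selects ServiceMonitors by a release label (the label key and value are assumptions and must match your Prometheus selector):

serviceMonitor:
  enabled: true
  additionalLabels:
    release: kube-prometheus-stack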


@@ -1,670 +0,0 @@
operator:
image:
# operator.image.repository -- image repository
repository: altinity/clickhouse-operator
# operator.image.tag -- image tag (chart's appVersion value will be used if not set)
tag: ""
# operator.image.pullPolicy -- image pull policy
pullPolicy: IfNotPresent
containerSecurityContext: {}
# operator.resources -- custom resource configuration, look `kubectl explain pod.spec.containers.resources` for details
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# operator.env -- additional environment variables for the clickhouse-operator container in deployment
# possible format value [{"name": "SAMPLE", "value": "text"}]
env: []
metrics:
enabled: true
image:
# metrics.image.repository -- image repository
repository: altinity/metrics-exporter
# metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
tag: ""
# metrics.image.pullPolicy -- image pull policy
pullPolicy: IfNotPresent
containerSecurityContext: {}
# metrics.resources -- custom resource configuration
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
# metrics.env -- additional environment variables for the deployment of metrics-exporter containers
# possible format value [{"name": "SAMPLE", "value": "text"}]
env: []
# imagePullSecrets -- image pull secret for private images in clickhouse-operator pod
# possible value format [{"name":"your-secret-name"}]
# look `kubectl explain pod.spec.imagePullSecrets` for details
imagePullSecrets: []
# podLabels -- labels to add to the clickhouse-operator pod
podLabels: {}
# podAnnotations -- annotations to add to the clickhouse-operator pod, look `kubectl explain pod.spec.annotations` for details
podAnnotations:
prometheus.io/port: '8888'
prometheus.io/scrape: 'true'
clickhouse-operator-metrics/port: '9999'
clickhouse-operator-metrics/scrape: 'true'
# nameOverride -- override name of the chart
nameOverride: ""
# fullnameOverride -- full name of the chart.
fullnameOverride: ""
serviceAccount:
# serviceAccount.create -- specifies whether a service account should be created
create: true
# serviceAccount.annotations -- annotations to add to the service account
annotations: {}
# serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
name:
rbac:
# rbac.create -- specifies whether cluster roles and cluster role bindings should be created
create: true
secret:
# secret.create -- create a secret with operator credentials
create: true
# secret.username -- operator credentials username
username: clickhouse_operator
# secret.password -- operator credentials password
password: clickhouse_operator_password
# nodeSelector -- node for scheduler pod assignment, look `kubectl explain pod.spec.nodeSelector` for details
nodeSelector: {}
# tolerations -- tolerations for scheduler pod assignment, look `kubectl explain pod.spec.tolerations` for details
tolerations: []
# affinity -- affinity for scheduler pod assignment, look `kubectl explain pod.spec.affinity` for details
affinity: {}
# podSecurityContext - operator deployment SecurityContext, look `kubectl explain pod.spec.securityContext` for details
podSecurityContext: {}
serviceMonitor:
# serviceMonitor.enabled -- ServiceMonitor custom resource is created for [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator)
enabled: false
# serviceMonitor.additionalLabels -- additional labels for service monitor
additionalLabels: {}
# configs -- clickhouse-operator configs
# @default -- check the values.yaml file for the config content, auto-generated from latest operator release
configs:
confdFiles: null
configdFiles:
01-clickhouse-01-listen.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<listen_try>1</listen_try>
</yandex>
01-clickhouse-02-logger.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<logger>
<!-- Possible levels: https://github.com/pocoproject/poco/blob/devel/Foundation/include/Poco/Logger.h#L439 -->
<level>debug</level>
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<size>1000M</size>
<count>10</count>
<!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
<console>1</console>
</logger>
</yandex>
01-clickhouse-03-query_log.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<query_log replace="1">
<database>system</database>
<table>query_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</query_log>
<query_thread_log remove="1"/>
</yandex>
01-clickhouse-04-part_log.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<part_log replace="1">
<database>system</database>
<table>part_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</part_log>
</yandex>
01-clickhouse-05-trace_log.xml: |-
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<trace_log replace="1">
<database>system</database>
<table>trace_log</table>
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
</trace_log>
</yandex>
files:
config.yaml:
# IMPORTANT
# This file is auto-generated
# Do not edit this file - all changes would be lost
# Edit appropriate template in the following folder:
# deploy/builder/templates-config
# IMPORTANT
#
# Template parameters available:
# WATCH_NAMESPACES=
# CH_USERNAME_PLAIN=
# CH_PASSWORD_PLAIN=
# CH_CREDENTIALS_SECRET_NAMESPACE=
# CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
# VERBOSITY=1
################################################
##
## Watch section
##
################################################
watch:
# List of namespaces where clickhouse-operator watches for events.
# Concurrently running operators should watch on different namespaces.
# IMPORTANT
# Regexp is applicable.
#namespaces: ["dev", "test"]
namespaces: []
clickhouse:
configuration:
################################################
##
## Configuration files section
##
################################################
file:
# Each 'path' can be either absolute or relative.
# In case path is absolute - it is used as is
# In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
path:
# Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
common: config.d
# Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
host: conf.d
# Path to the folder where ClickHouse configuration files with users' settings are located.
# Files are common for all instances within a CHI.
user: users.d
################################################
##
## Configuration users section
##
################################################
user:
# Default settings for user accounts, created by the operator.
# IMPORTANT. These are not access credentials or settings for 'default' user account,
# it is a template for filling out missing fields for all user accounts to be created by the operator,
# with the following EXCEPTIONS:
# 1. 'default' user account DOES NOT use provided password, but uses all the rest of the fields.
# Password for 'default' user account has to be provided explicitly, if to be used.
# 2. CHOP user account DOES NOT use:
# - profile setting. It uses predefined profile called 'clickhouse_operator'
# - quota setting. It uses empty quota name.
# - networks IP setting. Operator specifies 'networks/ip' user setting to match the operator's pod IP only.
# - password setting. Password for CHOP account is used from 'clickhouse.access.*' section
default:
# Default values for ClickHouse user account(s) created by the operator
# 1. user/profile - string
# 2. user/quota - string
# 3. user/networks/ip - multiple strings
# 4. user/password - string
# These values can be overwritten on per-user basis.
profile: "default"
quota: "default"
networksIP:
- "::1"
- "127.0.0.1"
password: "default"
################################################
##
## Configuration network section
##
################################################
network:
# Default host_regexp to limit network connectivity from outside
hostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
################################################
##
## Configuration restart policy section
## Configuration restart policy describes what configuration changes require ClickHouse restart
##
################################################
configurationRestartPolicy:
rules:
# IMPORTANT!
# Special version of "*" - default version - has to satisfy all ClickHouse versions.
# Default version will also be used in case ClickHouse version is unknown.
# ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section.
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
- version: "*"
rules:
# see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
# to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
- settings/*: "yes"
# single values
- settings/access_control_path: "no"
- settings/dictionaries_config: "no"
- settings/max_server_memory_*: "no"
- settings/max_*_to_drop: "no"
- settings/max_concurrent_queries: "no"
- settings/models_config: "no"
- settings/user_defined_executable_functions_config: "no"
# structured XML
- settings/logger/*: "no"
- settings/macros/*: "no"
- settings/remote_servers/*: "no"
- settings/user_directories/*: "no"
- zookeeper/*: "yes"
- files/*.xml: "yes"
- files/config.d/*.xml: "yes"
- files/config.d/*dict*.xml: "no"
# exceptions in default profile
- profiles/default/background_*_pool_size: "yes"
- profiles/default/max_*_for_server: "yes"
- version: "21.*"
rules:
- settings/logger: "yes"
#################################################
##
## Access to ClickHouse instances
##
################################################
access:
# Possible values for 'scheme' are:
# 1. http - force http to be used to connect to ClickHouse instances
# 2. https - force https to be used to connect to ClickHouse instances
# 3. auto - either http or https is selected based on open ports
scheme: "auto"
# ClickHouse credentials (username, password and port) to be used by the operator to connect to ClickHouse instances.
# These credentials are used for:
# 1. Metrics requests
# 2. Schema maintenance
# 3. DROP DNS CACHE
# User with these credentials can be specified in additional ClickHouse .xml config files,
# located in 'clickhouse.configuration.file.path.user' folder
username: ""
password: ""
rootCA: ""
# Location of the k8s Secret with username and password to be used by the operator to connect to ClickHouse instances.
# Can be used instead of explicitly specified username and password available in sections:
# - clickhouse.access.username
# - clickhouse.access.password
# Secret should have two keys:
# 1. username
# 2. password
secret:
# Empty `namespace` means the k8s Secret is looked up in the same namespace where the operator's pod is running.
namespace: ""
# Empty `name` means no k8s Secret would be looked for
name: '{{ include "altinity-clickhouse-operator.fullname" . }}'
# Port to use when connecting to ClickHouse instances
port: 8123
# Timeouts used to limit connection and queries from the operator to ClickHouse instances
# Specified in seconds.
timeouts:
# Timeout to set up a connection from the operator to ClickHouse instances. In seconds.
connect: 1
# Timeout to perform an SQL query from the operator to ClickHouse instances. In seconds.
query: 4
#################################################
##
## Metrics collection
##
################################################
metrics:
# Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
# Specified in seconds.
timeouts:
# Timeout used to limit metrics collection request. In seconds.
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
# All collected metrics are returned.
collect: 9
################################################
##
## Template(s) management section
##
################################################
template:
chi:
# CHI template updates handling policy
# Possible policy values:
# - ReadOnStart. Accept CHIT updates on the operator's start only.
# - ApplyOnNextReconcile. Accept CHIT updates at any time. Apply new CHITs on the next regular reconcile of the CHI
policy: ApplyOnNextReconcile
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
# Templates are added to the list of all templates and used when CHI is reconciled.
# Templates are applied in sorted alpha-numeric order.
path: templates.d
################################################
##
## Reconcile section
##
################################################
reconcile:
# Reconcile runtime settings
runtime:
# Max number of concurrent CHI reconciles in progress
reconcileCHIsThreadsNumber: 10
# The operator reconciles shards concurrently in each CHI with the following limitations:
# 1. Number of shards being reconciled (and thus having hosts down) in each CHI concurrently
# can not be greater than 'reconcileShardsThreadsNumber'.
# 2. Percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
# can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
# 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
# Thus limiting number of shards being reconciled (and thus having hosts down) in each CHI by both number and percentage
# Max number of concurrent shard reconciles within one CHI in progress
reconcileShardsThreadsNumber: 5
# Max percentage of concurrent shard reconciles within one CHI in progress
reconcileShardsMaxConcurrencyPercent: 50
# Reconcile StatefulSet scenario
statefulSet:
# Create StatefulSet scenario
create:
# What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
# Possible options:
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
# do not try to fix or delete or update it, just abort reconcile cycle.
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
# 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: ignore
# Update StatefulSet scenario
update:
# How many seconds to wait for created/updated StatefulSet to be 'Ready'
timeout: 300
# How many seconds to wait between checks/polls for created/updated StatefulSet status
pollInterval: 5
# What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
# Possible options:
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
# do not try to fix or delete or update it, just abort reconcile cycle.
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
# Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
# Follow 'abort' path afterwards.
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
onFailure: abort
# Reconcile Host scenario
host:
# Whether the operator during reconcile procedure should wait for a ClickHouse host:
# - to be excluded from a ClickHouse cluster
# - to complete all running queries
# - to be included into a ClickHouse cluster
# respectively, before moving forward
wait:
exclude: true
queries: true
include: false
################################################
##
## Annotations management section
##
################################################
annotation:
# Applied when:
# 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
# 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
# Include annotations from the following list:
# Applied only when not empty. Empty list means "include all, no selection"
include: []
# Exclude annotations from the following list:
exclude: []
################################################
##
## Labels management section
##
################################################
label:
# Applied when:
# 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
# 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
# Include labels from the following list:
# Applied only when not empty. Empty list means "include all, no selection"
include: []
# Exclude labels from the following list:
# Applied only when not empty. Empty list means "nothing to exclude, no selection"
exclude: []
# Whether to append *Scope* labels to StatefulSet and Pod.
# For the full list of available *scope* labels, check 'labeler.go'
# LabelShardScopeIndex
# LabelReplicaScopeIndex
# LabelCHIScopeIndex
# LabelCHIScopeCycleSize
# LabelCHIScopeCycleIndex
# LabelCHIScopeCycleOffset
# LabelClusterScopeIndex
# LabelClusterScopeCycleSize
# LabelClusterScopeCycleIndex
# LabelClusterScopeCycleOffset
appendScope: "no"
################################################
##
## StatefulSet management section
##
################################################
statefulSet:
revisionHistoryLimit: 0
################################################
##
## Pod management section
##
################################################
pod:
# Grace period for Pod termination.
# How many seconds to wait between sending
# SIGTERM and SIGKILL during Pod termination process.
# Increase this number in case of slow shutdown.
terminationGracePeriod: 30
################################################
##
## Log parameters section
##
################################################
logger:
logtostderr: "true"
alsologtostderr: "false"
v: "1"
stderrthreshold: ""
vmodule: ""
log_backtrace_at: ""
templatesdFiles:
001-templates.json.example: |
{
"apiVersion": "clickhouse.altinity.com/v1",
"kind": "ClickHouseInstallationTemplate",
"metadata": {
"name": "01-default-volumeclaimtemplate"
},
"spec": {
"templates": {
"volumeClaimTemplates": [
{
"name": "chi-default-volume-claim-template",
"spec": {
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "2Gi"
}
}
}
}
],
"podTemplates": [
{
"name": "chi-default-oneperhost-pod-template",
"distribution": "OnePerHost",
"spec": {
"containers" : [
{
"name": "clickhouse",
"image": "clickhouse/clickhouse-server:23.8",
"ports": [
{
"name": "http",
"containerPort": 8123
},
{
"name": "client",
"containerPort": 9000
},
{
"name": "interserver",
"containerPort": 9009
}
]
}
]
}
}
]
}
}
}
default-pod-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-oneperhost-pod-template"
spec:
templates:
podTemplates:
- name: default-oneperhost-pod-template
distribution: "OnePerHost"
default-storage-template.yaml.example: |
apiVersion: "clickhouse.altinity.com/v1"
kind: "ClickHouseInstallationTemplate"
metadata:
name: "default-storage-template-2Gi"
spec:
templates:
volumeClaimTemplates:
- name: default-storage-template-2Gi
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
readme: |-
Templates in this folder are packaged with an operator and available via 'useTemplate'
usersdFiles:
01-clickhouse-operator-profile.xml: |
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<!--
#
# Template parameters available:
#
-->
<yandex>
<!-- clickhouse-operator user is generated by the operator based on config.yaml in runtime -->
<profiles>
<clickhouse_operator>
<log_queries>0</log_queries>
<skip_unavailable_shards>1</skip_unavailable_shards>
<http_connection_timeout>10</http_connection_timeout>
<max_concurrent_queries_for_all_users>0</max_concurrent_queries_for_all_users>
<os_thread_priority>0</os_thread_priority>
</clickhouse_operator>
</profiles>
</yandex>
02-clickhouse-default-profile.xml: |-
<!-- IMPORTANT -->
<!-- This file is auto-generated -->
<!-- Do not edit this file - all changes would be lost -->
<!-- Edit appropriate template in the following folder: -->
<!-- deploy/builder/templates-config -->
<!-- IMPORTANT -->
<yandex>
<profiles>
<default>
<os_thread_priority>2</os_thread_priority>
<log_queries>1</log_queries>
<connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
<distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
<parallel_view_processing>1</parallel_view_processing>
<do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
<load_balancing>nearest_hostname</load_balancing>
<prefer_localhost_replica>0</prefer_localhost_replica>
<!-- materialize_ttl_recalculate_only>1</materialize_ttl_recalculate_only> 21.10 and above -->
</default>
</profiles>
</yandex>
# additionalResources -- list of additional resources to create (processed via the `tpl` function), useful for creating ClickHouse clusters together with clickhouse-operator, look `kubectl explain chi` for details
additionalResources: []
# - |
# apiVersion: v1
# kind: ConfigMap
# metadata:
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
# namespace: {{ .Release.Namespace }}
# - |
# apiVersion: v1
# kind: Secret
# metadata:
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
# namespace: {{ .Release.Namespace }}
# stringData:
# mykey: my-value
# - |
# apiVersion: clickhouse.altinity.com/v1
# kind: ClickHouseInstallation
# metadata:
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
# namespace: {{ .Release.Namespace }}
# spec:
# configuration:
# clusters:
# - name: default
# layout:
# shardsCount: 1
dashboards:
# dashboards.enabled -- provision grafana dashboards as secrets (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-6.33.1/charts/grafana/values.yaml#L679 )
enabled: false
# dashboards.additionalLabels -- labels to add to a secret with dashboards
additionalLabels:
grafana_dashboard: ""
# dashboards.annotations -- annotations to add to a secret with dashboards
annotations: {}
grafana_folder: clickhouse


@@ -1,6 +0,0 @@
altinity-clickhouse-operator:
configs:
files:
config.yaml:
watch:
namespaces: [".*"]


@@ -1,3 +1,2 @@
apiVersion: v2
name: cozy-dashboard
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
version: 1.0.0


@@ -1,8 +1,18 @@
NAME=dashboard
NAMESPACE=cozy-$(NAME)
NAMESPACE=cozy-dashboard
PUSH := 1
LOAD := 0
REPOSITORY := ghcr.io/aenix-io/cozystack
TAG := v0.1.0
include ../../../scripts/common-envs.mk
include ../../../scripts/package-system.mk
show:
helm template --dry-run=server -n $(NAMESPACE) $(NAME) .
apply:
helm upgrade -i -n $(NAMESPACE) $(NAME) .
diff:
helm diff upgrade --allow-unreleased --normalize-manifests -n $(NAMESPACE) $(NAME) .
update: update-chart update-dockerfiles
image: image-dashboard image-kubeapps-apis
@@ -27,21 +37,21 @@ update-dockerfiles:
image-dashboard:
docker buildx build images/dashboard \
--provenance false \
--tag $(REGISTRY)/dashboard:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/dashboard:latest \
--tag $(REPOSITORY)/dashboard:$(TAG) \
--cache-from type=registry,ref=$(REPOSITORY)/dashboard:$(TAG) \
--cache-to type=inline \
--metadata-file images/dashboard.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/dashboard:$(call settag,$(TAG))" > images/dashboard.tag
echo "$(REPOSITORY)/dashboard:$(TAG)" > images/dashboard.tag
image-kubeapps-apis:
docker buildx build images/kubeapps-apis \
--provenance false \
--tag $(REGISTRY)/kubeapps-apis:$(call settag,$(TAG)) \
--cache-from type=registry,ref=$(REGISTRY)/kubeapps-apis:latest \
--tag $(REPOSITORY)/kubeapps-apis:$(TAG) \
--cache-from type=registry,ref=$(REPOSITORY)/kubeapps-apis:$(TAG) \
--cache-to type=inline \
--metadata-file images/kubeapps-apis.json \
--push=$(PUSH) \
--load=$(LOAD)
echo "$(REGISTRY)/kubeapps-apis:$(call settag,$(TAG))" > images/kubeapps-apis.tag
echo "$(REPOSITORY)/kubeapps-apis:$(TAG)" > images/kubeapps-apis.tag
