[kamaji] Update Kamaji edge-25.7.1

Signed-off-by: Andrei Kvapil <kvapss@gmail.com>
Andrei Kvapil
2025-07-09 19:00:58 +02:00
parent 81a412517c
commit 5727110542
29 changed files with 0 additions and 1616 deletions


@@ -1,2 +0,0 @@
images
hack


@@ -1,3 +0,0 @@
apiVersion: v2
name: cozy-kamaji-etcd
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process


@@ -1,9 +0,0 @@
update:
rm -rf charts
helm repo add clastix https://clastix.github.io/charts
helm repo update clastix
helm pull clastix/kamaji-etcd --untar --untardir charts
sed -i 's/hook-failed/before-hook-creation,hook-failed/' `grep -rl hook-failed charts`
patch --no-backup-if-mismatch -p4 < patches/fix-svc.diff
patch --no-backup-if-mismatch -p4 < patches/fullnameOverride.diff
patch --no-backup-if-mismatch -p4 < patches/remove-plus.patch


@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -1,15 +0,0 @@
apiVersion: v2
appVersion: 3.5.6
description: Helm chart for deploying a multi-tenant `etcd` cluster.
home: https://github.com/clastix/kamaji-etcd
kubeVersion: '>=1.22.0-0'
maintainers:
- email: me@bsctl.io
name: Adriano Pezzuto
- email: dario@tranchitella.eu
name: Dario Tranchitella
name: kamaji-etcd
sources:
- https://github.com/clastix/kamaji-etcd
type: application
version: 0.5.1


@@ -1,9 +0,0 @@
docs: HELMDOCS_VERSION := v1.8.1
docs: docker
@docker run --rm -v "$$(pwd):/helm-docs" -u $$(id -u) jnorwood/helm-docs:$(HELMDOCS_VERSION)
docker:
@hash docker 2>/dev/null || {\
echo "You need docker" &&\
exit 1;\
}


@@ -1,133 +0,0 @@
# kamaji-etcd
![Version: 0.5.1](https://img.shields.io/badge/Version-0.5.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.5.6](https://img.shields.io/badge/AppVersion-3.5.6-informational?style=flat-square)
Helm chart for deploying a multi-tenant `etcd` cluster.
[Kamaji](https://github.com/clastix/kamaji) turns any Kubernetes cluster into an _admin cluster_ to orchestrate other Kubernetes clusters called _tenant clusters_.
The Control Plane of a _tenant cluster_ is made of regular pods running in a namespace of the _admin cluster_ instead of a dedicated set of Virtual Machines.
This solution makes running control planes at scale cheaper and easier to deploy and operate.
As with any Kubernetes cluster, a _tenant cluster_ needs a datastore where it can save state and retrieve data.
This chart deploys a multi-tenant `etcd` that can serve as the datastore for Kamaji or run as a standalone multi-tenant `etcd` cluster.
## Install kamaji-etcd
To install the Chart with the release name `kamaji-etcd`:
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji-etcd clastix/kamaji-etcd -n kamaji-etcd --create-namespace
Show the status:
helm status kamaji-etcd -n kamaji-etcd
Upgrade the Chart:
helm upgrade kamaji-etcd -n kamaji-etcd clastix/kamaji-etcd
Uninstall the Chart:
helm uninstall kamaji-etcd -n kamaji-etcd
## Customize the installation
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line.
Create a copy of the YAML file `values.yaml` and add your overrides to it.
Specify your overrides file when you install the Chart:
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --values myvalues.yaml
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file.
Any values in `values.yaml` that weren't overridden will keep their defaults.
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --set replicas=5
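For reference, a minimal `myvalues.yaml` might look like the following (an illustrative sketch only; the size is a placeholder, and the storage class must exist in your cluster):

replicas: 5
persistentVolumeClaim:
  size: 20Gi
  storageClassName: ""  # leave empty for the default storage class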
Here are the values you can override:
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Kubernetes affinity rules to apply to etcd controller pods |
| alerts.annotations | object | `{}` | Assign additional Annotations |
| alerts.enabled | bool | `false` | Enable alerts for Alertmanager |
| alerts.labels | object | `{}` | Assign additional labels according to Prometheus' Alerts matching labels |
| alerts.namespace | string | `""` | Install the alerts into a different namespace, such as the one used by the monitoring stack (default: the release namespace) |
| alerts.rules | list | `[]` | The rules for alerts |
| autoCompactionMode | string | `"periodic"` | Interpret 'auto-compaction-retention' one of: periodic|revision. Use 'periodic' for duration based retention, 'revision' for revision number based retention. |
| autoCompactionRetention | string | `"5m"` | Auto compaction retention length. 0 means disable auto compaction. |
| backup | object | `{"all":false,"enabled":false,"s3":{"accessKey":{"value":"","valueFrom":{}},"bucket":"mybucket","image":{"pullPolicy":"IfNotPresent","repository":"minio/mc","tag":"RELEASE.2022-11-07T23-47-39Z"},"retention":"","secretKey":{"value":"","valueFrom":{}},"url":"http://mys3storage:9000"},"schedule":"20 3 * * *","snapshotDateFormat":"$(date +%Y%m%d)","snapshotNamePrefix":"mysnapshot"}` | Enable storage backup |
| backup.all | bool | `false` | Enable backup for all endpoints. When disabled, only the leader will be taken |
| backup.enabled | bool | `false` | Enable scheduling backup job |
| backup.s3 | object | `{"accessKey":{"value":"","valueFrom":{}},"bucket":"mybucket","image":{"pullPolicy":"IfNotPresent","repository":"minio/mc","tag":"RELEASE.2022-11-07T23-47-39Z"},"retention":"","secretKey":{"value":"","valueFrom":{}},"url":"http://mys3storage:9000"}` | The S3 storage config section |
| backup.s3.accessKey | object | `{"value":"","valueFrom":{}}` | The S3 storage ACCESS KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret. |
| backup.s3.bucket | string | `"mybucket"` | The S3 storage bucket |
| backup.s3.image | object | `{"pullPolicy":"IfNotPresent","repository":"minio/mc","tag":"RELEASE.2022-11-07T23-47-39Z"}` | The S3 client image config section |
| backup.s3.image.pullPolicy | string | `"IfNotPresent"` | Pull policy to use |
| backup.s3.image.repository | string | `"minio/mc"` | Install image from specific repo |
| backup.s3.image.tag | string | `"RELEASE.2022-11-07T23-47-39Z"` | Install image with specific tag |
| backup.s3.retention | string | `""` | The S3 storage object lifecycle management rules; N.B. enabling this option will delete previously set lifecycle rules |
| backup.s3.secretKey | object | `{"value":"","valueFrom":{}}` | The S3 storage SECRET KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret. |
| backup.s3.url | string | `"http://mys3storage:9000"` | The S3 storage url |
| backup.schedule | string | `"20 3 * * *"` | The job scheduled maintenance time for backup |
| backup.snapshotDateFormat | string | `"$(date +%Y%m%d)"` | The backup file date format (bash) |
| backup.snapshotNamePrefix | string | `"mysnapshot"` | The backup file name prefix |
| clientPort | int | `2379` | The client request port. |
| datastore.enabled | bool | `false` | Create a datastore custom resource for Kamaji |
| defragmentation | object | `{"schedule":"*/15 * * * *"}` | Enable storage defragmentation |
| defragmentation.schedule | string | `"*/15 * * * *"` | The job scheduled maintenance time for defrag (empty to disable) |
| extraArgs | list | `[]` | A list of extra arguments to add to the etcd default ones |
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy to use |
| image.repository | string | `"quay.io/coreos/etcd"` | Install image from specific repo |
| image.tag | string | `""` | Install image with specific tag, overwrite the tag in the chart |
| livenessProbe | object | `{}` | The livenessProbe for the etcd container |
| metricsPort | int | `2381` | The port where etcd exposes metrics. |
| nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Kubernetes node selector rules to schedule etcd |
| peerApiPort | int | `2380` | The peer API port which servers are listening to. |
| persistentVolumeClaim.accessModes | list | `["ReadWriteOnce"]` | The Access Mode to storage |
| persistentVolumeClaim.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
| persistentVolumeClaim.size | string | `"10Gi"` | The size of persistent storage for etcd data |
| persistentVolumeClaim.storageClassName | string | `""` | A specific storage class |
| podAnnotations | object | `{}` | Annotations to add to all etcd pods |
| podLabels | object | `{"application":"kamaji-etcd"}` | Labels to add to all etcd pods |
| priorityClassName | string | `"system-cluster-critical"` | The priorityClassName to apply to etcd |
| quotaBackendBytes | string | `"8589934592"` | Raise alarms when backend size exceeds the given quota. It will put the cluster into a maintenance mode which only accepts key reads and deletes. |
| replicas | int | `3` | Size of the etcd cluster |
| resources | object | `{"limits":{},"requests":{}}` | Resources assigned to the etcd containers |
| securityContext | object | `{"allowPrivilegeEscalation":false}` | The securityContext to apply to etcd |
| serviceAccount | object | `{"create":true,"name":""}` | Install an etcd with multi-tenancy enabled |
| serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
| serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
| serviceMonitor.annotations | object | `{}` | Assign additional Annotations |
| serviceMonitor.enabled | bool | `false` | Enable ServiceMonitor for Prometheus |
| serviceMonitor.endpoint.interval | string | `"15s"` | Set the scrape interval for the endpoint of the serviceMonitor |
| serviceMonitor.endpoint.metricRelabelings | list | `[]` | Set metricRelabelings for the endpoint of the serviceMonitor |
| serviceMonitor.endpoint.relabelings | list | `[]` | Set relabelings for the endpoint of the serviceMonitor |
| serviceMonitor.endpoint.scrapeTimeout | string | `""` | Set the scrape timeout for the endpoint of the serviceMonitor |
| serviceMonitor.labels | object | `{}` | Assign additional labels according to Prometheus' serviceMonitorSelector matching labels |
| serviceMonitor.matchLabels | object | `{}` | Change matching labels |
| serviceMonitor.namespace | string | `""` | Install the ServiceMonitor into a different namespace, such as the one used by the monitoring stack (default: the release namespace) |
| serviceMonitor.serviceAccount.name | string | `"etcd"` | ServiceAccount for Metrics RBAC |
| serviceMonitor.serviceAccount.namespace | string | `"etcd-system"` | ServiceAccount Namespace for Metrics RBAC |
| serviceMonitor.targetLabels | list | `[]` | Set targetLabels for the serviceMonitor |
| snapshotCount | string | `"10000"` | Number of committed transactions to trigger a snapshot to disk. |
| tolerations | list | `[]` | Kubernetes node taints that the etcd pods would tolerate |
| topologySpreadConstraints | list | `[]` | Kubernetes topology spread constraints to apply to etcd controller pods |
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| Adriano Pezzuto | <me@bsctl.io> | |
| Dario Tranchitella | <dario@tranchitella.eu> | |
## Source Code
* <https://github.com/clastix/kamaji-etcd>


@@ -1,59 +0,0 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.badgesSection" . }}
{{ template "chart.description" . }}
[Kamaji](https://github.com/clastix/kamaji) turns any Kubernetes cluster into an _admin cluster_ to orchestrate other Kubernetes clusters called _tenant clusters_.
The Control Plane of a _tenant cluster_ is made of regular pods running in a namespace of the _admin cluster_ instead of a dedicated set of Virtual Machines.
This solution makes running control planes at scale cheaper and easier to deploy and operate.
As with any Kubernetes cluster, a _tenant cluster_ needs a datastore where it can save state and retrieve data.
This chart deploys a multi-tenant `etcd` that can serve as the datastore for Kamaji or run as a standalone multi-tenant `etcd` cluster.
## Install kamaji-etcd
To install the Chart with the release name `kamaji-etcd`:
helm repo add clastix https://clastix.github.io/charts
helm repo update
helm install kamaji-etcd clastix/kamaji-etcd -n kamaji-etcd --create-namespace
Show the status:
helm status kamaji-etcd -n kamaji-etcd
Upgrade the Chart:
helm upgrade kamaji-etcd -n kamaji-etcd clastix/kamaji-etcd
Uninstall the Chart:
helm uninstall kamaji-etcd -n kamaji-etcd
## Customize the installation
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line.
Create a copy of the YAML file `values.yaml` and add your overrides to it.
Specify your overrides file when you install the Chart:
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --values myvalues.yaml
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file.
Any values in `values.yaml` that weren't overridden will keep their defaults.
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --set replicas=5
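For reference, a minimal `myvalues.yaml` might look like the following (an illustrative sketch only; the size is a placeholder, and the storage class must exist in your cluster):

replicas: 5
persistentVolumeClaim:
  size: 20Gi
  storageClassName: ""  # leave empty for the default storage class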
Here are the values you can override:
{{ template "chart.valuesSection" . }}
{{ template "chart.maintainersSection" . }}
{{ template "chart.sourcesSection" . }}


@@ -1,164 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "etcd.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified etcd name.
*/}}
{{- define "etcd.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "etcd.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create the etcd fully-qualified Docker image to use
*/}}
{{- define "etcd.fullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.image.repository ( .Values.image.tag | default (printf "v%s" .Chart.AppVersion) ) -}}
{{- end }}
{{/*
Create the name of the Service to use
*/}}
{{- define "etcd.serviceName" -}}
{{- printf "%s" (include "etcd.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "etcd.labels" -}}
helm.sh/chart: {{ include "etcd.chart" . }}
{{ include "etcd.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "etcd.selectorLabels" -}}
app.kubernetes.io/name: {{ include "etcd.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "etcd.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "etcd.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Name of the Stateful Set.
*/}}
{{- define "etcd.stsName" }}
{{- printf "%s" (include "etcd.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Name of the etcd CA secret.
*/}}
{{- define "etcd.caSecretName" }}
{{- printf "%s-%s" (include "etcd.fullname" .) "certs" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Name of the certificate signing requests for the certificates required by etcd.
*/}}
{{- define "etcd.csrConfigMapName" }}
{{- printf "%s-csr" (include "etcd.fullname" .) }}
{{- end }}
{{/*
Name of the etcd role
*/}}
{{- define "etcd.roleName" }}
{{- printf "%s-gen-certs-role" (include "etcd.fullname" .) }}
{{- end }}
{{/*
Name of the etcd role binding
*/}}
{{- define "etcd.roleBindingName" }}
{{- printf "%s-gen-certs-rolebiding" (include "etcd.fullname" .) }}
{{- end }}
{{/*
Name of the etcd root-client secret.
*/}}
{{- define "etcd.clientSecretName" }}
{{- printf "%s-root-client-certs" ( include "etcd.fullname" . ) }}
{{- end }}
{{/*
Retrieve the current Kubernetes version to launch a kubectl container with the minimum version skew possible.
*/}}
{{- define "etcd.jobsTagKubeVersion" -}}
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
{{- end }}
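{{/*
Illustrative example (an assumption, not taken from the chart): on a cluster whose API
server reports Major "1" and Minor "29+" (as some managed distributions do), this
renders "v1.29"; stripping the "+" keeps the tag aligned with published kubectl images.
*/}}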
{{/*
Comma separated list of etcd cluster peers.
*/}}
{{- define "etcd.initialCluster" }}
{{- $outer := . -}}
{{- $list := list -}}
{{- range $i, $count := until (int $.Values.replicas) -}}
{{- $list = append $list ( printf "%s-%d=https://%s-%d.%s.%s.svc.cluster.local:%d" ( include "etcd.stsName" $outer ) $i ( include "etcd.fullname" $outer ) $count ( include "etcd.serviceName" $outer ) $.Release.Namespace (int $.Values.peerApiPort) ) -}}
{{- end }}
{{- join "," $list -}}
{{- end }}
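{{/*
Illustrative rendered value (assuming a release named "kamaji-etcd" in the "kamaji-etcd"
namespace, with replicas=3 and peerApiPort=2380):
kamaji-etcd-0=https://kamaji-etcd-0.kamaji-etcd.kamaji-etcd.svc.cluster.local:2380,kamaji-etcd-1=https://kamaji-etcd-1.kamaji-etcd.kamaji-etcd.svc.cluster.local:2380,kamaji-etcd-2=https://kamaji-etcd-2.kamaji-etcd.kamaji-etcd.svc.cluster.local:2380
*/}}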
{{/*
Space separated list of etcd cluster endpoints.
*/}}
{{- define "etcd.endpoints" }}
{{- $outer := . -}}
{{- $list := list -}}
{{- range $i, $count := until (int $.Values.replicas) -}}
{{- $list = append $list ( printf "%s-%d.%s.%s.svc.cluster.local:%d" ( include "etcd.stsName" $outer ) $count ( include "etcd.serviceName" $outer ) $.Release.Namespace (int $.Values.clientPort) ) -}}
{{- end }}
{{- join " " $list -}}
{{- end }}
{{/*
Space separated list of etcd cluster endpoints.
*/}}
{{- define "etcd.endpointsYAML" }}
{{- $outer := . -}}
{{- range $i, $count := until (int $.Values.replicas) -}}
{{ printf "- %s-%d.%s.%s.svc.cluster.local:%d\n" ( include "etcd.stsName" $outer ) $count ( include "etcd.serviceName" $outer ) $.Release.Namespace (int $.Values.clientPort) }}
{{- end }}
{{- end }}
{{/*
Create the minio-client fully-qualified Docker image to use
*/}}
{{- define "minio-client.fullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.backup.s3.image.repository .Values.backup.s3.image.tag -}}
{{- end }}


@@ -1,22 +0,0 @@
{{- if .Values.alerts.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "etcd.fullname" . }}-alerts
namespace: {{ .Values.alerts.namespace | default .Release.Namespace }}
labels:
{{- include "etcd.labels" . | nindent 4 }}
{{- with .Values.alerts.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.alerts.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
groups:
- name: kamaji-etcd
{{- with .Values.alerts.rules }}
rules: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}


@@ -1,98 +0,0 @@
{{- $outer := $ -}}
apiVersion: v1
kind: ConfigMap
metadata:
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "5"
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.csrConfigMapName" . }}
namespace: {{ .Release.Namespace }}
data:
ca-csr.json: |-
{
"CN": "Clastix CA",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "IT",
"ST": "Italy",
"L": "Milan"
}
]
}
config.json: |-
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"server-authentication": {
"usages": ["signing", "key encipherment", "server auth"],
"expiry": "8760h"
},
"client-authentication": {
"usages": ["signing", "key encipherment", "client auth"],
"expiry": "8760h"
},
"peer-authentication": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "8760h"
}
}
}
}
server-csr.json: |-
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
{{- range $count := until (int $.Values.replicas) -}}
{{ printf "\"%s-%d.%s.%s.svc.cluster.local\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
{{ printf "\"%s-%d.%s.%s.svc\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
{{- end }}
"etcd-server.{{ .Release.Namespace }}.svc.cluster.local",
"etcd-server.{{ .Release.Namespace }}.svc",
"etcd-server",
"127.0.0.1"
]
}
peer-csr.json: |-
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
{{- range $count := until (int $.Values.replicas) -}}
{{ printf "\"%s-%d\"," ( include "etcd.stsName" $outer ) $count }}
{{ printf "\"%s-%d.%s\"," ( include "etcd.stsName" $outer ) $count (include "etcd.serviceName" $outer) }}
{{ printf "\"%s-%d.%s.%s.svc\"," ( include "etcd.stsName" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
{{ printf "\"%s-%d.%s.%s.svc.cluster.local\"," ( include "etcd.stsName" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
{{- end }}
"127.0.0.1"
]
}
root-client-csr.json: |-
{
"CN": "root",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"O": "system:masters"
}
]
}


@@ -1,115 +0,0 @@
{{- if .Values.backup.enabled -}}
apiVersion: batch/v1
kind: CronJob
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: "{{ .Release.Name }}-backup"
namespace: {{ .Release.Namespace }}
spec:
schedule: "{{ .Values.backup.schedule }}"
successfulJobsHistoryLimit: 7
jobTemplate:
spec:
template:
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: OnFailure
initContainers:
- name: etcd-client
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- bash
- -c
- |-
cd /opt/etcd-dump;
for ENDPOINT in {{ include "etcd.endpoints" . }}; do
isLeader=$(etcdctl --endpoints=${ENDPOINT} endpoint status | awk '{ print $6 }' | tr -d ',' )
if ! {{ .Values.backup.all }} && ! ${isLeader} ; then
continue
elif ! {{ .Values.backup.all }} && ${isLeader} ; then
POD="etcd-leader"
else
POD=${ENDPOINT#*//}
POD=${POD%.{{ include "etcd.serviceName" . }}*}
fi
SNAPSHOT={{ .Values.backup.snapshotNamePrefix }}_${POD}_{{ .Values.backup.snapshotDateFormat }}.db
etcdctl --endpoints=${ENDPOINT} snapshot save ${SNAPSHOT}
etcdutl --write-out=table snapshot status ${SNAPSHOT}
md5sum ${SNAPSHOT};
done;
env:
- name: ETCDCTL_CACERT
value: /opt/certs/ca/ca.crt
- name: ETCDCTL_CERT
value: /opt/certs/root-client-certs/tls.crt
- name: ETCDCTL_KEY
value: /opt/certs/root-client-certs/tls.key
volumeMounts:
- name: root-client-certs
mountPath: /opt/certs/root-client-certs
- name: certs
mountPath: /opt/certs/ca
- name: shared-data
mountPath: /opt/etcd-dump
containers:
- name: minio-client
image: {{ include "minio-client.fullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.backup.s3.image.pullPolicy }}
command:
- bash
- -c
- |-
cd /opt/etcd-dump
if $MC alias set myminio ${S3_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} \
&& $MC ping myminio -c 3 -e 3 ; then
echo -e "\nUploading snapshot(s):"
$MC cp {{ .Values.backup.snapshotNamePrefix }}_*.db myminio/{{ .Values.backup.s3.bucket }}
else
echo -e "\nERROR: S3 storage could not be configured;\nCheck your S3 URL/Credentials or network connectivity"
exit 1
fi
env:
- name: S3_URL
value: {{ .Values.backup.s3.url | quote }}
- name: S3_ACCESS_KEY
{{- if .Values.backup.s3.accessKey.value }}
value: {{ .Values.backup.s3.accessKey.value | quote }}
{{- else }}
valueFrom:
{{- toYaml .Values.backup.s3.accessKey.valueFrom | nindent 16 }}
{{- end }}
- name: S3_SECRET_KEY
{{- if .Values.backup.s3.secretKey.value }}
value: {{ .Values.backup.s3.secretKey.value | quote }}
{{- else }}
valueFrom:
{{- toYaml .Values.backup.s3.secretKey.valueFrom | nindent 16 }}
{{- end }}
- name: MC_CONFIG_DIR
value: /tmp
- name: MC
value: "/usr/bin/mc --config-dir ${MC_CONFIG_DIR}"
volumeMounts:
- name: shared-data
mountPath: /opt/etcd-dump
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 12 }}
{{- end }}
volumes:
- name: shared-data
emptyDir: {}
- name: root-client-certs
secret:
secretName: {{ include "etcd.clientSecretName" . }}
optional: true
- name: certs
secret:
secretName: {{ include "etcd.caSecretName" . }}
optional: true
{{- end }}


@@ -1,62 +0,0 @@
{{- if .Values.defragmentation.schedule -}}
apiVersion: batch/v1
kind: CronJob
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: "{{ .Release.Name }}-defrag"
namespace: {{ .Release.Namespace }}
spec:
schedule: "{{ .Values.defragmentation.schedule }}"
successfulJobsHistoryLimit: 4
jobTemplate:
spec:
template:
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: OnFailure
containers:
- name: etcd-client
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- bash
- -c
- |-
for ENDPOINT in {{ include "etcd.endpoints" . }}; do
etcdctl --endpoints=https://${ENDPOINT} defrag;
etcdctl --endpoints=https://${ENDPOINT} alarm disarm;
etcdctl --endpoints=https://${ENDPOINT} alarm list;
etcdctl --endpoints=https://${ENDPOINT} endpoint status -w table;
etcdctl --endpoints=https://${ENDPOINT} member list -w table;
sleep 15;
done;
env:
- name: ETCDCTL_CACERT
value: /opt/certs/ca/ca.crt
- name: ETCDCTL_CERT
value: /opt/certs/root-client-certs/tls.crt
- name: ETCDCTL_KEY
value: /opt/certs/root-client-certs/tls.key
volumeMounts:
- name: root-client-certs
mountPath: /opt/certs/root-client-certs
- name: certs
mountPath: /opt/certs/ca
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 12 }}
{{- end }}
volumes:
- name: root-client-certs
secret:
secretName: {{ include "etcd.clientSecretName" . }}
optional: true
- name: certs
secret:
secretName: {{ include "etcd.caSecretName" . }}
optional: true
{{- end }}


@@ -1,35 +0,0 @@
{{- if .Values.datastore.enabled }}
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
name: {{ include "etcd.fullname" . }}
labels:
{{- include "etcd.labels" . | nindent 4 }}
spec:
driver: etcd
endpoints:
{{- include "etcd.endpointsYAML" . | nindent 4 }}
tlsConfig:
certificateAuthority:
certificate:
secretReference:
keyPath: ca.crt
name: {{ include "etcd.caSecretName" . }}
namespace: {{ .Release.Namespace }}
privateKey:
secretReference:
keyPath: ca.key
name: {{ include "etcd.caSecretName" . }}
namespace: {{ .Release.Namespace }}
clientCertificate:
certificate:
secretReference:
keyPath: tls.crt
name: {{ include "etcd.clientSecretName" . }}
namespace: {{ .Release.Namespace }}
privateKey:
secretReference:
keyPath: tls.key
name: {{ include "etcd.clientSecretName" . }}
namespace: {{ .Release.Namespace }}
{{ end }}


@@ -1,32 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "10"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
name: "{{ .Release.Name }}-etcd-teardown"
namespace: {{ .Release.Namespace }}
spec:
template:
metadata:
name: "{{ .Release.Name }}"
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: Never
containers:
- name: kubectl
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
command:
- kubectl
- --namespace={{ .Release.Namespace }}
- delete
- secret
- --ignore-not-found=true
- {{ include "etcd.caSecretName" . }}
- {{ include "etcd.clientSecretName" . }}
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,69 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-weight": "10"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
name: "{{ .Release.Name }}-etcd-setup-1"
namespace: {{ .Release.Namespace }}
spec:
template:
metadata:
name: "{{ .Release.Name }}"
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: Never
initContainers:
- name: cfssl
image: cfssl/cfssl:latest
command:
- bash
- -c
- |-
cfssl gencert -initca /csr/ca-csr.json | cfssljson -bare /certs/ca &&
mv /certs/ca.pem /certs/ca.crt && mv /certs/ca-key.pem /certs/ca.key &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/peer-csr.json | cfssljson -bare /certs/peer &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/server-csr.json | cfssljson -bare /certs/server &&
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=client-authentication /csr/root-client-csr.json | cfssljson -bare /certs/root-client
volumeMounts:
- mountPath: /certs
name: certs
- mountPath: /csr
name: csr
containers:
- name: kubectl
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
command: ["/bin/sh", "-c"]
args:
- |
if kubectl get secret {{ include "etcd.caSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
echo "Secret {{ include "etcd.caSecretName" . }} already exists"
else
echo "Creating secret {{ include "etcd.caSecretName" . }}"
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem
fi
if kubectl get secret {{ include "etcd.clientSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
echo "Secret {{ include "etcd.clientSecretName" . }} already exists"
else
echo "Creating secret {{ include "etcd.clientSecretName" . }}"
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
fi
volumeMounts:
- mountPath: /certs
name: certs
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: csr
configMap:
name: {{ include "etcd.csrConfigMapName" . }}
- name: certs
emptyDir: {}


@@ -1,71 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install
"helm.sh/hook-weight": "10"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
name: "{{ .Release.Name }}-etcd-setup-2"
namespace: {{ .Release.Namespace }}
spec:
backoffLimit: 12
template:
metadata:
name: "{{ .Release.Name }}"
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: Never
initContainers:
- name: kubectl
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
command:
- sh
- -c
- kubectl --namespace={{ .Release.Namespace }} rollout status sts/{{ include "etcd.stsName" . }} --timeout=300s
containers:
- command:
- bash
- -c
- |-
etcdctl member list -w table
if etcdctl user get root &>/dev/null; then
echo "User already exists, nothing to do"
else
etcdctl user add --no-password=true root &&
etcdctl role add root &&
etcdctl user grant-role root root &&
etcdctl auth enable
fi
env:
- name: ETCDCTL_ENDPOINTS
value: https://{{ include "etcd.fullname" . }}-0.{{ include "etcd.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.clientPort }}
- name: ETCDCTL_CACERT
value: /opt/certs/ca/ca.crt
- name: ETCDCTL_CERT
value: /opt/certs/root-certs/tls.crt
- name: ETCDCTL_KEY
value: /opt/certs/root-certs/tls.key
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
imagePullPolicy: IfNotPresent
name: etcd-client
volumeMounts:
- name: root-certs
mountPath: /opt/certs/root-certs
- name: ca
mountPath: /opt/certs/ca
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- name: root-certs
secret:
secretName: {{ include "etcd.clientSecretName" . }}
- name: ca
secret:
secretName: {{ include "etcd.caSecretName" . }}


@@ -1,77 +0,0 @@
{{- if .Values.backup.enabled -}}
{{- if .Values.backup.s3.retention -}}
apiVersion: batch/v1
kind: Job
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": post-install,post-upgrade,post-rollback
"helm.sh/hook-weight": "5"
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
name: "{{ .Release.Name }}-s3-retention"
namespace: {{ .Release.Namespace }}
spec:
template:
metadata:
name: "{{ .Release.Name }}"
spec:
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
restartPolicy: OnFailure
containers:
- name: minio-client
image: {{ include "minio-client.fullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.backup.s3.image.pullPolicy }}
command:
- bash
- -c
- |-
cd ${MC_CONFIG_DIR}
if $MC alias set myminio ${S3_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} \
&& $MC ping myminio -c 3 -e 3 ; then
echo -e "\nCheck for already created object lifecycle management rule(s):"
if $MC ilm ls myminio/${S3_BUCKET} ; then
echo -e "\nObject lifecycle management rule(s) found - Clean up:"
$MC ilm rm --all --force myminio/${S3_BUCKET}
else
echo -e "\nNo object lifecycle management rule(s) found - Continue"
fi
echo -e "\nAdding object lifecycle management rule(s):"
$MC ilm add {{ .Values.backup.s3.retention }} myminio/${S3_BUCKET}
$MC ilm ls myminio/${S3_BUCKET}
else
echo -e "\nERROR: S3 storage could not be configured;\nCheck your S3 URL/Credentials or network connectivity"
exit 1
fi
env:
- name: S3_URL
value: {{ .Values.backup.s3.url | quote }}
- name: S3_ACCESS_KEY
{{- if .Values.backup.s3.accessKey.value }}
value: {{ .Values.backup.s3.accessKey.value | quote }}
{{- else }}
valueFrom:
{{- toYaml .Values.backup.s3.accessKey.valueFrom | nindent 12 }}
{{- end }}
- name: S3_SECRET_KEY
{{- if .Values.backup.s3.secretKey.value }}
value: {{ .Values.backup.s3.secretKey.value | quote }}
{{- else }}
valueFrom:
{{- toYaml .Values.backup.s3.secretKey.valueFrom | nindent 12 }}
{{- end }}
- name: S3_BUCKET
value: {{ .Values.backup.s3.bucket | quote }}
- name: MC_CONFIG_DIR
value: /tmp
- name: MC
value: "/usr/bin/mc --config-dir ${MC_CONFIG_DIR}"
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
{{- end }}
{{- end }}


@@ -1,46 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
{{- with .Values.customAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "etcd.fullname" . }}-metrics-role
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
name: {{ include "etcd.fullname" . }}-metrics-rolebinding
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "etcd.fullname" . }}-metrics-role
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceMonitor.serviceAccount.name }}
namespace: {{ .Values.serviceMonitor.serviceAccount.namespace | default .Release.Namespace }}
{{- end }}


@@ -1,60 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
"helm.sh/hook": pre-install,post-install,pre-delete
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
"helm.sh/hook-weight": "5"
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.roleName" . }}
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- patch
- delete
resourceNames:
- {{ include "etcd.caSecretName" . }}
- {{ include "etcd.clientSecretName" . }}
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- watch
- patch
resourceNames:
- {{ include "etcd.stsName" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
"helm.sh/hook": pre-install,post-install,pre-delete
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
"helm.sh/hook-weight": "5"
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.roleBindingName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "etcd.roleName" . }}
subjects:
- kind: ServiceAccount
name: {{ include "etcd.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}


@@ -1,15 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "etcd.serviceAccountName" . }}
labels:
{{- include "etcd.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": pre-install
"helm.sh/hook-delete-policy": "before-hook-creation,hook-failed"
"helm.sh/hook-weight": "0"
{{- with .Values.serviceAccount.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.serviceName" . }}
namespace: {{ .Release.Namespace }}
spec:
clusterIP: None
ports:
- port: {{ .Values.clientPort }}
name: client
- port: {{ .Values.peerApiPort }}
name: peer
- port: {{ .Values.metricsPort }}
name: metrics
selector:
{{- include "etcd.selectorLabels" . | nindent 4 }}


@@ -1,47 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "etcd.fullname" . }}-monitor
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
labels:
{{- include "etcd.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceMonitor.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
{{- with .Values.serviceMonitor.endpoint }}
- interval: {{ .interval }}
port: metrics
path: /metrics
{{- with .scrapeTimeout }}
scrapeTimeout: {{ . }}
{{- end }}
{{- with .metricRelabelings }}
metricRelabelings: {{- toYaml . | nindent 6 }}
{{- end }}
{{- with .relabelings }}
relabelings: {{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
jobLabel: app.kubernetes.io/name
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels: {{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- if .Values.serviceMonitor.matchLabels }}
{{- toYaml .Values.serviceMonitor.matchLabels | nindent 6 }}
{{- else }}
{{- include "etcd.labels" . | nindent 6 }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}


@@ -1,117 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
{{- include "etcd.labels" . | nindent 4 }}
name: {{ include "etcd.stsName" . }}
namespace: {{ .Release.Namespace }}
spec:
serviceName: {{ include "etcd.serviceName" . }}
selector:
matchLabels:
{{- include "etcd.selectorLabels" . | nindent 6 }}
replicas: {{ .Values.replicas }}
template:
metadata:
name: etcd
labels:
{{- include "etcd.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
annotations:
{{- if .Values.podAnnotations }}
{{- toYaml .Values.podAnnotations | nindent 8 }}
{{- end }}
spec:
volumes:
- name: certs
secret:
secretName: {{ include "etcd.caSecretName" . }}
containers:
- name: etcd
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
ports:
- containerPort: {{ .Values.clientPort }}
name: client
- containerPort: {{ .Values.peerApiPort }}
name: peer
- containerPort: {{ .Values.metricsPort }}
name: metrics
volumeMounts:
- name: data
mountPath: /var/run/etcd
- name: certs
mountPath: /etc/etcd/pki
command:
- etcd
- --data-dir=/var/run/etcd
- --name=$(POD_NAME)
- --initial-cluster-state=new
- --initial-cluster={{ include "etcd.initialCluster" . }}
- --initial-advertise-peer-urls=https://$(POD_NAME).{{ include "etcd.serviceName" . }}.$(POD_NAMESPACE).svc.cluster.local:{{ .Values.peerApiPort }}
- --advertise-client-urls=https://$(POD_NAME).{{ include "etcd.serviceName" . }}.$(POD_NAMESPACE).svc.cluster.local:{{ .Values.clientPort }}
- --initial-cluster-token=kamaji
- --listen-client-urls=https://0.0.0.0:{{ .Values.clientPort }}
- --listen-metrics-urls=http://0.0.0.0:{{ .Values.metricsPort }}
- --listen-peer-urls=https://0.0.0.0:{{ .Values.peerApiPort }}
- --client-cert-auth=true
- --peer-client-cert-auth=true
- --trusted-ca-file=/etc/etcd/pki/ca.crt
- --cert-file=/etc/etcd/pki/server.pem
- --key-file=/etc/etcd/pki/server-key.pem
- --peer-trusted-ca-file=/etc/etcd/pki/ca.crt
- --peer-cert-file=/etc/etcd/pki/peer.pem
- --peer-key-file=/etc/etcd/pki/peer-key.pem
- --auto-compaction-mode={{ .Values.autoCompactionMode }}
- --auto-compaction-retention={{ .Values.autoCompactionRetention }}
- --snapshot-count={{ .Values.snapshotCount }}
- --quota-backend-bytes={{ .Values.quotaBackendBytes }}
{{- with .Values.extraArgs }}
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.livenessProbe }}
livenessProbe:
{{- toYaml . | nindent 12 }}
{{- end }}
priorityClassName: {{- toYaml .Values.priorityClassName | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations: {{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints: {{- toYaml . | nindent 8 }}
{{- end }}
volumeClaimTemplates:
- metadata:
name: data
{{- with .Values.persistentVolumeClaim.customAnnotations }}
annotations:
{{- toYaml . | nindent 10 }}
{{- end }}
spec:
storageClassName: {{ .Values.persistentVolumeClaim.storageClassName }}
accessModes:
{{- range .Values.persistentVolumeClaim.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistentVolumeClaim.size }}


@@ -1,223 +0,0 @@
# Default values for kamaji-etcd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Size of the etcd cluster
replicas: 3
# -- Install an etcd with multi-tenancy enabled
serviceAccount:
# -- Create a ServiceAccount, required to install and provision the etcd backing storage (default: true)
create: true
# -- Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "")
name: ""
image:
# -- Install image from specific repo
repository: quay.io/coreos/etcd
# -- Install image with specific tag, overwrite the tag in the chart
tag: ""
# -- Pull policy to use
pullPolicy: IfNotPresent
# -- The peer API port which servers are listening to.
peerApiPort: 2380
# -- The client request port.
clientPort: 2379
# -- The port where etcd exposes metrics.
metricsPort: 2381
# -- The livenessProbe for the etcd container
livenessProbe: {}
# failureThreshold: 8
# httpGet:
# path: /health?serializable=true
# port: 2381
# scheme: HTTP
# initialDelaySeconds: 10
# periodSeconds: 10
# timeoutSeconds: 15
# -- A list of extra arguments to add to the etcd default ones
extraArgs: []
#- --log-level=warn
#- --logger=zap
# -- Interpret 'auto-compaction-retention' one of: periodic|revision. Use 'periodic' for duration based retention, 'revision' for revision number based retention.
autoCompactionMode: periodic
# -- Auto compaction retention length. 0 means disable auto compaction.
autoCompactionRetention: 5m
# -- Number of committed transactions to trigger a snapshot to disk.
snapshotCount: "10000"
# -- Raise alarms when backend size exceeds the given quota. It will put the cluster into a maintenance mode which only accepts key reads and deletes.
quotaBackendBytes: "8589934592" # 8Gi
persistentVolumeClaim:
# -- The size of persistent storage for etcd data
size: 10Gi
# -- A specific storage class
storageClassName: ""
# -- The Access Mode to storage
accessModes:
- ReadWriteOnce
# -- The custom annotations to add to the PVC
customAnnotations: {}
# volumeType: local
# -- Enable storage defragmentation
defragmentation:
# -- The job scheduled maintenance time for defrag (empty to disable)
schedule: "*/15 * * * *" # https://crontab.guru/
# -- Enable storage backup
backup:
# -- Enable scheduling backup job
enabled: false
# -- Enable backup for all endpoints. When disabled, only the leader will be taken
all: false
# -- The job scheduled maintenance time for backup
schedule: "20 3 * * *" # https://crontab.guru/
# -- The backup file name prefix
snapshotNamePrefix: mysnapshot
# -- The backup file date format (bash)
snapshotDateFormat: $(date +%Y%m%d)
# -- The S3 storage config section
s3:
# -- The S3 storage url
url: http://mys3storage:9000
# -- The S3 storage bucket
bucket: mybucket
# -- The S3 storage object lifecycle management rules; N.B. enabling this option will delete previously set lifecycle rules
retention: "" #"--expiry-days 7"
# -- The S3 storage ACCESS KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret.
accessKey:
value: ""
valueFrom: {}
# secretKeyRef:
# key: access_key
# name: minio-key
# -- The S3 storage SECRET KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret.
secretKey:
value: ""
valueFrom: {}
# secretKeyRef:
# key: secret_key
# name: minio-key
# -- The S3 client image config section
image:
# -- Install image from specific repo
repository: minio/mc
# -- Install image with specific tag
tag: "RELEASE.2022-11-07T23-47-39Z"
# -- Pull policy to use
pullPolicy: IfNotPresent
# -- Labels to add to all etcd pods
podLabels:
application: kamaji-etcd
# -- Annotations to add to all etcd pods
podAnnotations: {}
# -- The securityContext to apply to etcd
securityContext:
allowPrivilegeEscalation: false
# -- The priorityClassName to apply to etcd
priorityClassName: system-cluster-critical
# -- Resources assigned to the etcd containers
resources:
limits: {}
requests: {}
# -- Kubernetes node selector rules to schedule etcd
nodeSelector:
kubernetes.io/os: linux
# -- Kubernetes node taints that the etcd pods would tolerate
tolerations: []
# -- Kubernetes affinity rules to apply to etcd controller pods
affinity: {}
# -- Kubernetes topology spread constraints to apply to etcd controller pods
topologySpreadConstraints: []
#- maxSkew: 1
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# application: kamaji-etcd
datastore:
# -- Create a datastore custom resource for Kamaji
enabled: false
serviceMonitor:
# -- Enable ServiceMonitor for Prometheus
enabled: false
# -- Install the ServiceMonitor into a different namespace, such as the one used by the monitoring stack (default: the release namespace)
namespace: ''
# -- Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
labels: {}
# -- Assign additional Annotations
annotations: {}
# -- Change matching labels
matchLabels: {}
# -- Set targetLabels for the serviceMonitor
targetLabels: []
serviceAccount:
# -- ServiceAccount for Metrics RBAC
name: etcd
# -- ServiceAccount Namespace for Metrics RBAC
namespace: etcd-system
endpoint:
# -- Set the scrape interval for the endpoint of the serviceMonitor
interval: "15s"
# -- Set the scrape timeout for the endpoint of the serviceMonitor
scrapeTimeout: ""
# -- Set metricRelabelings for the endpoint of the serviceMonitor
metricRelabelings: []
# -- Set relabelings for the endpoint of the serviceMonitor
relabelings: []
#- action: replace
# regex: (.+)
# replacement: $1
# sourceLabels:
# - __meta_kubernetes_pod_name
# targetLabel: member
#
alerts:
# -- Enable alerts for Alertmanager
enabled: false
# -- Install the alerts into a different namespace, such as the one used by the monitoring stack (default: the release namespace)
namespace: ''
# -- Assign additional labels according to Prometheus' Alerts matching labels
labels: {}
# -- Assign additional Annotations
annotations: {}
# -- The rules for alerts
rules: []
# - alert: etcdNoLeader
# annotations:
# message: 'etcd cluster: member {{ $labels.instance }} has no leader.'
# expr: count(etcd_server_has_leader{job=~".*etcd.*"}) == 0
# for: 1m
# labels:
# severity: critical
# - alert: EtcdDataBaseSize
# annotations:
# message: 'etcd cluster: "member {{ $labels.instance }} db has almost exceeded 8GB".'
# expr: |-
# etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"} >= 8589934592
# for: 15m
# labels:
# severity: critical
#


@@ -1,12 +0,0 @@
diff --git a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml
index 95a2671..bd8ddcb 100644
--- a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml
+++ b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml
@@ -57,6 +57,7 @@ data:
"hosts": [
{{- range $count := until (int $.Values.replicas) -}}
{{ printf "\"%s-%d.%s.%s.svc.cluster.local\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
+ {{ printf "\"%s-%d.%s.%s.svc\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
{{- end }}
"etcd-server.{{ .Release.Namespace }}.svc.cluster.local",
"etcd-server.{{ .Release.Namespace }}.svc",


@@ -1,31 +0,0 @@
diff --git a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
index 4f7014e..403e187 100644
--- a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
+++ b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
@@ -9,8 +9,17 @@ Expand the name of the chart.
Create a default fully qualified etcd name.
*/}}
{{- define "etcd.fullname" -}}
-{{- .Release.Name }}
-{{- end }}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
{{/*
Create chart name and version as used by the chart label.
@@ -156,4 +165,4 @@ Create the minio-client fully-qualified Docker image to use
*/}}
{{- define "minio-client.fullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.backup.s3.image.repository .Values.backup.s3.image.tag -}}
-{{- end }}
\ No newline at end of file
+{{- end }}


@@ -1,16 +0,0 @@
diff --git a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
index 403e187..e68a967 100644
--- a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
+++ b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
@@ -119,11 +119,7 @@ Name of the etcd root-client secret.
Retrieve the current Kubernetes version to launch a kubectl container with the minimum version skew possible.
*/}}
{{- define "etcd.jobsTagKubeVersion" -}}
-{{- if contains "-eks-" .Capabilities.KubeVersion.GitVersion }}
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
-{{- else }}
-{{- print "v" .Capabilities.KubeVersion.Major "." .Capabilities.KubeVersion.Minor -}}
-{{- end }}
{{- end }}
{{/*


@@ -1,33 +0,0 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
name: {{ .Release.Namespace }}
spec:
driver: etcd
endpoints:
- etcd-0.etcd.{{ .Release.Namespace }}.svc:2379
- etcd-1.etcd.{{ .Release.Namespace }}.svc:2379
- etcd-2.etcd.{{ .Release.Namespace }}.svc:2379
tlsConfig:
certificateAuthority:
certificate:
secretReference:
keyPath: ca.crt
name: etcd-certs
namespace: {{ .Release.Namespace }}
privateKey:
secretReference:
keyPath: ca.key
name: etcd-certs
namespace: {{ .Release.Namespace }}
clientCertificate:
certificate:
secretReference:
keyPath: tls.crt
name: etcd-root-client-certs
namespace: {{ .Release.Namespace }}
privateKey:
secretReference:
keyPath: tls.key
name: etcd-root-client-certs
namespace: {{ .Release.Namespace }}