Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-30 18:19:02 +00:00)

Compare commits: setup-env...v0.34.0-be (28 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 08cb7c0f28 |  |
|  | 847980f03d |  |
|  | 0ecb8585bc |  |
|  | 32aea4254b |  |
|  | e49918745e |  |
|  | 220c347cc5 |  |
|  | a4ec46a941 |  |
|  | 2c126786b3 |  |
|  | 784f1454ba |  |
|  | 9d9226b575 |  |
|  | 9ec5863a75 |  |
|  | 50f3089f14 |  |
|  | 1aadefef75 |  |
|  | 5727110542 |  |
|  | f2fffb03e4 |  |
|  | ab5eae3fbc |  |
|  | 38cf5fd58c |  |
|  | cda554b58c |  |
|  | a73794d751 |  |
|  | 81a412517c |  |
|  | 23a7281fbf |  |
|  | f32c6426a9 |  |
|  | 91583a4e1a |  |
|  | 9af6ce25bc |  |
|  | e70dfdec31 |  |
|  | 08c0eecbc5 |  |
|  | 1db08d0b73 |  |
|  | b2ed7525cd |  |
.github/workflows/tags.yaml (vendored, 1 line changed)
@@ -118,6 +118,7 @@ jobs:
git config user.name "cozystack-bot"
git config user.email "217169706+cozystack-bot@users.noreply.github.com"
git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
git config --unset-all http.https://github.com/.extraheader || true
git add .
git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
git push origin HEAD || true
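Judging by the hunk header (six lines becoming seven), the single added line is the `git config --unset-all http.https://github.com/.extraheader || true` call. `actions/checkout` persists the job token as an `http.<url>.extraheader` auth header, which can take precedence over the PAT embedded in the rewritten remote URL; clearing it lets the push authenticate as `cozystack-bot`. A quick way to check for such a header in a checkout (a sketch, not part of the workflow):

```sh
# Show any auth header left behind by actions/checkout for github.com.
# If one is printed, pushes may use the job token instead of the PAT-based remote URL.
git config --get-all http.https://github.com/.extraheader || echo "no extra header configured"
```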
@@ -12,10 +12,10 @@ repos:
name: Run 'make generate' in all app directories
entry: |
/bin/bash -c '
for dir in ./packages/apps/*/; do
for dir in ./packages/apps/*/ ./packages/extra/*/ ./packages/system/cozystack-api/; do
if [ -d "$dir" ]; then
echo "Running make generate in $dir"
(cd "$dir" && make generate)
make generate -C "$dir"
fi
done
git diff --color=always | cat
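Unrolled, the hook's inline script after this change reads roughly as follows (a readability sketch; the exact indentation inside `.pre-commit-config.yaml` is an assumption):

```sh
# Run code generation for every app chart, every extra chart, and the
# cozystack-api chart, then print any resulting diff for inspection.
for dir in ./packages/apps/*/ ./packages/extra/*/ ./packages/system/cozystack-api/; do
  if [ -d "$dir" ]; then
    echo "Running make generate in $dir"
    make generate -C "$dir"
  fi
done
git diff --color=always | cat
```

The switch from the subshell form `(cd "$dir" && make generate)` to `make generate -C "$dir"` is behaviorally equivalent here; it simply lets make perform the directory change itself.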
docs/changelogs/v0.31.1.md (new file, 8 lines)
@@ -0,0 +1,8 @@
## Fixes

* [build] Update Talos Linux v1.10.3 and fix assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* [ci] Fix uploading released artifacts to GitHub. (@kvaps in https://github.com/cozystack/cozystack/pull/1009)
* [ci] Separate build and testing jobs. (@kvaps in https://github.com/cozystack/cozystack/pull/1005)
* [docs] Write a full release post for v0.31.1. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/999)

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.31.1
docs/changelogs/v0.31.2.md (new file, 12 lines)
@@ -0,0 +1,12 @@
## Security

* Resolve a security problem that allowed a tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062, backported in https://github.com/cozystack/cozystack/pull/1066)

## Fixes

* [platform] Fix dependencies in `distro-full` bundle. (@klinch0 in https://github.com/cozystack/cozystack/pull/1056, backported in https://github.com/cozystack/cozystack/pull/1064)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031, backported in https://github.com/cozystack/cozystack/pull/1037)
* [platform] Reduce system resource consumption by using smaller resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054, backported in https://github.com/cozystack/cozystack/pull/1058)
* [dashboard] Fix a number of issues in the Cozystack Dashboard (@kvaps in https://github.com/cozystack/cozystack/pull/1042, backported in https://github.com/cozystack/cozystack/pull/1066)
* [apps] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040, backported in https://github.com/cozystack/cozystack/pull/1041)
* [apps] Update built-in documentation and configuration reference for managed Clickhouse application. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1059, backported in https://github.com/cozystack/cozystack/pull/1065)
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:b7633717cd7449c0042ae92d8ca9b36e4d69566561f5c7d44e21058e7d05c6d5
ghcr.io/cozystack/cozystack/nginx-cache:0.6.0@sha256:50ac1581e3100bd6c477a71161cb455a341ffaf9e5e2f6086802e4e25271e8af
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.1@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
ghcr.io/cozystack/cozystack/cluster-autoscaler:0.25.2@sha256:3a8170433e1632e5cc2b6d9db34d0605e8e6c63c158282c38450415e700e932e
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.1@sha256:412ed2b3c77249bd1b973e6dc9c87976d31863717fb66ba74ccda573af737eb1
ghcr.io/cozystack/cozystack/kubevirt-cloud-provider:0.25.2@sha256:e522960064290747a67502d4e8927c591bdb290bad1f0bae88a02758ebfd380f
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.2@sha256:761e7235ff9cb7f6f223f00954943e6a5af32ed6624ee592a8610122f96febb0
@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/cozystack/cozystack/installer:v0.33.1@sha256:03a0002be9cf5926643c295bbf05c3e250401b0f0595b9fcd147d53534f368f5
image: ghcr.io/cozystack/cozystack/installer:v0.34.0-beta.1@sha256:6f29c93e52d686ae6144d64bbcd92c138cbd1b432b06a74273c5dc35b11fe048
@@ -1,2 +1,2 @@
e2e:
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.33.1@sha256:eed183a4104b1c142f6c4a358338749efe73baefddd53d7fe4c7149ecb892ce1
image: ghcr.io/cozystack/cozystack/e2e-sandbox:v0.34.0-beta.1@sha256:f0a7a45218122b57022e51d41c0e6b18d31621c8ec504651d2347f47e5e5f256
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/matchbox:v0.33.1@sha256:ca3638c620215ace26ace3f7e8b27391847ab2158b5a67f070f43dcbea071532
ghcr.io/cozystack/cozystack/matchbox:v0.34.0-beta.1@sha256:a0bd0076e0bc866858d3f08adca5944fb75004ad0ead2ace369d1c155d780383
@@ -7,7 +7,6 @@
| Name | Description | Value |
| ---------------- | ----------------------------------------------------------------- | ------- |
| `replicas` | Number of ingress-nginx replicas | `2` |
| `externalIPs` | List of externalIPs for service. | `[]` |
| `whitelist` | List of client networks | `[]` |
| `clouflareProxy` | Restoring original visitor IPs when Cloudflare proxied is enabled | `false` |
@@ -7,14 +7,6 @@
"description": "Number of ingress-nginx replicas",
"default": 2
},
"externalIPs": {
"type": "array",
"description": "List of externalIPs for service.",
"default": "[]",
"items": {
"type": "string"
}
},
"whitelist": {
"type": "array",
"description": "List of client networks",
@@ -1 +1 @@
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:b748d9add5fc4080b143d8690ca1ad851d911948ac8eb296dd9005d53d153c05
ghcr.io/cozystack/cozystack/s3manager:v0.5.0@sha256:45e02729edbee171519068b23cd3516009315769b36f59465c420a618320e363
@@ -79,7 +79,7 @@ annotations:
Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can
be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n"
apiVersion: v2
appVersion: 1.17.4
appVersion: 1.17.5
description: eBPF-based Networking, Security, and Observability
home: https://cilium.io/
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
@@ -95,4 +95,4 @@ kubeVersion: '>= 1.21.0-0'
name: cilium
sources:
- https://github.com/cilium/cilium
version: 1.17.4
version: 1.17.5
@@ -1,6 +1,6 @@
|
||||
# cilium
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
Cilium is open source software for providing and transparently securing
|
||||
network connectivity and loadbalancing between application workloads such as
|
||||
@@ -85,7 +85,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
|
||||
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
|
||||
| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
|
||||
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:37f7b378a29ceb4c551b1b5582e27747b855bbfaa73fa11914fe0df028dc581f","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
|
||||
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:f85340bf132ae937d2c2a763b8335c9bab35d6e8293f70f606b9c6178d84f42b","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.37.0","useDigest":true}` | init container image of SPIRE agent and server |
|
||||
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
|
||||
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
|
||||
| authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations |
|
||||
@@ -197,7 +197,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
|
||||
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
|
||||
| clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. |
|
||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:0b72f3046cf36ff9b113d53cc61185e893edb5fe728a2c9e561c1083f806453d","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.4","useDigest":true}` | Clustermesh API server image. |
|
||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:78dc40b9cb8d7b1ad21a76ff3e11541809acda2ac4ef94150cc832100edc247d","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.17.5","useDigest":true}` | Clustermesh API server image. |
|
||||
| clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
|
||||
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
|
||||
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
|
||||
@@ -243,6 +243,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| clustermesh.apiserver.service.enableSessionAffinity | string | `"HAOnly"` | Defines when to enable session affinity. Each replica in a clustermesh-apiserver deployment runs its own discrete etcd cluster. Remote clients connect to one of the replicas through a shared Kubernetes Service. A client reconnecting to a different backend will require a full resync to ensure data integrity. Session affinity can reduce the likelihood of this happening, but may not be supported by all cloud providers. Possible values: - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. - "Always" Always enable session affinity. - "Never" Never enable session affinity. Useful in environments where session affinity is not supported, but may lead to slightly degraded performance due to more frequent reconnections. |
|
||||
| clustermesh.apiserver.service.externalTrafficPolicy | string | `"Cluster"` | The externalTrafficPolicy of service used for apiserver access. |
|
||||
| clustermesh.apiserver.service.internalTrafficPolicy | string | `"Cluster"` | The internalTrafficPolicy of service used for apiserver access. |
|
||||
| clustermesh.apiserver.service.labels | object | `{}` | Labels for the clustermesh-apiserver service. |
|
||||
| clustermesh.apiserver.service.loadBalancerClass | string | `nil` | Configure a loadBalancerClass. Allows to configure the loadBalancerClass on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer (requires Kubernetes 1.24+). |
|
||||
| clustermesh.apiserver.service.loadBalancerIP | string | `nil` | Configure a specific loadBalancerIP. Allows to configure a specific loadBalancerIP on the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. |
|
||||
| clustermesh.apiserver.service.loadBalancerSourceRanges | list | `[]` | Configure loadBalancerSourceRanges. Allows to configure the source IP ranges allowed to access the clustermesh-apiserver LB service in case the Service type is set to LoadBalancer. |
|
||||
@@ -377,7 +378,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| envoy.healthPort | int | `9878` | TCP port for the health API. |
|
||||
| envoy.httpRetryCount | int | `3` | Maximum number of retries for each HTTP request |
|
||||
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
|
||||
| envoy.image | object | `{"digest":"sha256:a04218c6879007d60d96339a441c448565b6f86650358652da27582e0efbf182","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.32.6-1746661844-0f602c28cb2aa57b29078195049fb257d5b5246c","useDigest":true}` | Envoy container image. |
|
||||
| envoy.image | object | `{"digest":"sha256:9f69e290a7ea3d4edf9192acd81694089af048ae0d8a67fb63bd62dc1d72203e","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.32.6-1749271279-0864395884b263913eac200ee2048fd985f8e626","useDigest":true}` | Envoy container image. |
|
||||
| envoy.initialFetchTimeoutSeconds | int | `30` | Time in seconds after which the initial fetch on an xDS stream is considered timed out |
|
||||
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
|
||||
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
|
||||
@@ -518,7 +519,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
|
||||
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
|
||||
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
|
||||
| hubble.relay.image | object | `{"digest":"sha256:c16de12a64b8b56de62b15c1652d036253b40cd7fa643d7e1a404dc71dc66441","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.4","useDigest":true}` | Hubble-relay container image. |
|
||||
| hubble.relay.image | object | `{"digest":"sha256:fbb8a6afa8718200fca9381ad274ed695792dbadd2417b0e99c36210ae4964ff","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.17.5","useDigest":true}` | Hubble-relay container image. |
|
||||
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
|
||||
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
|
||||
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
@@ -625,7 +626,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
|
||||
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd`, `kvstore` or `doublewrite-readkvstore` / `doublewrite-readcrd` for migrating between identity backends). |
|
||||
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
|
||||
| image | object | `{"digest":"sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.4","useDigest":true}` | Agent container image. |
|
||||
| image | object | `{"digest":"sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.5","useDigest":true}` | Agent container image. |
|
||||
| imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images |
|
||||
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
|
||||
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
|
||||
@@ -763,7 +764,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| operator.hostNetwork | bool | `true` | HostNetwork setting |
|
||||
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
|
||||
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:eaa7b18b7cda65af1d454d54224d175fdb69a35199fa949ae7dfda2789c18dd6","awsDigest":"sha256:3c31583e57648470fbf6646ac67122ac5896ce5f979ab824d9a38cfc7eafc753","azureDigest":"sha256:d8d95049bfeab47cb1a3f995164e1ca2cdec8e6c7036c29799647999cdae07b1","genericDigest":"sha256:a3906412f477b09904f46aac1bed28eb522bef7899ed7dd81c15f78b7aa1b9b5","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.4","useDigest":true}` | cilium-operator image. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:654db67929f716b6178a34a15cb8f95e391465085bcf48cdba49819a56fcd259","awsDigest":"sha256:3e189ec1e286f1bf23d47c45bdeac6025ef7ec3d2dc16190ee768eb94708cbc3","azureDigest":"sha256:add78783fdaced7453a324612eeb9ebecf56002b56c14c73596b3b4923321026","genericDigest":"sha256:f954c97eeb1b47ed67d08cc8fb4108fb829f869373cbb3e698a7f8ef1085b09e","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.17.5","useDigest":true}` | cilium-operator image. |
|
||||
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
|
||||
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
|
||||
@@ -813,7 +814,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
|
||||
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
|
||||
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
|
||||
| preflight.image | object | `{"digest":"sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.4","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.image | object | `{"digest":"sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.17.5","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
|
||||
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
|
||||
|
||||
@@ -1008,7 +1008,7 @@ spec:
defaultMode: 0400
sources:
- secret:
name: {{ .Values.hubble.tls.server.existingSecret | default "hubble-metrics-server-certs" }}
name: {{ .Values.hubble.metrics.tls.server.existingSecret | default "hubble-metrics-server-certs" }}
optional: true
items:
- key: tls.crt
@@ -378,7 +378,7 @@ data:
bpf-events-default-burst-limit: {{ .Values.bpf.events.default.burstLimit | quote }}
{{- end}}

{{- if .Values.bpf.mapDynamicSizeRatio }}
{{- if ne 0.0 ( .Values.bpf.mapDynamicSizeRatio | float64) }}
# Specifies the ratio (0.0-1.0] of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
bpf-map-dynamic-size-ratio: {{ .Values.bpf.mapDynamicSizeRatio | quote }}
@@ -11,7 +11,9 @@ metadata:
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}

{{- with .Values.clustermesh.apiserver.service.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if or .Values.clustermesh.apiserver.service.annotations .Values.clustermesh.annotations }}
annotations:
{{- with .Values.clustermesh.annotations }}
@@ -597,7 +597,8 @@
"mapDynamicSizeRatio": {
"type": [
"null",
"number"
"number",
"string"
]
},
"masquerade": {
@@ -1246,6 +1247,9 @@
"Cluster"
]
},
"labels": {
"type": "object"
},
"loadBalancerClass": {
"type": [
"null",
@@ -191,10 +191,10 @@ image:
# @schema
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.17.4"
tag: "v1.17.5"
pullPolicy: "IfNotPresent"
# cilium-digest
digest: "sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a"
digest: "sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6"
useDigest: true
# -- Scheduling configurations for cilium pods
scheduling:
@@ -561,7 +561,7 @@ bpf:
# @schema
policyMapMax: 16384
# @schema
# type: [null, number]
# type: [null, number, string]
# @schema
# -- (float64) Configure auto-sizing for all BPF maps based on available memory.
# ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
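The widened annotation above, `# type: [null, number, string]`, matches the values.schema.json change earlier in this compare: `bpf.mapDynamicSizeRatio` may now be supplied as a number or as a string, and the ConfigMap template coerces it with `float64` before the `ne 0.0` check. For instance (a sketch; the chart path is a placeholder, not taken from this diff):

```sh
# Both forms should now pass the chart's schema validation.
helm template cilium <path-to-cilium-chart> --set bpf.mapDynamicSizeRatio=0.0025
helm template cilium <path-to-cilium-chart> --set-string bpf.mapDynamicSizeRatio=0.0025
```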
@@ -1440,9 +1440,9 @@ hubble:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/hubble-relay"
|
||||
tag: "v1.17.4"
|
||||
tag: "v1.17.5"
|
||||
# hubble-relay-digest
|
||||
digest: "sha256:c16de12a64b8b56de62b15c1652d036253b40cd7fa643d7e1a404dc71dc66441"
|
||||
digest: "sha256:fbb8a6afa8718200fca9381ad274ed695792dbadd2417b0e99c36210ae4964ff"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- Specifies the resources for the hubble-relay pods
|
||||
@@ -2353,9 +2353,9 @@ envoy:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium-envoy"
|
||||
tag: "v1.32.6-1746661844-0f602c28cb2aa57b29078195049fb257d5b5246c"
|
||||
tag: "v1.32.6-1749271279-0864395884b263913eac200ee2048fd985f8e626"
|
||||
pullPolicy: "IfNotPresent"
|
||||
digest: "sha256:a04218c6879007d60d96339a441c448565b6f86650358652da27582e0efbf182"
|
||||
digest: "sha256:9f69e290a7ea3d4edf9192acd81694089af048ae0d8a67fb63bd62dc1d72203e"
|
||||
useDigest: true
|
||||
# -- Additional containers added to the cilium Envoy DaemonSet.
|
||||
extraContainers: []
|
||||
@@ -2710,15 +2710,15 @@ operator:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/operator"
|
||||
tag: "v1.17.4"
|
||||
tag: "v1.17.5"
|
||||
# operator-generic-digest
|
||||
genericDigest: "sha256:a3906412f477b09904f46aac1bed28eb522bef7899ed7dd81c15f78b7aa1b9b5"
|
||||
genericDigest: "sha256:f954c97eeb1b47ed67d08cc8fb4108fb829f869373cbb3e698a7f8ef1085b09e"
|
||||
# operator-azure-digest
|
||||
azureDigest: "sha256:d8d95049bfeab47cb1a3f995164e1ca2cdec8e6c7036c29799647999cdae07b1"
|
||||
azureDigest: "sha256:add78783fdaced7453a324612eeb9ebecf56002b56c14c73596b3b4923321026"
|
||||
# operator-aws-digest
|
||||
awsDigest: "sha256:3c31583e57648470fbf6646ac67122ac5896ce5f979ab824d9a38cfc7eafc753"
|
||||
awsDigest: "sha256:3e189ec1e286f1bf23d47c45bdeac6025ef7ec3d2dc16190ee768eb94708cbc3"
|
||||
# operator-alibabacloud-digest
|
||||
alibabacloudDigest: "sha256:eaa7b18b7cda65af1d454d54224d175fdb69a35199fa949ae7dfda2789c18dd6"
|
||||
alibabacloudDigest: "sha256:654db67929f716b6178a34a15cb8f95e391465085bcf48cdba49819a56fcd259"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
suffix: ""
|
||||
@@ -2993,9 +2993,9 @@ preflight:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/cilium"
|
||||
tag: "v1.17.4"
|
||||
tag: "v1.17.5"
|
||||
# cilium-digest
|
||||
digest: "sha256:24a73fe795351cf3279ac8e84918633000b52a9654ff73a6b0d7223bcff4a67a"
|
||||
digest: "sha256:baf8541723ee0b72d6c489c741c81a6fdc5228940d66cb76ef5ea2ce3c639ea6"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- The priority class to use for the preflight pod.
|
||||
@@ -3142,9 +3142,9 @@ clustermesh:
|
||||
# @schema
|
||||
override: ~
|
||||
repository: "quay.io/cilium/clustermesh-apiserver"
|
||||
tag: "v1.17.4"
|
||||
tag: "v1.17.5"
|
||||
# clustermesh-apiserver-digest
|
||||
digest: "sha256:0b72f3046cf36ff9b113d53cc61185e893edb5fe728a2c9e561c1083f806453d"
|
||||
digest: "sha256:78dc40b9cb8d7b1ad21a76ff3e11541809acda2ac4ef94150cc832100edc247d"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# -- TCP port for the clustermesh-apiserver health API.
|
||||
@@ -3246,6 +3246,8 @@ clustermesh:
|
||||
# * EKS: service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
|
||||
# * GKE: networking.gke.io/load-balancer-type: "Internal"
|
||||
annotations: {}
|
||||
# -- Labels for the clustermesh-apiserver service.
|
||||
labels: {}
|
||||
# @schema
|
||||
# enum: [Local, Cluster]
|
||||
# @schema
|
||||
@@ -3651,7 +3653,7 @@ authentication:
|
||||
override: ~
|
||||
repository: "docker.io/library/busybox"
|
||||
tag: "1.37.0"
|
||||
digest: "sha256:37f7b378a29ceb4c551b1b5582e27747b855bbfaa73fa11914fe0df028dc581f"
|
||||
digest: "sha256:f85340bf132ae937d2c2a763b8335c9bab35d6e8293f70f606b9c6178d84f42b"
|
||||
useDigest: true
|
||||
pullPolicy: "IfNotPresent"
|
||||
# SPIRE agent configuration
|
||||
|
||||
@@ -566,7 +566,7 @@ bpf:
|
||||
# @schema
|
||||
policyMapMax: 16384
|
||||
# @schema
|
||||
# type: [null, number]
|
||||
# type: [null, number, string]
|
||||
# @schema
|
||||
# -- (float64) Configure auto-sizing for all BPF maps based on available memory.
|
||||
# ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
|
||||
@@ -3276,6 +3276,8 @@ clustermesh:
|
||||
# * EKS: service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
|
||||
# * GKE: networking.gke.io/load-balancer-type: "Internal"
|
||||
annotations: {}
|
||||
# -- Labels for the clustermesh-apiserver service.
|
||||
labels: {}
|
||||
# @schema
|
||||
# enum: [Local, Cluster]
|
||||
# @schema
|
||||
|
||||
@@ -1,2 +1,2 @@
ARG VERSION=v1.17.4
ARG VERSION=v1.17.5
FROM quay.io/cilium/cilium:${VERSION}
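Since the base image version comes from a build argument, rebuilding this wrapper image against the new upstream release is roughly (a sketch; the output tag is a placeholder, and any later Dockerfile instructions are not shown in this hunk):

```sh
docker build --build-arg VERSION=v1.17.5 -t cilium-wrapper:v1.17.5 .
```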
@@ -14,7 +14,7 @@ cilium:
mode: "kubernetes"
image:
repository: ghcr.io/cozystack/cozystack/cilium
tag: 1.17.4
digest: "sha256:91f628cbdc4652b4459af79c5a0501282cc0bc0a9fc11e3d8cb65e884f94e751"
tag: 1.17.5
digest: "sha256:2def2dccfc17870be6e1d63584c25b32e812f21c9cdcfa06deadd2787606654d"
envoy:
enabled: false
@@ -21,3 +21,8 @@ image-cozystack-api:
IMAGE="$(REGISTRY)/cozystack-api:$(call settag,$(TAG))@$$(yq e '."containerimage.digest"' images/cozystack-api.json -o json -r)" \
yq -i '.cozystackAPI.image = strenv(IMAGE)' values.yaml
rm -f images/cozystack-api.json

generate:
rm -rf openapi-schemas
mkdir -p openapi-schemas
find ../../apps ../../extra -maxdepth 2 -name values.schema.json -exec sh -c 'ln -s ../{} openapi-schemas/$$(basename $$(dirname {})).json' \;
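The new `generate` target produces one symlink per chart, named after the chart directory and pointing back at that chart's `values.schema.json` (these are the new symlinks listed just below). Run by hand it would look roughly like this (a sketch with the Makefile's `$$` unescaped to `$`; the `{}` substitution inside the quoted script relies on GNU find behaviour):

```sh
cd packages/system/cozystack-api
rm -rf openapi-schemas
mkdir -p openapi-schemas
# e.g. openapi-schemas/kubernetes.json -> ../../../apps/kubernetes/values.schema.json
find ../../apps ../../extra -maxdepth 2 -name values.schema.json \
  -exec sh -c 'ln -s ../{} openapi-schemas/$(basename $(dirname {})).json' \;
```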
New symbolic links under `packages/system/cozystack-api/openapi-schemas/`, each a one-line file pointing at the corresponding chart's `values.schema.json`:

| Symbolic link | Target |
|---|---|
| bootbox.json (vendored) | ../../../extra/bootbox/values.schema.json |
| bucket.json | ../../../apps/bucket/values.schema.json |
| clickhouse.json | ../../../apps/clickhouse/values.schema.json |
| etcd.json | ../../../extra/etcd/values.schema.json |
| ferretdb.json | ../../../apps/ferretdb/values.schema.json |
| http-cache.json | ../../../apps/http-cache/values.schema.json |
| info.json | ../../../extra/info/values.schema.json |
| ingress.json | ../../../extra/ingress/values.schema.json |
| kafka.json | ../../../apps/kafka/values.schema.json |
| kubernetes.json | ../../../apps/kubernetes/values.schema.json |
| monitoring.json | ../../../extra/monitoring/values.schema.json |
| mysql.json | ../../../apps/mysql/values.schema.json |
| nats.json | ../../../apps/nats/values.schema.json |
| postgres.json | ../../../apps/postgres/values.schema.json |
| rabbitmq.json | ../../../apps/rabbitmq/values.schema.json |
| redis.json | ../../../apps/redis/values.schema.json |
| seaweedfs.json | ../../../extra/seaweedfs/values.schema.json |
| tcp-balancer.json | ../../../apps/tcp-balancer/values.schema.json |
| tenant.json | ../../../apps/tenant/values.schema.json |
| virtual-machine.json | ../../../apps/virtual-machine/values.schema.json |
| vm-disk.json | ../../../apps/vm-disk/values.schema.json |
| vm-instance.json | ../../../apps/vm-instance/values.schema.json |
| vpn.json | ../../../apps/vpn/values.schema.json |
@@ -10,6 +10,7 @@ data:
|
||||
kind: Bucket
|
||||
singular: bucket
|
||||
plural: buckets
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/bucket.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: bucket-
|
||||
labels:
|
||||
@@ -24,6 +25,7 @@ data:
|
||||
kind: ClickHouse
|
||||
singular: clickhouse
|
||||
plural: clickhouses
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/clickhouse.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: clickhouse-
|
||||
labels:
|
||||
@@ -38,6 +40,7 @@ data:
|
||||
kind: HTTPCache
|
||||
singular: httpcache
|
||||
plural: httpcaches
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/http-cache.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: http-cache-
|
||||
labels:
|
||||
@@ -52,6 +55,7 @@ data:
|
||||
kind: NATS
|
||||
singular: nats
|
||||
plural: natses
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/nats.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: nats-
|
||||
labels:
|
||||
@@ -66,6 +70,7 @@ data:
|
||||
kind: TCPBalancer
|
||||
singular: tcpbalancer
|
||||
plural: tcpbalancers
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/tcp-balancer.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: tcp-balancer-
|
||||
labels:
|
||||
@@ -80,6 +85,7 @@ data:
|
||||
kind: VirtualMachine
|
||||
singular: virtualmachine
|
||||
plural: virtualmachines
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/virtual-machine.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: virtual-machine-
|
||||
labels:
|
||||
@@ -94,6 +100,7 @@ data:
|
||||
kind: VPN
|
||||
singular: vpn
|
||||
plural: vpns
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/vpn.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: vpn-
|
||||
labels:
|
||||
@@ -108,6 +115,7 @@ data:
|
||||
kind: MySQL
|
||||
singular: mysql
|
||||
plural: mysqls
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/mysql.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: mysql-
|
||||
labels:
|
||||
@@ -122,6 +130,7 @@ data:
|
||||
kind: Tenant
|
||||
singular: tenant
|
||||
plural: tenants
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/tenant.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: tenant-
|
||||
labels:
|
||||
@@ -136,6 +145,7 @@ data:
|
||||
kind: Kubernetes
|
||||
singular: kubernetes
|
||||
plural: kuberneteses
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/kubernetes.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: kubernetes-
|
||||
labels:
|
||||
@@ -150,6 +160,7 @@ data:
|
||||
kind: Redis
|
||||
singular: redis
|
||||
plural: redises
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/redis.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: redis-
|
||||
labels:
|
||||
@@ -164,6 +175,7 @@ data:
|
||||
kind: RabbitMQ
|
||||
singular: rabbitmq
|
||||
plural: rabbitmqs
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/rabbitmq.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: rabbitmq-
|
||||
labels:
|
||||
@@ -178,6 +190,7 @@ data:
|
||||
kind: Postgres
|
||||
singular: postgres
|
||||
plural: postgreses
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/postgres.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: postgres-
|
||||
labels:
|
||||
@@ -192,6 +205,7 @@ data:
|
||||
kind: FerretDB
|
||||
singular: ferretdb
|
||||
plural: ferretdb
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/ferretdb.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ferretdb-
|
||||
labels:
|
||||
@@ -206,6 +220,7 @@ data:
|
||||
kind: Kafka
|
||||
singular: kafka
|
||||
plural: kafkas
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/kafka.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: kafka-
|
||||
labels:
|
||||
@@ -220,6 +235,7 @@ data:
|
||||
kind: VMDisk
|
||||
plural: vmdisks
|
||||
singular: vmdisk
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/vm-disk.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: vm-disk-
|
||||
labels:
|
||||
@@ -234,6 +250,7 @@ data:
|
||||
kind: VMInstance
|
||||
plural: vminstances
|
||||
singular: vminstance
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/vm-instance.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: vm-instance-
|
||||
labels:
|
||||
@@ -248,6 +265,7 @@ data:
|
||||
kind: Monitoring
|
||||
plural: monitorings
|
||||
singular: monitoring
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/monitoring.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ""
|
||||
labels:
|
||||
@@ -262,6 +280,7 @@ data:
|
||||
kind: Etcd
|
||||
plural: etcds
|
||||
singular: etcd
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/etcd.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ""
|
||||
labels:
|
||||
@@ -276,6 +295,7 @@ data:
|
||||
kind: Ingress
|
||||
plural: ingresses
|
||||
singular: ingress
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/ingress.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ""
|
||||
labels:
|
||||
@@ -290,6 +310,7 @@ data:
|
||||
kind: SeaweedFS
|
||||
plural: seaweedfses
|
||||
singular: seaweedfs
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/seaweedfs.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ""
|
||||
labels:
|
||||
@@ -304,6 +325,7 @@ data:
|
||||
kind: BootBox
|
||||
plural: bootboxes
|
||||
singular: bootbox
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/bootbox.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ""
|
||||
labels:
|
||||
@@ -318,6 +340,7 @@ data:
|
||||
kind: Info
|
||||
plural: infos
|
||||
singular: info
|
||||
openAPISchema: {{ .Files.Get "openapi-schemas/info.json" | fromJson | toJson | quote }}
|
||||
release:
|
||||
prefix: ""
|
||||
labels:
|
||||
|
||||
@@ -1,2 +1,2 @@
cozystackAPI:
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.33.1@sha256:ee6b71d3ab1c1484490ff1dc57a7df82813c4f18d6393f149d32acf656aa779d
image: ghcr.io/cozystack/cozystack/cozystack-api:v0.34.0-beta.1@sha256:724a166d2daa9cae3caeb18bffdc7146d80de310a6f97360c2beaef340076e6d
@@ -1,5 +1,5 @@
cozystackController:
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.33.1@sha256:4777488e14f0313b153b153388c78ab89e3a39582c30266f2321704df1976922
image: ghcr.io/cozystack/cozystack/cozystack-controller:v0.34.0-beta.1@sha256:cf0b80f2540ac8f6ddd226b3bab87e001602eb0ebc8a527a1c14a0a6f23eb427
debug: false
disableTelemetry: false
cozystackVersion: "v0.33.1"
cozystackVersion: "v0.34.0-beta.1"
@@ -76,7 +76,7 @@ data:
"kubeappsNamespace": {{ .Release.Namespace | quote }},
"helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
"carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
"appVersion": "v0.33.1",
"appVersion": "v0.34.0-beta.1",
"authProxyEnabled": {{ .Values.authProxy.enabled }},
"oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
"oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},
@@ -19,8 +19,8 @@ kubeapps:
image:
registry: ghcr.io/cozystack/cozystack
repository: dashboard
tag: v0.33.1
digest: "sha256:5e514516bd3dc0c693bb346ddeb9740e0439a59deb2a56b87317286e3ce79ac9"
tag: v0.34.0-beta.1
digest: "sha256:ac2b5348d85fe37ad70a4cc159881c4eaded9175a4b586cfa09a52b0fbe5e1e5"
redis:
master:
resourcesPreset: "none"
@@ -37,8 +37,8 @@ kubeapps:
image:
registry: ghcr.io/cozystack/cozystack
repository: kubeapps-apis
tag: v0.33.1
digest: "sha256:ea5b21a27c97b14880042d2a642670e3461e7d946c65b5b557d2eb8df9f03a87"
tag: v0.34.0-beta.1
digest: "sha256:0270aea2e4b21a906db7f03214e7f6b0786be64a1b66e998e4ed8ef7da12da58"
pluginConfig:
flux:
packages:
@@ -8,7 +8,7 @@ annotations:
- name: Upstream Project
url: https://github.com/controlplaneio-fluxcd/flux-operator
apiVersion: v2
appVersion: v0.23.0
appVersion: v0.24.0
description: 'A Helm chart for deploying the Flux Operator. '
home: https://github.com/controlplaneio-fluxcd
icon: https://raw.githubusercontent.com/cncf/artwork/main/projects/flux/icon/color/flux-icon-color.png
@@ -25,4 +25,4 @@ sources:
- https://github.com/controlplaneio-fluxcd/flux-operator
- https://github.com/controlplaneio-fluxcd/charts
type: application
version: 0.23.0
version: 0.24.0
@@ -1,6 +1,6 @@
# flux-operator

 
 

The [Flux Operator](https://github.com/controlplaneio-fluxcd/flux-operator) provides a
declarative API for the installation and upgrade of CNCF [Flux](https://fluxcd.io) and the
@@ -38,6 +38,8 @@ see the Flux Operator [documentation](https://fluxcd.control-plane.io/operator/)
| commonLabels | object | `{}` | Common labels to add to all deployed objects including pods. |
| extraArgs | list | `[]` | Container extra arguments. |
| extraEnvs | list | `[]` | Container extra environment variables. |
| extraVolumeMounts | list | `[]` | Container extra volume mounts. |
| extraVolumes | list | `[]` | Pod extra volumes. |
| fullnameOverride | string | `""` | |
| hostNetwork | bool | `false` | If `true`, the container ports (`8080` and `8081`) are exposed on the host network. |
| image | object | `{"imagePullPolicy":"IfNotPresent","pullSecrets":[],"repository":"ghcr.io/controlplaneio-fluxcd/flux-operator","tag":""}` | Container image settings. The image tag defaults to the chart appVersion. |
@@ -586,6 +586,9 @@ spec:
|
||||
description: ServerVersion is the version of the Kubernetes API
|
||||
server.
|
||||
type: string
|
||||
required:
|
||||
- platform
|
||||
- serverVersion
|
||||
type: object
|
||||
components:
|
||||
description: ComponentsStatus is the status of the Flux controller
|
||||
@@ -637,6 +640,23 @@ spec:
|
||||
- entitlement
|
||||
- status
|
||||
type: object
|
||||
operator:
|
||||
description: Operator is the version information of the Flux Operator.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: APIVersion is the API version of the Flux Operator.
|
||||
type: string
|
||||
platform:
|
||||
description: Platform is the os/arch of Flux Operator.
|
||||
type: string
|
||||
version:
|
||||
description: Version is the version number of Flux Operator.
|
||||
type: string
|
||||
required:
|
||||
- apiVersion
|
||||
- platform
|
||||
- version
|
||||
type: object
|
||||
reconcilers:
|
||||
description: |-
|
||||
ReconcilersStatus is the list of Flux reconcilers and
|
||||
@@ -858,8 +878,10 @@ spec:
|
||||
- a PEM-encoded CA certificate (`ca.crt`)
|
||||
- a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`)
|
||||
|
||||
When connecting to a Git provider that uses self-signed certificates, the CA certificate
|
||||
When connecting to a Git or OCI provider that uses self-signed certificates, the CA certificate
|
||||
must be set in the Secret under the 'ca.crt' key to establish the trust relationship.
|
||||
When connecting to an OCI provider that supports client certificates (mTLS), the client certificate
|
||||
and private key must be set in the Secret under the 'tls.crt' and 'tls.key' keys, respectively.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
@@ -884,11 +906,21 @@ spec:
|
||||
ExcludeBranch specifies the regular expression to filter the branches
|
||||
that the input provider should exclude.
|
||||
type: string
|
||||
excludeTag:
|
||||
description: |-
|
||||
ExcludeTag specifies the regular expression to filter the tags
|
||||
that the input provider should exclude.
|
||||
type: string
|
||||
includeBranch:
|
||||
description: |-
|
||||
IncludeBranch specifies the regular expression to filter the branches
|
||||
that the input provider should include.
|
||||
type: string
|
||||
includeTag:
|
||||
description: |-
|
||||
IncludeTag specifies the regular expression to filter the tags
|
||||
that the input provider should include.
|
||||
type: string
|
||||
labels:
|
||||
description: Labels specifies the list of labels to filter the
|
||||
input provider response.
|
||||
@@ -896,13 +928,17 @@ spec:
|
||||
type: string
|
||||
type: array
|
||||
limit:
|
||||
default: 100
|
||||
description: |-
|
||||
Limit specifies the maximum number of input sets to return.
|
||||
When not set, the default limit is 100.
|
||||
type: integer
|
||||
semver:
|
||||
description: Semver specifies the semantic version range to filter
|
||||
and order the tags.
|
||||
description: |-
|
||||
Semver specifies a semantic version range to filter and sort the tags.
|
||||
If this field is not specified, the tags will be sorted in reverse
|
||||
alphabetical order.
|
||||
Supported only for tags at the moment.
|
||||
type: string
|
||||
type: object
|
||||
schedule:
|
||||
@@ -933,10 +969,12 @@ spec:
|
||||
secretRef:
|
||||
description: |-
|
||||
SecretRef specifies the Kubernetes Secret containing the basic-auth credentials
|
||||
to access the input provider. The secret must contain the keys
|
||||
'username' and 'password'.
|
||||
When connecting to a Git provider, the password should be a personal access token
|
||||
to access the input provider.
|
||||
When connecting to a Git provider, the secret must contain the keys
|
||||
'username' and 'password', and the password should be a personal access token
|
||||
that grants read-only access to the repository.
|
||||
When connecting to an OCI provider, the secret must contain a Kubernetes
|
||||
Image Pull Secret, as if created by `kubectl create secret docker-registry`.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
@@ -944,6 +982,14 @@ spec:
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
serviceAccountName:
|
||||
description: |-
|
||||
ServiceAccountName specifies the name of the Kubernetes ServiceAccount
|
||||
used for authentication with AWS, Azure or GCP services through
|
||||
workload identity federation features. If not specified, the
|
||||
authentication for these cloud providers will use the ServiceAccount
|
||||
of the operator (or any other environment authentication configuration).
|
||||
type: string
|
||||
skip:
|
||||
description: Skip defines whether we need to skip input provider response
|
||||
updates.
|
||||
@@ -966,12 +1012,20 @@ spec:
|
||||
- GitLabBranch
|
||||
- GitLabTag
|
||||
- GitLabMergeRequest
|
||||
- AzureDevOpsBranch
|
||||
- AzureDevOpsTag
|
||||
- AzureDevOpsPullRequest
|
||||
- OCIArtifactTag
|
||||
- ACRArtifactTag
|
||||
- ECRArtifactTag
|
||||
- GARArtifactTag
|
||||
type: string
|
||||
url:
|
||||
description: |-
|
||||
URL specifies the HTTP/S address of the input provider API.
|
||||
URL specifies the HTTP/S or OCI address of the input provider API.
|
||||
When connecting to a Git provider, the URL should point to the repository address.
|
||||
pattern: ^((http|https)://.*){0,1}$
|
||||
When connecting to an OCI provider, the URL should point to the OCI repository address.
|
||||
pattern: ^((http|https|oci)://.*){0,1}$
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
@@ -981,6 +1035,27 @@ spec:
|
||||
rule: self.type != 'Static' || !has(self.url)
|
||||
- message: spec.url must not be empty when spec.type is not 'Static'
|
||||
rule: self.type == 'Static' || has(self.url)
|
||||
- message: spec.url must start with 'http://' or 'https://' when spec.type
|
||||
is a Git provider
|
||||
rule: '!self.type.startsWith(''Git'') || self.url.startsWith(''http'')'
|
||||
- message: spec.url must start with 'http://' or 'https://' when spec.type
|
||||
is a Git provider
|
||||
rule: '!self.type.startsWith(''AzureDevOps'') || self.url.startsWith(''http'')'
|
||||
- message: spec.url must start with 'oci://' when spec.type is an OCI
|
||||
provider
|
||||
rule: '!self.type.endsWith(''ArtifactTag'') || self.url.startsWith(''oci'')'
|
||||
- message: cannot specify spec.serviceAccountName when spec.type is not
|
||||
one of AzureDevOps* or *ArtifactTag
|
||||
rule: '!has(self.serviceAccountName) || self.type.startsWith(''AzureDevOps'')
|
||||
|| self.type.endsWith(''ArtifactTag'')'
|
||||
- message: cannot specify spec.certSecretRef when spec.type is one of
|
||||
Static, AzureDevOps*, ACRArtifactTag, ECRArtifactTag or GARArtifactTag
|
||||
rule: '!has(self.certSecretRef) || !(self.url == ''Static'' || self.type.startsWith(''AzureDevOps'')
|
||||
|| (self.type.endsWith(''ArtifactTag'') && self.type != ''OCIArtifactTag''))'
|
||||
- message: cannot specify spec.secretRef when spec.type is one of Static,
|
||||
ACRArtifactTag, ECRArtifactTag or GARArtifactTag
|
||||
rule: '!has(self.secretRef) || !(self.url == ''Static'' || (self.type.endsWith(''ArtifactTag'')
|
||||
&& self.type != ''OCIArtifactTag''))'
|
||||
status:
|
||||
description: ResourceSetInputProviderStatus defines the observed state
|
||||
of ResourceSetInputProvider.
|
||||
|
||||
@@ -99,9 +99,15 @@ spec:
volumeMounts:
- name: temp
mountPath: /tmp
{{- if .Values.extraVolumeMounts }}
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
{{- end }}
volumes:
- name: temp
emptyDir: {}
{{- if .Values.extraVolumes }}
{{- toYaml .Values.extraVolumes | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
@@ -116,12 +116,18 @@ nodeSelector: { } # @schema type: object
# -- If `true`, the container ports (`8080` and `8081`) are exposed on the host network.
hostNetwork: false # @schema default: false

# -- Pod extra volumes.
extraVolumes: [ ] # @schema item: object ; uniqueItems: true

# -- Container extra environment variables.
extraEnvs: [ ] # @schema item: object ; uniqueItems: true

# -- Container extra arguments.
extraArgs: [ ] # @schema item: string ; uniqueItems: true

# -- Container extra volume mounts.
extraVolumeMounts: [ ] # @schema item: object ; uniqueItems: true

# -- Container logging level flag.
logLevel: "info" # @schema enum:[debug,info,error]
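Together with the Deployment template change above, the new `extraVolumes` and `extraVolumeMounts` values let callers mount additional volumes into the operator. For example (a sketch; the OCI chart location, namespace, and volume names are assumptions, not taken from this diff; `--set-json` needs Helm 3.10+):

```sh
helm upgrade --install flux-operator oci://ghcr.io/controlplaneio-fluxcd/charts/flux-operator \
  --namespace flux-system --create-namespace \
  --set-json 'extraVolumes=[{"name":"scratch","emptyDir":{}}]' \
  --set-json 'extraVolumeMounts=[{"name":"scratch","mountPath":"/scratch"}]'
```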
@@ -8,7 +8,7 @@ annotations:
- name: Upstream Project
url: https://github.com/controlplaneio-fluxcd/flux-operator
apiVersion: v2
appVersion: v0.23.0
appVersion: v0.24.0
description: 'A Helm chart for deploying a Flux instance managed by Flux Operator. '
home: https://github.com/controlplaneio-fluxcd
icon: https://raw.githubusercontent.com/cncf/artwork/main/projects/flux/icon/color/flux-icon-color.png
@@ -25,4 +25,4 @@ sources:
- https://github.com/controlplaneio-fluxcd/flux-operator
- https://github.com/controlplaneio-fluxcd/charts
type: application
version: 0.23.0
version: 0.24.0
@@ -1,6 +1,6 @@
# flux-instance

  
  

This chart is a thin wrapper around the `FluxInstance` custom resource, which is
used by the [Flux Operator](https://github.com/controlplaneio-fluxcd/flux-operator)
@@ -1,2 +0,0 @@
images
hack
@@ -1,3 +0,0 @@
apiVersion: v2
name: cozy-kamaji-etcd
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
@@ -1,9 +0,0 @@
update:
rm -rf charts
helm repo add clastix https://clastix.github.io/charts
helm repo update clastix
helm pull clastix/kamaji-etcd --untar --untardir charts
sed -i 's/hook-failed/before-hook-creation,hook-failed/' `grep -rl hook-failed charts`
patch --no-backup-if-mismatch -p4 < patches/fix-svc.diff
patch --no-backup-if-mismatch -p4 < patches/fullnameOverride.diff
patch --no-backup-if-mismatch -p4 < patches/remove-plus.patch
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,15 +0,0 @@
apiVersion: v2
appVersion: 3.5.6
description: Helm chart for deploying a multi-tenant `etcd` cluster.
home: https://github.com/clastix/kamaji-etcd
kubeVersion: '>=1.22.0-0'
maintainers:
- email: me@bsctl.io
name: Adriano Pezzuto
- email: dario@tranchitella.eu
name: Dario Tranchitella
name: kamaji-etcd
sources:
- https://github.com/clastix/kamaji-etcd
type: application
version: 0.5.1
@@ -1,9 +0,0 @@
docs: HELMDOCS_VERSION := v1.8.1
docs: docker
@docker run --rm -v "$$(pwd):/helm-docs" -u $$(id -u) jnorwood/helm-docs:$(HELMDOCS_VERSION)

docker:
@hash docker 2>/dev/null || {\
echo "You need docker" &&\
exit 1;\
}
@@ -1,133 +0,0 @@
|
||||
# kamaji-etcd
|
||||
|
||||
  
|
||||
|
||||
Helm chart for deploying a multi-tenant `etcd` cluster.
|
||||
|
||||
[Kamaji](https://github.com/clastix/kamaji) turns any Kubernetes cluster into an _admin cluster_ to orchestrate other Kubernetes clusters called _tenant clusters_.
|
||||
The Control Plane of a _tenant cluster_ is made of regular pods running in a namespace of the _admin cluster_ instead of a dedicated set of Virtual Machines.
|
||||
This solution makes running control planes at scale cheaper and easier to deploy and operate.
|
||||
|
||||
As of any Kubernetes cluster, a _tenant cluster_ needs a datastore where to save the state and be able to retrieve data.
|
||||
This chart provides a multi-tenant `etcd` as datastore for Kamaji as well as a standalone multi-tenant `etcd` cluster.
|
||||
|
||||
## Install kamaji-etcd
|
||||
|
||||
To install the Chart with the release name `kamaji-etcd`:
|
||||
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm install kamaji-etcd clastix/kamaji-etcd -n kamaji-etcd --create-namespace
|
||||
|
||||
Show the status:
|
||||
|
||||
helm status kamaji-etcd -n kamaji-etcd
|
||||
|
||||
Upgrade the Chart
|
||||
|
||||
helm upgrade kamaji-etcd -n kamaji-etcd clastix/kamaji-etcd
|
||||
|
||||
Uninstall the Chart
|
||||
|
||||
helm uninstall kamaji-etcd -n kamaji-etcd
|
||||
|
||||
## Customize the installation
|
||||
|
||||
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
|
||||
|
||||
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line.
|
||||
Create a copy of the YAML file `values.yaml` and add your overrides to it.
|
||||
|
||||
Specify your overrides file when you install the Chart:
|
||||
|
||||
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --values myvalues.yaml
|
||||
|
||||
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file.
|
||||
Any values in `values.yaml` that weren't overridden will keep their defaults.
|
||||
|
||||
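For illustration, one way to produce such an overrides file is to start from the chart defaults, edit them, and apply the result (the edited value mentioned below is only an example):

    # Example only: copy the chart defaults, adjust them, then apply the overrides.
    helm show values clastix/kamaji-etcd > myvalues.yaml
    # ... edit myvalues.yaml, for instance setting replicas: 5 ...
    helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace \
      clastix/kamaji-etcd --values myvalues.yaml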
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
|
||||
|
||||
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --set replicas=5
|
||||
|
||||
Here the values you can override:
|
||||
|
||||
## Values
|
||||
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| affinity | object | `{}` | Kubernetes affinity rules to apply to etcd controller pods |
|
||||
| alerts.annotations | object | `{}` | Assign additional Annotations |
|
||||
| alerts.enabled | bool | `false` | Enable alerts for Alertmanager |
|
||||
| alerts.labels | object | `{}` | Assign additional labels according to Prometheus' Alerts matching labels |
|
||||
| alerts.namespace | string | `""` | Install the Alerts into a different Namespace, as the monitoring stack one (default: the release one) |
|
||||
| alerts.rules | list | `[]` | The rules for alerts |
|
||||
| autoCompactionMode | string | `"periodic"` | Interpret 'auto-compaction-retention' one of: periodic|revision. Use 'periodic' for duration based retention, 'revision' for revision number based retention. |
|
||||
| autoCompactionRetention | string | `"5m"` | Auto compaction retention length. 0 means disable auto compaction. |
|
||||
| backup | object | `{"all":false,"enabled":false,"s3":{"accessKey":{"value":"","valueFrom":{}},"bucket":"mybucket","image":{"pullPolicy":"IfNotPresent","repository":"minio/mc","tag":"RELEASE.2022-11-07T23-47-39Z"},"retention":"","secretKey":{"value":"","valueFrom":{}},"url":"http://mys3storage:9000"},"schedule":"20 3 * * *","snapshotDateFormat":"$(date +%Y%m%d)","snapshotNamePrefix":"mysnapshot"}` | Enable storage backup |
|
||||
| backup.all | bool | `false` | Enable backup for all endpoints. When disabled, only the leader will be taken |
|
||||
| backup.enabled | bool | `false` | Enable scheduling backup job |
|
||||
| backup.s3 | object | `{"accessKey":{"value":"","valueFrom":{}},"bucket":"mybucket","image":{"pullPolicy":"IfNotPresent","repository":"minio/mc","tag":"RELEASE.2022-11-07T23-47-39Z"},"retention":"","secretKey":{"value":"","valueFrom":{}},"url":"http://mys3storage:9000"}` | The S3 storage config section |
|
||||
| backup.s3.accessKey | object | `{"value":"","valueFrom":{}}` | The S3 storage ACCESS KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret. |
|
||||
| backup.s3.bucket | string | `"mybucket"` | The S3 storage bucket |
|
||||
| backup.s3.image | object | `{"pullPolicy":"IfNotPresent","repository":"minio/mc","tag":"RELEASE.2022-11-07T23-47-39Z"}` | The S3 client image config section |
|
||||
| backup.s3.image.pullPolicy | string | `"IfNotPresent"` | Pull policy to use |
|
||||
| backup.s3.image.repository | string | `"minio/mc"` | Install image from specific repo |
|
||||
| backup.s3.image.tag | string | `"RELEASE.2022-11-07T23-47-39Z"` | Install image with specific tag |
|
||||
| backup.s3.retention | string | `""` | The S3 storage object lifecycle management rules; N.B. enabling this option will delete previously set lifecycle rules |
|
||||
| backup.s3.secretKey | object | `{"value":"","valueFrom":{}}` | The S3 storage SECRET KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret. |
|
||||
| backup.s3.url | string | `"http://mys3storage:9000"` | The S3 storage url |
|
||||
| backup.schedule | string | `"20 3 * * *"` | The job scheduled maintenance time for backup |
|
||||
| backup.snapshotDateFormat | string | `"$(date +%Y%m%d)"` | The backup file date format (bash) |
|
||||
| backup.snapshotNamePrefix | string | `"mysnapshot"` | The backup file name prefix |
|
||||
| clientPort | int | `2379` | The client request port. |
|
||||
| datastore.enabled | bool | `false` | Create a datastore custom resource for Kamaji |
|
||||
| defragmentation | object | `{"schedule":"*/15 * * * *"}` | Enable storage defragmentation |
|
||||
| defragmentation.schedule | string | `"*/15 * * * *"` | The job scheduled maintenance time for defrag (empty to disable) |
|
||||
| extraArgs | list | `[]` | A list of extra arguments to add to the etcd default ones |
|
||||
| image.pullPolicy | string | `"IfNotPresent"` | Pull policy to use |
|
||||
| image.repository | string | `"quay.io/coreos/etcd"` | Install image from specific repo |
|
||||
| image.tag | string | `""` | Install image with specific tag, overwrite the tag in the chart |
|
||||
| livenessProbe | object | `{}` | The livenessProbe for the etcd container |
|
||||
| metricsPort | int | `2381` | The port where etcd exposes metrics. |
|
||||
| nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Kubernetes node selector rules to schedule etcd |
|
||||
| peerApiPort | int | `2380` | The peer API port which servers are listening to. |
|
||||
| persistentVolumeClaim.accessModes | list | `["ReadWriteOnce"]` | The Access Mode to storage |
|
||||
| persistentVolumeClaim.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
|
||||
| persistentVolumeClaim.size | string | `"10Gi"` | The size of persistent storage for etcd data |
|
||||
| persistentVolumeClaim.storageClassName | string | `""` | A specific storage class |
|
||||
| podAnnotations | object | `{}` | Annotations to add to all etcd pods |
|
||||
| podLabels | object | `{"application":"kamaji-etcd"}` | Labels to add to all etcd pods |
|
||||
| priorityClassName | string | `"system-cluster-critical"` | The priorityClassName to apply to etcd |
|
||||
| quotaBackendBytes | string | `"8589934592"` | Raise alarms when backend size exceeds the given quota. It will put the cluster into a maintenance mode which only accepts key reads and deletes. |
|
||||
| replicas | int | `3` | Size of the etcd cluster |
|
||||
| resources | object | `{"limits":{},"requests":{}}` | Resources assigned to the etcd containers |
|
||||
| securityContext | object | `{"allowPrivilegeEscalation":false}` | The securityContext to apply to etcd |
|
||||
| serviceAccount | object | `{"create":true,"name":""}` | Install an etcd with enabled multi-tenancy |
|
||||
| serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
|
||||
| serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
|
||||
| serviceMonitor.annotations | object | `{}` | Assign additional Annotations |
|
||||
| serviceMonitor.enabled | bool | `false` | Enable ServiceMonitor for Prometheus |
|
||||
| serviceMonitor.endpoint.interval | string | `"15s"` | Set the scrape interval for the endpoint of the serviceMonitor |
|
||||
| serviceMonitor.endpoint.metricRelabelings | list | `[]` | Set metricRelabelings for the endpoint of the serviceMonitor |
|
||||
| serviceMonitor.endpoint.relabelings | list | `[]` | Set relabelings for the endpoint of the serviceMonitor |
|
||||
| serviceMonitor.endpoint.scrapeTimeout | string | `""` | Set the scrape timeout for the endpoint of the serviceMonitor |
|
||||
| serviceMonitor.labels | object | `{}` | Assign additional labels according to Prometheus' serviceMonitorSelector matching labels |
|
||||
| serviceMonitor.matchLabels | object | `{}` | Change matching labels |
|
||||
| serviceMonitor.namespace | string | `""` | Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one) |
|
||||
| serviceMonitor.serviceAccount.name | string | `"etcd"` | ServiceAccount for Metrics RBAC |
|
||||
| serviceMonitor.serviceAccount.namespace | string | `"etcd-system"` | ServiceAccount Namespace for Metrics RBAC |
|
||||
| serviceMonitor.targetLabels | list | `[]` | Set targetLabels for the serviceMonitor |
|
||||
| snapshotCount | string | `"10000"` | Number of committed transactions to trigger a snapshot to disk. |
|
||||
| tolerations | list | `[]` | Kubernetes node taints that the etcd pods would tolerate |
|
||||
| topologySpreadConstraints | list | `[]` | Kubernetes topology spread constraints to apply to etcd controller pods |
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Name | Email | Url |
|
||||
| ---- | ------ | --- |
|
||||
| Adriano Pezzuto | <me@bsctl.io> | |
|
||||
| Dario Tranchitella | <dario@tranchitella.eu> | |
|
||||
|
||||
## Source Code
|
||||
|
||||
* <https://github.com/clastix/kamaji-etcd>
|
||||
@@ -1,59 +0,0 @@
|
||||
{{ template "chart.header" . }}
|
||||
{{ template "chart.deprecationWarning" . }}
|
||||
|
||||
{{ template "chart.badgesSection" . }}
|
||||
|
||||
{{ template "chart.description" . }}
|
||||
|
||||
[Kamaji](https://github.com/clastix/kamaji) turns any Kubernetes cluster into an _admin cluster_ to orchestrate other Kubernetes clusters called _tenant clusters_.
|
||||
The Control Plane of a _tenant cluster_ is made of regular pods running in a namespace of the _admin cluster_ instead of a dedicated set of Virtual Machines.
|
||||
This solution makes running control planes at scale cheaper and easier to deploy and operate.
|
||||
|
||||
As of any Kubernetes cluster, a _tenant cluster_ needs a datastore where to save the state and be able to retrieve data.
|
||||
This chart provides a multi-tenant `etcd` as datastore for Kamaji as well as a standalone multi-tenant `etcd` cluster.
|
||||
|
||||
## Install kamaji-etcd
|
||||
|
||||
To install the Chart with the release name `kamaji-etcd`:
|
||||
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
helm repo update
|
||||
helm install kamaji-etcd clastix/kamaji-etcd -n kamaji-etcd --create-namespace
|
||||
|
||||
Show the status:
|
||||
|
||||
helm status kamaji-etcd -n kamaji-etcd
|
||||
|
||||
Upgrade the Chart
|
||||
|
||||
helm upgrade kamaji-etcd -n kamaji-etcd clastix/kamaji-etcd
|
||||
|
||||
Uninstall the Chart
|
||||
|
||||
helm uninstall kamaji-etcd -n kamaji-etcd
|
||||
|
||||
## Customize the installation
|
||||
|
||||
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
|
||||
|
||||
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line.
|
||||
Create a copy of the YAML file `values.yaml` and add your overrides to it.
|
||||
|
||||
Specify your overrides file when you install the Chart:
|
||||
|
||||
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --values myvalues.yaml
|
||||
|
||||
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file.
|
||||
Any values in `values.yaml` that weren't overridden will keep their defaults.
|
||||
|
||||
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
|
||||
|
||||
helm upgrade kamaji-etcd --install --namespace kamaji-etcd --create-namespace kamaji-etcd --set replicas=5
|
||||
|
||||
Here the values you can override:
|
||||
|
||||
{{ template "chart.valuesSection" . }}
|
||||
|
||||
{{ template "chart.maintainersSection" . }}
|
||||
|
||||
{{ template "chart.sourcesSection" . }}
|
||||
@@ -1,164 +0,0 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "etcd.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified etcd name.
|
||||
*/}}
|
||||
{{- define "etcd.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "etcd.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the etcd fully-qualified Docker image to use
|
||||
*/}}
|
||||
{{- define "etcd.fullyQualifiedDockerImage" -}}
|
||||
{{- printf "%s:%s" .Values.image.repository ( .Values.image.tag | default (printf "v%s" .Chart.AppVersion) ) -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the Service to use
|
||||
*/}}
|
||||
{{- define "etcd.serviceName" -}}
|
||||
{{- printf "%s" (include "etcd.fullname" .) | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "etcd.labels" -}}
|
||||
helm.sh/chart: {{ include "etcd.chart" . }}
|
||||
{{ include "etcd.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "etcd.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "etcd.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "etcd.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "etcd.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Name of the Stateful Set.
|
||||
*/}}
|
||||
{{- define "etcd.stsName" }}
|
||||
{{- printf "%s" (include "etcd.fullname" .) | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Name of the etcd CA secret.
|
||||
*/}}
|
||||
{{- define "etcd.caSecretName" }}
|
||||
{{- printf "%s-%s" (include "etcd.fullname" .) "certs" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Name of the certificate signing requests for the certificates required by etcd.
|
||||
*/}}
|
||||
{{- define "etcd.csrConfigMapName" }}
|
||||
{{- printf "%s-csr" (include "etcd.fullname" .) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Name of the etcd role
|
||||
*/}}
|
||||
{{- define "etcd.roleName" }}
|
||||
{{- printf "%s-gen-certs-role" (include "etcd.fullname" .) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Name of the etcd role binding
|
||||
*/}}
|
||||
{{- define "etcd.roleBindingName" }}
|
||||
{{- printf "%s-gen-certs-rolebiding" (include "etcd.fullname" .) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Name of the etcd root-client secret.
|
||||
*/}}
|
||||
{{- define "etcd.clientSecretName" }}
|
||||
{{- printf "%s-root-client-certs" ( include "etcd.fullname" . ) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Retrieve the current Kubernetes version to launch a kubectl container with the minimum version skew possible.
|
||||
*/}}
|
||||
{{- define "etcd.jobsTagKubeVersion" -}}
|
||||
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Comma separated list of etcd cluster peers.
|
||||
*/}}
|
||||
{{- define "etcd.initialCluster" }}
|
||||
{{- $outer := . -}}
|
||||
{{- $list := list -}}
|
||||
{{- range $i, $count := until (int $.Values.replicas) -}}
|
||||
{{- $list = append $list ( printf "%s-%d=https://%s-%d.%s.%s.svc.cluster.local:%d" ( include "etcd.stsName" $outer ) $i ( include "etcd.fullname" $outer ) $count ( include "etcd.serviceName" $outer ) $.Release.Namespace (int $.Values.peerApiPort) ) -}}
|
||||
{{- end }}
|
||||
{{- join "," $list -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Space separated list of etcd cluster endpoints.
|
||||
*/}}
|
||||
{{- define "etcd.endpoints" }}
|
||||
{{- $outer := . -}}
|
||||
{{- $list := list -}}
|
||||
{{- range $i, $count := until (int $.Values.replicas) -}}
|
||||
{{- $list = append $list ( printf "%s-%d.%s.%s.svc.cluster.local:%d" ( include "etcd.stsName" $outer ) $count ( include "etcd.serviceName" $outer ) $.Release.Namespace (int $.Values.clientPort) ) -}}
|
||||
{{- end }}
|
||||
{{- join " " $list -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Space separated list of etcd cluster endpoints.
|
||||
*/}}
|
||||
{{- define "etcd.endpointsYAML" }}
|
||||
{{- $outer := . -}}
|
||||
{{- range $i, $count := until (int $.Values.replicas) -}}
|
||||
{{ printf "- %s-%d.%s.%s.svc.cluster.local:%d\n" ( include "etcd.stsName" $outer ) $count ( include "etcd.serviceName" $outer ) $.Release.Namespace (int $.Values.clientPort) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the minio-client fully-qualified Docker image to use
|
||||
*/}}
|
||||
{{- define "minio-client.fullyQualifiedDockerImage" -}}
|
||||
{{- printf "%s:%s" .Values.backup.s3.image.repository .Values.backup.s3.image.tag -}}
|
||||
{{- end }}
|
||||
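The `etcd.initialCluster` and `etcd.endpoints` helpers above emit one entry per replica. One way to see the strings they produce is to render the chart locally; the release name, namespace and replica count below are only an example:

    # Render the chart and inspect the generated etcd bootstrap flags
    # (assumes the clastix repo from the install steps above has been added).
    helm template kamaji-etcd clastix/kamaji-etcd --namespace kamaji-etcd --set replicas=3 \
      | grep -E 'initial-cluster=|advertise-client-urls='
    # With three replicas, --initial-cluster is expected to contain entries such as
    #   kamaji-etcd-0=https://kamaji-etcd-0.kamaji-etcd.kamaji-etcd.svc.cluster.local:2380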
@@ -1,22 +0,0 @@
|
||||
{{- if .Values.alerts.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: {{ include "etcd.fullname" . }}-alerts
|
||||
namespace: {{ .Values.alerts.namespace | default .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
{{- with .Values.alerts.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.alerts.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
groups:
|
||||
- name: kamaji-etcd
|
||||
{{- with .Values.alerts.rules }}
|
||||
rules: {{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,98 +0,0 @@
|
||||
{{- $outer := $ -}}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install
|
||||
"helm.sh/hook-weight": "5"
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: {{ include "etcd.csrConfigMapName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
ca-csr.json: |-
|
||||
{
|
||||
"CN": "Clastix CA",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"C": "IT",
|
||||
"ST": "Italy",
|
||||
"L": "Milan"
|
||||
}
|
||||
]
|
||||
}
|
||||
config.json: |-
|
||||
{
|
||||
"signing": {
|
||||
"default": {
|
||||
"expiry": "8760h"
|
||||
},
|
||||
"profiles": {
|
||||
"server-authentication": {
|
||||
"usages": ["signing", "key encipherment", "server auth"],
|
||||
"expiry": "8760h"
|
||||
},
|
||||
"client-authentication": {
|
||||
"usages": ["signing", "key encipherment", "client auth"],
|
||||
"expiry": "8760h"
|
||||
},
|
||||
"peer-authentication": {
|
||||
"usages": ["signing", "key encipherment", "server auth", "client auth"],
|
||||
"expiry": "8760h"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
server-csr.json: |-
|
||||
{
|
||||
"CN": "etcd",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"hosts": [
|
||||
{{- range $count := until (int $.Values.replicas) -}}
|
||||
{{ printf "\"%s-%d.%s.%s.svc.cluster.local\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
|
||||
{{ printf "\"%s-%d.%s.%s.svc\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
|
||||
{{- end }}
|
||||
"etcd-server.{{ .Release.Namespace }}.svc.cluster.local",
|
||||
"etcd-server.{{ .Release.Namespace }}.svc",
|
||||
"etcd-server",
|
||||
"127.0.0.1"
|
||||
]
|
||||
}
|
||||
peer-csr.json: |-
|
||||
{
|
||||
"CN": "etcd",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"hosts": [
|
||||
{{- range $count := until (int $.Values.replicas) -}}
|
||||
{{ printf "\"%s-%d\"," ( include "etcd.stsName" $outer ) $count }}
|
||||
{{ printf "\"%s-%d.%s\"," ( include "etcd.stsName" $outer ) $count (include "etcd.serviceName" $outer) }}
|
||||
{{ printf "\"%s-%d.%s.%s.svc\"," ( include "etcd.stsName" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
|
||||
{{ printf "\"%s-%d.%s.%s.svc.cluster.local\"," ( include "etcd.stsName" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
|
||||
{{- end }}
|
||||
"127.0.0.1"
|
||||
]
|
||||
}
|
||||
root-client-csr.json: |-
|
||||
{
|
||||
"CN": "root",
|
||||
"key": {
|
||||
"algo": "rsa",
|
||||
"size": 2048
|
||||
},
|
||||
"names": [
|
||||
{
|
||||
"O": "system:masters"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
{{- if .Values.backup.enabled -}}
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: "{{ .Release.Name }}-backup"
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
schedule: "{{ .Values.backup.schedule }}"
|
||||
successfulJobsHistoryLimit: 7
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
|
||||
restartPolicy: OnFailure
|
||||
initContainers:
|
||||
- name: etcd-client
|
||||
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
cd /opt/etcd-dump;
|
||||
for ENDPOINT in {{ include "etcd.endpoints" . }}; do
|
||||
isLeader=$(etcdctl --endpoints=${ENDPOINT} endpoint status | awk '{ print $6 }' | tr -d ',' )
|
||||
if ! {{ .Values.backup.all }} && ! ${isLeader} ; then
|
||||
continue
|
||||
elif ! {{ .Values.backup.all }} && ${isLeader} ; then
|
||||
POD="etcd-leader"
|
||||
else
|
||||
POD=${ENDPOINT#*//}
|
||||
POD=${POD%.{{ include "etcd.serviceName" . }}*}
|
||||
fi
|
||||
SNAPSHOT={{ .Values.backup.snapshotNamePrefix }}_${POD}_{{ .Values.backup.snapshotDateFormat }}.db
|
||||
etcdctl --endpoints=${ENDPOINT} snapshot save ${SNAPSHOT}
|
||||
etcdutl --write-out=table snapshot status ${SNAPSHOT}
|
||||
md5sum ${SNAPSHOT};
|
||||
done;
|
||||
env:
|
||||
- name: ETCDCTL_CACERT
|
||||
value: /opt/certs/ca/ca.crt
|
||||
- name: ETCDCTL_CERT
|
||||
value: /opt/certs/root-client-certs/tls.crt
|
||||
- name: ETCDCTL_KEY
|
||||
value: /opt/certs/root-client-certs/tls.key
|
||||
volumeMounts:
|
||||
- name: root-client-certs
|
||||
mountPath: /opt/certs/root-client-certs
|
||||
- name: certs
|
||||
mountPath: /opt/certs/ca
|
||||
- name: shared-data
|
||||
mountPath: /opt/etcd-dump
|
||||
containers:
|
||||
- name: minio-client
|
||||
image: {{ include "minio-client.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: {{ .Values.backup.s3.image.pullPolicy }}
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
cd /opt/etcd-dump
|
||||
if $MC alias set myminio ${S3_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} \
|
||||
&& $MC ping myminio -c 3 -e 3 ; then
|
||||
echo -e "\nUploading snapshot(s):"
|
||||
$MC cp {{ .Values.backup.snapshotNamePrefix }}_*.db myminio/{{ .Values.backup.s3.bucket }}
|
||||
else
|
||||
echo -e "\nERROR: S3 storage could not be configured;\nCheck your S3 URL/Credentials or network connectivity"
|
||||
exit 1
|
||||
fi
|
||||
env:
|
||||
- name: S3_URL
|
||||
value: {{ .Values.backup.s3.url | quote }}
|
||||
- name: S3_ACCESS_KEY
|
||||
{{- if .Values.backup.s3.accessKey.value }}
|
||||
value: {{ .Values.backup.s3.accessKey.value | quote }}
|
||||
{{- else }}
|
||||
valueFrom:
|
||||
{{- toYaml .Values.backup.s3.accessKey.valueFrom | nindent 16 }}
|
||||
{{- end }}
|
||||
- name: S3_SECRET_KEY
|
||||
{{- if .Values.backup.s3.secretKey.value }}
|
||||
value: {{ .Values.backup.s3.secretKey.value | quote }}
|
||||
{{- else }}
|
||||
valueFrom:
|
||||
{{- toYaml .Values.backup.s3.secretKey.valueFrom | nindent 16 }}
|
||||
{{- end }}
|
||||
- name: MC_CONFIG_DIR
|
||||
value: /tmp
|
||||
- name: MC
|
||||
value: "/usr/bin/mc --config-dir ${MC_CONFIG_DIR}"
|
||||
volumeMounts:
|
||||
- name: shared-data
|
||||
mountPath: /opt/etcd-dump
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: shared-data
|
||||
emptyDir: {}
|
||||
- name: root-client-certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.clientSecretName" . }}
|
||||
optional: true
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.caSecretName" . }}
|
||||
optional: true
|
||||
{{- end }}
|
||||
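The backup job above takes `etcdctl` snapshots and copies them into the configured S3 bucket. A rough sketch of pulling one of those snapshots back down and restoring it locally, using the chart's default bucket and naming scheme (the exact file name depends on the schedule, prefix and date format; credentials are placeholders):

    # Example only: fetch a snapshot produced by the backup CronJob and restore it locally.
    mc alias set myminio http://mys3storage:9000 "$S3_ACCESS_KEY" "$S3_SECRET_KEY"
    mc cp myminio/mybucket/mysnapshot_etcd-leader_20250101.db ./snapshot.db

    # Verify the snapshot, then materialise a data directory from it.
    etcdutl snapshot status ./snapshot.db --write-out=table
    etcdutl snapshot restore ./snapshot.db --data-dir ./restored-data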
@@ -1,62 +0,0 @@
|
||||
{{- if .Values.defragmentation.schedule -}}
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: "{{ .Release.Name }}-defrag"
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
schedule: "{{ .Values.defragmentation.schedule }}"
|
||||
successfulJobsHistoryLimit: 4
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: etcd-client
|
||||
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
for ENDPOINT in {{ include "etcd.endpoints" . }}; do
|
||||
etcdctl --endpoints=https://${ENDPOINT} defrag;
|
||||
etcdctl --endpoints=https://${ENDPOINT} alarm disarm;
|
||||
etcdctl --endpoints=https://${ENDPOINT} alarm list;
|
||||
etcdctl --endpoints=https://${ENDPOINT} endpoint status -w table;
|
||||
etcdctl --endpoints=https://${ENDPOINT} member list -w table;
|
||||
sleep 15;
|
||||
done;
|
||||
env:
|
||||
- name: ETCDCTL_CACERT
|
||||
value: /opt/certs/ca/ca.crt
|
||||
- name: ETCDCTL_CERT
|
||||
value: /opt/certs/root-client-certs/tls.crt
|
||||
- name: ETCDCTL_KEY
|
||||
value: /opt/certs/root-client-certs/tls.key
|
||||
volumeMounts:
|
||||
- name: root-client-certs
|
||||
mountPath: /opt/certs/root-client-certs
|
||||
- name: certs
|
||||
mountPath: /opt/certs/ca
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: root-client-certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.clientSecretName" . }}
|
||||
optional: true
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.caSecretName" . }}
|
||||
optional: true
|
||||
{{- end }}
|
||||
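Since defragmentation runs from a CronJob, an ad-hoc run can be triggered from its job template without waiting for the next schedule; the namespace and release name below are assumptions:

    # Example only: run the defragmentation job once, outside its schedule.
    kubectl -n kamaji-etcd create job --from=cronjob/kamaji-etcd-defrag manual-defrag
    kubectl -n kamaji-etcd logs -f job/manual-defrag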
@@ -1,35 +0,0 @@
|
||||
{{- if .Values.datastore.enabled }}
|
||||
apiVersion: kamaji.clastix.io/v1alpha1
|
||||
kind: DataStore
|
||||
metadata:
|
||||
name: {{ include "etcd.fullname" . }}
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
spec:
|
||||
driver: etcd
|
||||
endpoints:
|
||||
{{- include "etcd.endpointsYAML" . | nindent 4 }}
|
||||
tlsConfig:
|
||||
certificateAuthority:
|
||||
certificate:
|
||||
secretReference:
|
||||
keyPath: ca.crt
|
||||
name: {{ include "etcd.caSecretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
privateKey:
|
||||
secretReference:
|
||||
keyPath: ca.key
|
||||
name: {{ include "etcd.caSecretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
clientCertificate:
|
||||
certificate:
|
||||
secretReference:
|
||||
keyPath: tls.crt
|
||||
name: {{ include "etcd.clientSecretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
privateKey:
|
||||
secretReference:
|
||||
keyPath: tls.key
|
||||
name: {{ include "etcd.clientSecretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{ end }}
|
||||
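When `datastore.enabled` is set, the template above registers the cluster as a Kamaji `DataStore`. Assuming the Kamaji CRDs are installed, the object can be checked like this (the object name follows the default release name):

    # Example only: confirm the DataStore object exists and points at the expected endpoints.
    kubectl get datastores.kamaji.clastix.io
    kubectl get datastores.kamaji.clastix.io kamaji-etcd -o yaml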
@@ -1,32 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-weight": "10"
|
||||
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
|
||||
name: "{{ .Release.Name }}-etcd-teardown"
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
spec:
|
||||
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
|
||||
command:
|
||||
- kubectl
|
||||
- --namespace={{ .Release.Namespace }}
|
||||
- delete
|
||||
- secret
|
||||
- --ignore-not-found=true
|
||||
- {{ include "etcd.caSecretName" . }}
|
||||
- {{ include "etcd.clientSecretName" . }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
@@ -1,69 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install
|
||||
"helm.sh/hook-weight": "10"
|
||||
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
|
||||
name: "{{ .Release.Name }}-etcd-setup-1"
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
spec:
|
||||
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
|
||||
restartPolicy: Never
|
||||
initContainers:
|
||||
- name: cfssl
|
||||
image: cfssl/cfssl:latest
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
cfssl gencert -initca /csr/ca-csr.json | cfssljson -bare /certs/ca &&
|
||||
mv /certs/ca.pem /certs/ca.crt && mv /certs/ca-key.pem /certs/ca.key &&
|
||||
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/peer-csr.json | cfssljson -bare /certs/peer &&
|
||||
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/server-csr.json | cfssljson -bare /certs/server &&
|
||||
cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=client-authentication /csr/root-client-csr.json | cfssljson -bare /certs/root-client
|
||||
volumeMounts:
|
||||
- mountPath: /certs
|
||||
name: certs
|
||||
- mountPath: /csr
|
||||
name: csr
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
|
||||
command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
if kubectl get secret {{ include "etcd.caSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
|
||||
echo "Secret {{ include "etcd.caSecretName" . }} already exists"
|
||||
else
|
||||
echo "Creating secret {{ include "etcd.caSecretName" . }}"
|
||||
kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem
|
||||
fi
|
||||
if kubectl get secret {{ include "etcd.clientSecretName" . }} --namespace={{ .Release.Namespace }} &>/dev/null; then
|
||||
echo "Secret {{ include "etcd.clientSecretName" . }} already exists"
|
||||
else
|
||||
echo "Creating secret {{ include "etcd.clientSecretName" . }}"
|
||||
kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
|
||||
fi
|
||||
volumeMounts:
|
||||
- mountPath: /certs
|
||||
name: certs
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: csr
|
||||
configMap:
|
||||
name: {{ include "etcd.csrConfigMapName" . }}
|
||||
- name: certs
|
||||
emptyDir: {}
|
||||
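The `-etcd-setup-1` job above generates the CA, server, peer and root-client certificates with cfssl and stores them in the two Secrets named by the helpers earlier in this chart. A quick sanity check after installation might look like this (release name and namespace are assumptions):

    # Example only: inspect the CA certificate written by the pre-install job.
    kubectl -n kamaji-etcd get secret kamaji-etcd-certs -o jsonpath='{.data.ca\.crt}' \
      | base64 -d | openssl x509 -noout -subject -enddate

    # The root client certificate and key live in a separate TLS secret.
    kubectl -n kamaji-etcd get secret kamaji-etcd-root-client-certs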
@@ -1,71 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-weight": "10"
|
||||
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
|
||||
name: "{{ .Release.Name }}-etcd-setup-2"
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
backoffLimit: 12
|
||||
template:
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
spec:
|
||||
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
|
||||
restartPolicy: Never
|
||||
initContainers:
|
||||
- name: kubectl
|
||||
image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- kubectl --namespace={{ .Release.Namespace }} rollout status sts/{{ include "etcd.stsName" . }} --timeout=300s
|
||||
containers:
|
||||
- command:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
etcdctl member list -w table
|
||||
if etcdctl user get root &>/dev/null; then
|
||||
echo "User already exists, nothing to do"
|
||||
else
|
||||
etcdctl user add --no-password=true root &&
|
||||
etcdctl role add root &&
|
||||
etcdctl user grant-role root root &&
|
||||
etcdctl auth enable
|
||||
fi
|
||||
env:
|
||||
- name: ETCDCTL_ENDPOINTS
|
||||
value: https://{{ include "etcd.fullname" . }}-0.{{ include "etcd.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.clientPort }}
|
||||
- name: ETCDCTL_CACERT
|
||||
value: /opt/certs/ca/ca.crt
|
||||
- name: ETCDCTL_CERT
|
||||
value: /opt/certs/root-certs/tls.crt
|
||||
- name: ETCDCTL_KEY
|
||||
value: /opt/certs/root-certs/tls.key
|
||||
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: etcd-client
|
||||
volumeMounts:
|
||||
- name: root-certs
|
||||
mountPath: /opt/certs/root-certs
|
||||
- name: ca
|
||||
mountPath: /opt/certs/ca
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: root-certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.clientSecretName" . }}
|
||||
- name: ca
|
||||
secret:
|
||||
secretName: {{ include "etcd.caSecretName" . }}
|
||||
@@ -1,77 +0,0 @@
|
||||
{{- if .Values.backup.enabled -}}
|
||||
{{- if .Values.backup.s3.retention -}}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": post-install,post-upgrade,post-rollback
|
||||
"helm.sh/hook-weight": "5"
|
||||
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
|
||||
name: "{{ .Release.Name }}-s3-retention"
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
spec:
|
||||
serviceAccountName: {{ include "etcd.serviceAccountName" . }}
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: minio-client
|
||||
image: {{ include "minio-client.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: {{ .Values.backup.s3.image.pullPolicy }}
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |-
|
||||
cd ${MC_CONFIG_DIR}
|
||||
if $MC alias set myminio ${S3_URL} ${S3_ACCESS_KEY} ${S3_SECRET_KEY} \
|
||||
&& $MC ping myminio -c 3 -e 3 ; then
|
||||
echo -e "\nCheck for already created object lifecycle management rule(s):"
|
||||
if $MC ilm ls myminio/${S3_BUCKET} ; then
|
||||
echo -e "\nObject lifecycle management rule(s) found - Clean up:"
|
||||
$MC ilm rm --all --force myminio/${S3_BUCKET}
|
||||
else
|
||||
echo -e "\nNo object lifecycle management rule(s) found - Continue"
|
||||
fi
|
||||
echo -e "\nAdding object lifecycle management rule(s):"
|
||||
$MC ilm add {{ .Values.backup.s3.retention }} myminio/${S3_BUCKET}
|
||||
$MC ilm ls myminio/${S3_BUCKET}
|
||||
else
|
||||
echo -e "\nERROR: S3 storage could not be configured;\nCheck your S3 URL/Credentials or network connectivity"
|
||||
exit 1
|
||||
fi
|
||||
env:
|
||||
- name: S3_URL
|
||||
value: {{ .Values.backup.s3.url | quote }}
|
||||
- name: S3_ACCESS_KEY
|
||||
{{- if .Values.backup.s3.accessKey.value }}
|
||||
value: {{ .Values.backup.s3.accessKey.value | quote }}
|
||||
{{- else }}
|
||||
valueFrom:
|
||||
{{- toYaml .Values.backup.s3.accessKey.valueFrom | nindent 12 }}
|
||||
{{- end }}
|
||||
- name: S3_SECRET_KEY
|
||||
{{- if .Values.backup.s3.secretKey.value }}
|
||||
value: {{ .Values.backup.s3.secretKey.value | quote }}
|
||||
{{- else }}
|
||||
valueFrom:
|
||||
{{- toYaml .Values.backup.s3.secretKey.valueFrom | nindent 12 }}
|
||||
{{- end }}
|
||||
- name: S3_BUCKET
|
||||
value: {{ .Values.backup.s3.bucket | quote }}
|
||||
- name: MC_CONFIG_DIR
|
||||
value: /tmp
|
||||
- name: MC
|
||||
value: "/usr/bin/mc --config-dir ${MC_CONFIG_DIR}"
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
securityContext:
|
||||
runAsUser: 1000
|
||||
runAsGroup: 1000
|
||||
fsGroup: 1000
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,46 +0,0 @@
|
||||
{{- if .Values.serviceMonitor.enabled }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "etcd.fullname" . }}-metrics-role
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "etcd.fullname" . }}-metrics-rolebinding
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "etcd.fullname" . }}-metrics-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceMonitor.serviceAccount.name }}
|
||||
namespace: {{ .Values.serviceMonitor.serviceAccount.namespace | default .Release.Namespace }}
|
||||
{{- end }}
|
||||
@@ -1,60 +0,0 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,post-install,pre-delete
|
||||
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
|
||||
"helm.sh/hook-weight": "5"
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: {{ include "etcd.roleName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- delete
|
||||
resourceNames:
|
||||
- {{ include "etcd.caSecretName" . }}
|
||||
- {{ include "etcd.clientSecretName" . }}
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- patch
|
||||
resourceNames:
|
||||
- {{ include "etcd.stsName" . }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install,post-install,pre-delete
|
||||
"helm.sh/hook-delete-policy": "hook-succeeded,before-hook-creation,hook-failed"
|
||||
"helm.sh/hook-weight": "5"
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: {{ include "etcd.roleBindingName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ include "etcd.roleName" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "etcd.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
@@ -1,15 +0,0 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "etcd.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": pre-install
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation,hook-failed"
|
||||
"helm.sh/hook-weight": "0"
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: {{ include "etcd.serviceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- port: {{ .Values.clientPort }}
|
||||
name: client
|
||||
- port: {{ .Values.peerApiPort }}
|
||||
name: peer
|
||||
- port: {{ .Values.metricsPort }}
|
||||
name: metrics
|
||||
selector:
|
||||
{{- include "etcd.selectorLabels" . | nindent 4 }}
|
||||
@@ -1,47 +0,0 @@
|
||||
{{- if .Values.serviceMonitor.enabled }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "etcd.fullname" . }}-monitor
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceMonitor.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
{{- with .Values.serviceMonitor.endpoint }}
|
||||
- interval: {{ .interval }}
|
||||
port: metrics
|
||||
path: /metrics
|
||||
{{- with .scrapeTimeout }}
|
||||
scrapeTimeout: {{ . }}
|
||||
{{- end }}
|
||||
{{- with .metricRelabelings }}
|
||||
metricRelabelings: {{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- with .relabelings }}
|
||||
relabelings: {{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
jobLabel: app.kubernetes.io/name
|
||||
{{- with .Values.serviceMonitor.targetLabels }}
|
||||
targetLabels: {{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- if .Values.serviceMonitor.matchLabels }}
|
||||
{{- toYaml .Values.serviceMonitor.matchLabels | nindent 6 }}
|
||||
{{- else }}
|
||||
{{- include "etcd.labels" . | nindent 6 }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
{{- end }}
|
||||
|
||||
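The ServiceMonitor above scrapes the plain-HTTP metrics port exposed by the headless Service. Whether that endpoint answers can be checked directly, independent of Prometheus (service name, namespace and port follow the chart defaults):

    # Example only: port-forward the metrics port and fetch a few samples.
    kubectl -n kamaji-etcd port-forward svc/kamaji-etcd 2381:2381 &
    sleep 2
    curl -s http://127.0.0.1:2381/metrics | head -n 20
    kill $!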
@@ -1,117 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "etcd.labels" . | nindent 4 }}
|
||||
name: {{ include "etcd.stsName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
serviceName: {{ include "etcd.serviceName" . }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "etcd.selectorLabels" . | nindent 6 }}
|
||||
replicas: {{ .Values.replicas }}
|
||||
template:
|
||||
metadata:
|
||||
name: etcd
|
||||
labels:
|
||||
{{- include "etcd.selectorLabels" . | nindent 8 }}
|
||||
{{- if .Values.podLabels }}
|
||||
{{- toYaml .Values.podLabels | nindent 8 }}
|
||||
{{- end }}
|
||||
annotations:
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{- toYaml .Values.podAnnotations | nindent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
volumes:
|
||||
- name: certs
|
||||
secret:
|
||||
secretName: {{ include "etcd.caSecretName" . }}
|
||||
containers:
|
||||
- name: etcd
|
||||
image: {{ include "etcd.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 12 }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.clientPort }}
|
||||
name: client
|
||||
- containerPort: {{ .Values.peerApiPort }}
|
||||
name: peer
|
||||
- containerPort: {{ .Values.metricsPort }}
|
||||
name: metrics
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /var/run/etcd
|
||||
- name: certs
|
||||
mountPath: /etc/etcd/pki
|
||||
command:
|
||||
- etcd
|
||||
- --data-dir=/var/run/etcd
|
||||
- --name=$(POD_NAME)
|
||||
- --initial-cluster-state=new
|
||||
- --initial-cluster={{ include "etcd.initialCluster" . }}
|
||||
- --initial-advertise-peer-urls=https://$(POD_NAME).{{ include "etcd.serviceName" . }}.$(POD_NAMESPACE).svc.cluster.local:{{ .Values.peerApiPort }}
|
||||
- --advertise-client-urls=https://$(POD_NAME).{{ include "etcd.serviceName" . }}.$(POD_NAMESPACE).svc.cluster.local:{{ .Values.clientPort }}
|
||||
- --initial-cluster-token=kamaji
|
||||
- --listen-client-urls=https://0.0.0.0:{{ .Values.clientPort }}
|
||||
- --listen-metrics-urls=http://0.0.0.0:{{ .Values.metricsPort }}
|
||||
- --listen-peer-urls=https://0.0.0.0:{{ .Values.peerApiPort }}
|
||||
- --client-cert-auth=true
|
||||
- --peer-client-cert-auth=true
|
||||
- --trusted-ca-file=/etc/etcd/pki/ca.crt
|
||||
- --cert-file=/etc/etcd/pki/server.pem
|
||||
- --key-file=/etc/etcd/pki/server-key.pem
|
||||
- --peer-trusted-ca-file=/etc/etcd/pki/ca.crt
|
||||
- --peer-cert-file=/etc/etcd/pki/peer.pem
|
||||
- --peer-key-file=/etc/etcd/pki/peer-key.pem
|
||||
- --auto-compaction-mode={{ .Values.autoCompactionMode }}
|
||||
- --auto-compaction-retention={{ .Values.autoCompactionRetention }}
|
||||
- --snapshot-count={{ .Values.snapshotCount }}
|
||||
- --quota-backend-bytes={{ .Values.quotaBackendBytes }}
|
||||
{{- with .Values.extraArgs }}
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
{{- with .Values.livenessProbe }}
|
||||
livenessProbe:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
priorityClassName: {{- toYaml .Values.priorityClassName | nindent 8 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.topologySpreadConstraints }}
|
||||
topologySpreadConstraints: {{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
{{- with .Values.persistentVolumeClaim.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
storageClassName: {{ .Values.persistentVolumeClaim.storageClassName }}
|
||||
accessModes:
|
||||
{{- range .Values.persistentVolumeClaim.accessModes }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistentVolumeClaim.size }}
|
||||
@@ -1,223 +0,0 @@
|
||||
# Default values for kamaji-crane.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# -- Size of the etcd cluster
|
||||
replicas: 3
|
||||
|
||||
# -- Install an etcd with enabled multi-tenancy
|
||||
serviceAccount:
|
||||
# -- Create a ServiceAccount, required to install and provision the etcd backing storage (default: true)
|
||||
create: true
|
||||
# -- Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "")
|
||||
name: ""
|
||||
|
||||
image:
|
||||
# -- Install image from specific repo
|
||||
repository: quay.io/coreos/etcd
|
||||
# -- Install image with specific tag, overwrite the tag in the chart
|
||||
tag: ""
|
||||
# -- Pull policy to use
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# -- The peer API port which servers are listening to.
|
||||
peerApiPort: 2380
|
||||
|
||||
# -- The client request port.
|
||||
clientPort: 2379
|
||||
|
||||
# -- The port where etcd exposes metrics.
|
||||
metricsPort: 2381
|
||||
|
||||
# -- The livenessProbe for the etcd container
|
||||
livenessProbe: {}
|
||||
# failureThreshold: 8
|
||||
# httpGet:
|
||||
# path: /health?serializable=true
|
||||
# port: 2381
|
||||
# scheme: HTTP
|
||||
# initialDelaySeconds: 10
|
||||
# periodSeconds: 10
|
||||
# timeoutSeconds: 15
|
||||
|
||||
# -- A list of extra arguments to add to the etcd default ones
|
||||
extraArgs: []
|
||||
#- --log-level=warn
|
||||
#- --logger=zap
|
||||
|
||||
# -- Interpret 'auto-compaction-retention' one of: periodic|revision. Use 'periodic' for duration based retention, 'revision' for revision number based retention.
|
||||
autoCompactionMode: periodic
|
||||
|
||||
# -- Auto compaction retention length. 0 means disable auto compaction.
|
||||
autoCompactionRetention: 5m
|
||||
|
||||
# -- Number of committed transactions to trigger a snapshot to disk.
|
||||
snapshotCount: "10000"
|
||||
|
||||
# -- Raise alarms when backend size exceeds the given quota. It will put the cluster into a maintenance mode which only accepts key reads and deletes.
|
||||
quotaBackendBytes: "8589934592" # 8Gi
|
||||
|
||||
persistentVolumeClaim:
|
||||
# -- The size of persistent storage for etcd data
|
||||
size: 10Gi
|
||||
# -- A specific storage class
|
||||
storageClassName: ""
|
||||
# -- The Access Mode to storage
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
# -- The custom annotations to add to the PVC
|
||||
customAnnotations: {}
|
||||
# volumeType: local
|
||||
|
||||
# -- Enable storage defragmentation
|
||||
defragmentation:
|
||||
# -- The job scheduled maintenance time for defrag (empty to disable)
|
||||
schedule: "*/15 * * * *" # https://crontab.guru/
|
||||
|
||||
# -- Enable storage backup
|
||||
backup:
|
||||
# -- Enable scheduling backup job
|
||||
enabled: false
|
||||
# -- Enable backup for all endpoints. When disabled, only the leader will be taken
|
||||
all: false
|
||||
# -- The job scheduled maintenance time for backup
|
||||
schedule: "20 3 * * *" # https://crontab.guru/
|
||||
# -- The backup file name prefix
|
||||
snapshotNamePrefix: mysnapshot
|
||||
# -- The backup file date format (bash)
|
||||
snapshotDateFormat: $(date +%Y%m%d)
|
||||
# -- The S3 storage config section
|
||||
s3:
|
||||
# -- The S3 storage url
|
||||
url: http://mys3storage:9000
|
||||
# -- The S3 storage bucket
|
||||
bucket: mybucket
|
||||
# -- The S3 storage object lifecycle management rules; N.B. enabling this option will delete previously set lifecycle rules
|
||||
retention: "" #"--expiry-days 7"
|
||||
# -- The S3 storage ACCESS KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret.
|
||||
accessKey:
|
      value: ""
      valueFrom: {}
      # secretKeyRef:
      #   key: access_key
      #   name: minio-key
    # -- The S3 storage SECRET KEY credential. The plain value has precedence over the valueFrom that can be used to retrieve the value from a Secret.
    secretKey:
      value: ""
      valueFrom: {}
      # secretKeyRef:
      #   key: secret_key
      #   name: minio-key
    # -- The S3 client image config section
    image:
      # -- Install image from specific repo
      repository: minio/mc
      # -- Install image with specific tag
      tag: "RELEASE.2022-11-07T23-47-39Z"
      # -- Pull policy to use
      pullPolicy: IfNotPresent

# -- Labels to add to all etcd pods
podLabels:
  application: kamaji-etcd

# -- Annotations to add to all etcd pods
podAnnotations: {}

# -- The securityContext to apply to etcd
securityContext:
  allowPrivilegeEscalation: false

# -- The priorityClassName to apply to etcd
priorityClassName: system-cluster-critical

# -- Resources assigned to the etcd containers
resources:
  limits: {}
  requests: {}

# -- Kubernetes node selector rules to schedule etcd
nodeSelector:
  kubernetes.io/os: linux

# -- Kubernetes node taints that the etcd pods would tolerate
tolerations: []

# -- Kubernetes affinity rules to apply to etcd controller pods
affinity: {}

# -- Kubernetes topology spread constraints to apply to etcd controller pods
topologySpreadConstraints: []
#- maxSkew: 1
#  topologyKey: topology.kubernetes.io/zone
#  whenUnsatisfiable: DoNotSchedule
#  labelSelector:
#    matchLabels:
#      application: kamaji-etcd

datastore:
  # -- Create a datastore custom resource for Kamaji
  enabled: false

serviceMonitor:
  # -- Enable ServiceMonitor for Prometheus
  enabled: false
  # -- Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
  namespace: ''
  # -- Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
  labels: {}
  # -- Assign additional Annotations
  annotations: {}
  # -- Change matching labels
  matchLabels: {}
  # -- Set targetLabels for the serviceMonitor
  targetLabels: []
  serviceAccount:
    # -- ServiceAccount for Metrics RBAC
    name: etcd
    # -- ServiceAccount Namespace for Metrics RBAC
    namespace: etcd-system
  endpoint:
    # -- Set the scrape interval for the endpoint of the serviceMonitor
    interval: "15s"
    # -- Set the scrape timeout for the endpoint of the serviceMonitor
    scrapeTimeout: ""
    # -- Set metricRelabelings for the endpoint of the serviceMonitor
    metricRelabelings: []
    # -- Set relabelings for the endpoint of the serviceMonitor
    relabelings: []
    #- action: replace
    #  regex: (.+)
    #  replacement: $1
    #  sourceLabels:
    #  - __meta_kubernetes_pod_name
    #  targetLabel: member
    #

alerts:
  # -- Enable alerts for Alertmanager
  enabled: false
  # -- Install the Alerts into a different Namespace, as the monitoring stack one (default: the release one)
  namespace: ''
  # -- Assign additional labels according to Prometheus' Alerts matching labels
  labels: {}
  # -- Assign additional Annotations
  annotations: {}
  # -- The rules for alerts
  rules: []
  # - alert: etcdNoLeader
  #   annotations:
  #     message: 'etcd cluster: member {{ $labels.instance }} has no leader.'
  #   expr: count(etcd_server_has_leader{job=~".*etcd.*"}) == 0
  #   for: 1m
  #   labels:
  #     severity: critical
  # - alert: EtcdDataBaseSize
  #   annotations:
  #     message: 'etcd cluster: "member {{ $labels.instance }} db has almost exceeded 8GB".'
  #   expr: |-
  #     etcd_mvcc_db_total_size_in_bytes{job=~".*etcd.*"} >= 8589934592
  #   for: 15m
  #   labels:
  #     severity: critical
  #
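The commented `secretKeyRef` blocks above show the alternative to inline credentials: both `accessKey` and `secretKey` can be taken from a Kubernetes Secret via `valueFrom`. A minimal sketch of that form, reusing the `minio-key` Secret name and the key names from the commented defaults (illustrative only, not a change made in this release):

    backup:
      s3:
        accessKey:
          value: ""
          valueFrom:
            secretKeyRef:
              key: access_key
              name: minio-key
        secretKey:
          value: ""
          valueFrom:
            secretKeyRef:
              key: secret_key
              name: minio-key

As the comment above notes, a non-empty plain `value` takes precedence over `valueFrom`.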
@@ -1,12 +0,0 @@
diff --git a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml
index 95a2671..bd8ddcb 100644
--- a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml
+++ b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/etcd_cm.yaml
@@ -57,6 +57,7 @@ data:
"hosts": [
{{- range $count := until (int $.Values.replicas) -}}
{{ printf "\"%s-%d.%s.%s.svc.cluster.local\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
+ {{ printf "\"%s-%d.%s.%s.svc\"," ( include "etcd.fullname" $outer ) $count (include "etcd.serviceName" $outer) $.Release.Namespace }}
{{- end }}
"etcd-server.{{ .Release.Namespace }}.svc.cluster.local",
"etcd-server.{{ .Release.Namespace }}.svc",
@@ -1,31 +0,0 @@
diff --git a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
index 4f7014e..403e187 100644
--- a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
+++ b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
@@ -9,8 +9,17 @@ Expand the name of the chart.
Create a default fully qualified etcd name.
*/}}
{{- define "etcd.fullname" -}}
-{{- .Release.Name }}
-{{- end }}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}

{{/*
Create chart name and version as used by the chart label.
@@ -156,4 +165,4 @@ Create the minio-client fully-qualified Docker image to use
*/}}
{{- define "minio-client.fullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.backup.s3.image.repository .Values.backup.s3.image.tag -}}
-{{- end }}
\ No newline at end of file
+{{- end }}
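For orientation, the replacement `etcd.fullname` definition in the deleted patch above follows the common Helm fullname convention. Under that template logic, and assuming the chart name `kamaji-etcd` (the release names here are illustrative, not taken from the repository), it resolves roughly as:

- `fullnameOverride` set in values: that value, truncated to 63 characters and stripped of a trailing `-`;
- a release name that already contains the chart name, e.g. `kamaji-etcd`: the release name itself;
- any other release name, e.g. `tenant1`: `tenant1-kamaji-etcd`.

The previous one-line definition always returned the bare `.Release.Name`.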
@@ -1,16 +0,0 @@
diff --git a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
index 403e187..e68a967 100644
--- a/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
+++ b/packages/system/kamaji-etcd/charts/kamaji-etcd/templates/_helpers.tpl
@@ -119,11 +119,7 @@ Name of the etcd root-client secret.
Retrieve the current Kubernetes version to launch a kubectl container with the minimum version skew possible.
*/}}
{{- define "etcd.jobsTagKubeVersion" -}}
-{{- if contains "-eks-" .Capabilities.KubeVersion.GitVersion }}
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
-{{- else }}
-{{- print "v" .Capabilities.KubeVersion.Major "." .Capabilities.KubeVersion.Minor -}}
-{{- end }}
{{- end }}

{{/*
@@ -1,33 +0,0 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: {{ .Release.Namespace }}
spec:
  driver: etcd
  endpoints:
  - etcd-0.etcd.{{ .Release.Namespace }}.svc:2379
  - etcd-1.etcd.{{ .Release.Namespace }}.svc:2379
  - etcd-2.etcd.{{ .Release.Namespace }}.svc:2379
  tlsConfig:
    certificateAuthority:
      certificate:
        secretReference:
          keyPath: ca.crt
          name: etcd-certs
          namespace: {{ .Release.Namespace }}
      privateKey:
        secretReference:
          keyPath: ca.key
          name: etcd-certs
          namespace: {{ .Release.Namespace }}
    clientCertificate:
      certificate:
        secretReference:
          keyPath: tls.crt
          name: etcd-root-client-certs
          namespace: {{ .Release.Namespace }}
      privateKey:
        secretReference:
          keyPath: tls.key
          name: etcd-root-client-certs
          namespace: {{ .Release.Namespace }}
@@ -3,7 +3,7 @@ kamaji:
deploy: false
image:
pullPolicy: IfNotPresent
-tag: v0.33.1@sha256:09fc5c9aeb97880780abfc6d82c216725d6f79e13494bf2399766c882b88f66b
+tag: v0.34.0-beta.1@sha256:09465ae8285b4ae43203581e443409cd4e1e119dde62a5c14d63ce064fb840b0
repository: ghcr.io/cozystack/cozystack/kamaji
resources:
limits:

@@ -1,3 +1,3 @@
portSecurity: true
routes: ""
-image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.33.1@sha256:595851560856e3ba7f408f259acf84599494984a9f0252de289bcb1a7fc5b9da
+image: ghcr.io/cozystack/cozystack/kubeovn-webhook:v0.34.0-beta.1@sha256:c7f42022280a565da8b3091ed2f4fe2768fcd392327d23172a532c24794787c6

@@ -8,13 +8,13 @@ include ../../../scripts/package.mk

update:
	rm -rf charts && mkdir -p charts/kube-ovn
-	tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/kubeovn/kube-ovn | awk -F'[/^]' 'END{print $$3}') && \
+	tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/kubeovn/kube-ovn | awk -F'[/^]' '{print $$3}' | grep '^v1\.13\.' | tail -n1 ) && \
	curl -sSL https://github.com/kubeovn/kube-ovn/archive/refs/tags/$${tag}.tar.gz | \
	tar xzvf - --strip 1 kube-ovn-$${tag#*v}/charts
	patch --no-backup-if-mismatch -p4 < patches/cozyconfig.diff
	patch --no-backup-if-mismatch -p4 < patches/mtu.diff
	version=$$(awk '$$1 == "version:" {print $$2}' charts/kube-ovn/Chart.yaml) && \
-	sed -i "s/ARG VERSION=.*/ARG VERSION=$${version}/" images/kubeovn/Dockerfile
+	sed -i "s/ARG VERSION=.*/ARG VERSION=$${version}/" images/kubeovn/Dockerfile && \
+	sed -i "s/ARG TAG=.*/ARG TAG=$${version}/" images/kubeovn/Dockerfile

image:

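For context on the Makefile change above: the old `awk 'END{print $$3}'` pipeline took whatever tag `git ls-remote` listed last, so it would jump to a newer minor series (for example v1.14.x) as soon as one was tagged upstream, while the new pipeline pins the chart update to the latest v1.13.x tag. Run outside make, with `$$` unescaped to `$`, the new selection looks roughly like this:

    # List upstream tags sorted by version, keep only v1.13.x, take the newest one.
    git ls-remote --tags --sort="v:refname" https://github.com/kubeovn/kube-ovn \
      | awk -F'[/^]' '{print $3}' \
      | grep '^v1\.13\.' \
      | tail -n1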
@@ -15,12 +15,12 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: v1.13.13
+version: v1.13.14

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "1.13.13"
+appVersion: "1.13.14"

kubeVersion: ">= 1.23.0-0"

@@ -10,7 +10,7 @@ global:
repository: kube-ovn
dpdkRepository: kube-ovn-dpdk
vpcRepository: vpc-nat-gateway
-tag: v1.13.13
+tag: v1.13.14
support_arm: true
thirdparty: true


@@ -1,10 +1,10 @@
# syntax = docker/dockerfile:experimental
-ARG VERSION=v1.13.13
+ARG VERSION=v1.13.14
ARG BASE_TAG=$VERSION

FROM golang:1.23-bookworm as builder

-ARG TAG=v1.13.13
+ARG TAG=v1.13.14
RUN git clone --branch ${TAG} --depth 1 https://github.com/kubeovn/kube-ovn /source

WORKDIR /source

@@ -64,4 +64,4 @@ global:
images:
kubeovn:
repository: kubeovn
-tag: v1.13.13@sha256:c0ffc9a0498b6f8fc392f8fc6ea43d0c7eedeeabda8ef96bca004ec4466a6bf2
+tag: v1.13.14@sha256:ebcb35362cf522392a1f3daf5460b9d2b3054aeaf8a75415b08d41343f16a7ec

@@ -110,9 +110,9 @@ spec:
description: CDIConfig at CDI level
properties:
dataVolumeTTLSeconds:
-description: DataVolumeTTLSeconds is the time in seconds after
-DataVolume completion it can be garbage collected. Disabled
-by default.
+description: |-
+DataVolumeTTLSeconds is the time in seconds after DataVolume completion it can be garbage collected. Disabled by default.
+Deprecated: Removed in v1.62.
format: int32
type: integer
featureGates:
@@ -2642,9 +2642,9 @@ spec:
description: CDIConfig at CDI level
properties:
dataVolumeTTLSeconds:
-description: DataVolumeTTLSeconds is the time in seconds after
-DataVolume completion it can be garbage collected. Disabled
-by default.
+description: |-
+DataVolumeTTLSeconds is the time in seconds after DataVolume completion it can be garbage collected. Disabled by default.
+Deprecated: Removed in v1.62.
format: int32
type: integer
featureGates:
@@ -5683,6 +5683,8 @@ spec:
strategy: {}
template:
metadata:
+annotations:
+openshift.io/required-scc: restricted-v2
labels:
cdi.kubevirt.io: cdi-operator
name: cdi-operator
@@ -5706,27 +5708,27 @@ spec:
- name: DEPLOY_CLUSTER_RESOURCES
value: "true"
- name: OPERATOR_VERSION
-value: v1.61.0
+value: v1.62.0
- name: CONTROLLER_IMAGE
-value: quay.io/kubevirt/cdi-controller:v1.61.0
+value: quay.io/kubevirt/cdi-controller:v1.62.0
- name: IMPORTER_IMAGE
-value: quay.io/kubevirt/cdi-importer:v1.61.0
+value: quay.io/kubevirt/cdi-importer:v1.62.0
- name: CLONER_IMAGE
-value: quay.io/kubevirt/cdi-cloner:v1.61.0
+value: quay.io/kubevirt/cdi-cloner:v1.62.0
- name: OVIRT_POPULATOR_IMAGE
-value: quay.io/kubevirt/cdi-importer:v1.61.0
+value: quay.io/kubevirt/cdi-importer:v1.62.0
- name: APISERVER_IMAGE
-value: quay.io/kubevirt/cdi-apiserver:v1.61.0
+value: quay.io/kubevirt/cdi-apiserver:v1.62.0
- name: UPLOAD_SERVER_IMAGE
-value: quay.io/kubevirt/cdi-uploadserver:v1.61.0
+value: quay.io/kubevirt/cdi-uploadserver:v1.62.0
- name: UPLOAD_PROXY_IMAGE
-value: quay.io/kubevirt/cdi-uploadproxy:v1.61.0
+value: quay.io/kubevirt/cdi-uploadproxy:v1.62.0
- name: VERBOSITY
value: "1"
- name: PULL_POLICY
value: IfNotPresent
- name: MONITORING_NAMESPACE
-image: quay.io/kubevirt/cdi-operator:v1.61.0
+image: quay.io/kubevirt/cdi-operator:v1.62.0
imagePullPolicy: IfNotPresent
name: cdi-operator
ports:

@@ -1,3 +1,3 @@
storageClass: replicated
csiDriver:
-image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.1@sha256:445c2727b04ac68595b43c988ff17b3d69a7b22b0644fde3b10c65b47a7bc036
+image: ghcr.io/cozystack/cozystack/kubevirt-csi-driver:0.25.2@sha256:761e7235ff9cb7f6f223f00954943e6a5af32ed6624ee592a8610122f96febb0

@@ -594,6 +594,13 @@ spec:
If set to true, migrations will still start in pre-copy, but switch to post-copy when
CompletionTimeoutPerGiB triggers. Defaults to false
type: boolean
+allowWorkloadDisruption:
+description: |-
+AllowWorkloadDisruption indicates that the migration shouldn't be
+canceled after acceptableCompletionTime is exceeded. Instead, if
+permitted, migration will be switched to post-copy or the VMI will be
+paused to allow the migration to complete
+type: boolean
bandwidthPerMigration:
anyOf:
- type: integer
@@ -606,8 +613,8 @@ spec:
completionTimeoutPerGiB:
description: |-
CompletionTimeoutPerGiB is the maximum number of seconds per GiB a migration is allowed to take.
-If a live-migration takes longer to migrate than this value multiplied by the size of the VMI,
-the migration will be cancelled, unless AllowPostCopy is true. Defaults to 150
+If the timeout is reached, the migration will be either paused, switched
+to post-copy or cancelled depending on other settings. Defaults to 150
format: int64
type: integer
disableTLS:
@@ -965,17 +972,17 @@ spec:
type: object
type: object
vmRolloutStrategy:
-description: VMRolloutStrategy defines how changes to a VM object
-propagate to its VMI
+description: |-
+VMRolloutStrategy defines how live-updatable fields, like CPU sockets, memory,
+tolerations, and affinity, are propagated from a VM to its VMI.
enum:
- Stage
- LiveUpdate
nullable: true
type: string
vmStateStorageClass:
-description: |-
-VMStateStorageClass is the name of the storage class to use for the PVCs created to preserve VM state, like TPM.
-The storage class must support RWX in filesystem mode.
+description: VMStateStorageClass is the name of the storage class
+to use for the PVCs created to preserve VM state, like TPM.
type: string
webhookConfiguration:
description: |-
@@ -3851,6 +3858,13 @@ spec:
If set to true, migrations will still start in pre-copy, but switch to post-copy when
CompletionTimeoutPerGiB triggers. Defaults to false
type: boolean
+allowWorkloadDisruption:
+description: |-
+AllowWorkloadDisruption indicates that the migration shouldn't be
+canceled after acceptableCompletionTime is exceeded. Instead, if
+permitted, migration will be switched to post-copy or the VMI will be
+paused to allow the migration to complete
+type: boolean
bandwidthPerMigration:
anyOf:
- type: integer
@@ -3863,8 +3877,8 @@ spec:
completionTimeoutPerGiB:
description: |-
CompletionTimeoutPerGiB is the maximum number of seconds per GiB a migration is allowed to take.
-If a live-migration takes longer to migrate than this value multiplied by the size of the VMI,
-the migration will be cancelled, unless AllowPostCopy is true. Defaults to 150
+If the timeout is reached, the migration will be either paused, switched
+to post-copy or cancelled depending on other settings. Defaults to 150
format: int64
type: integer
disableTLS:
@@ -4222,17 +4236,17 @@ spec:
type: object
type: object
vmRolloutStrategy:
-description: VMRolloutStrategy defines how changes to a VM object
-propagate to its VMI
+description: |-
+VMRolloutStrategy defines how live-updatable fields, like CPU sockets, memory,
+tolerations, and affinity, are propagated from a VM to its VMI.
enum:
- Stage
- LiveUpdate
nullable: true
type: string
vmStateStorageClass:
-description: |-
-VMStateStorageClass is the name of the storage class to use for the PVCs created to preserve VM state, like TPM.
-The storage class must support RWX in filesystem mode.
+description: VMStateStorageClass is the name of the storage class
+to use for the PVCs created to preserve VM state, like TPM.
type: string
webhookConfiguration:
description: |-
@@ -7141,6 +7155,7 @@ rules:
resources:
- virtualmachinesnapshots
- virtualmachinesnapshots/status
- virtualmachinesnapshots/finalizers
- virtualmachinesnapshotcontents
- virtualmachinesnapshotcontents/status
- virtualmachinesnapshotcontents/finalizers
@@ -7193,15 +7208,18 @@ rules:
- kubevirt.io
resources:
- virtualmachines/finalizers
- virtualmachineinstances/finalizers
verbs:
- update
- apiGroups:
- subresources.kubevirt.io
resources:
- virtualmachines/stop
- virtualmachineinstances/addvolume
- virtualmachineinstances/removevolume
- virtualmachineinstances/freeze
- virtualmachineinstances/unfreeze
- virtualmachineinstances/reset
- virtualmachineinstances/softreboot
- virtualmachineinstances/sev/setupsession
- virtualmachineinstances/sev/injectlaunchsecret
@@ -7305,6 +7323,14 @@ rules:
verbs:
- list
- watch
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- get
- delete
- apiGroups:
- kubevirt.io
resources:
@@ -7416,6 +7442,7 @@ rules:
- virtualmachineinstances/freeze
- virtualmachineinstances/unfreeze
- virtualmachineinstances/softreboot
- virtualmachineinstances/reset
- virtualmachineinstances/sev/setupsession
- virtualmachineinstances/sev/injectlaunchsecret
verbs:
@@ -7435,7 +7462,6 @@ rules:
- virtualmachines/restart
- virtualmachines/addvolume
- virtualmachines/removevolume
- virtualmachines/migrate
- virtualmachines/memorydump
verbs:
- update
@@ -7452,7 +7478,6 @@ rules:
- virtualmachineinstances
- virtualmachineinstancepresets
- virtualmachineinstancereplicasets
- virtualmachineinstancemigrations
verbs:
- get
- delete
@@ -7462,6 +7487,14 @@ rules:
- list
- watch
- deletecollection
- apiGroups:
- kubevirt.io
resources:
- virtualmachineinstancemigrations
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.kubevirt.io
resources:
@@ -7565,6 +7598,7 @@ rules:
- virtualmachineinstances/freeze
- virtualmachineinstances/unfreeze
- virtualmachineinstances/softreboot
- virtualmachineinstances/reset
- virtualmachineinstances/sev/setupsession
- virtualmachineinstances/sev/injectlaunchsecret
verbs:
@@ -7584,7 +7618,6 @@ rules:
- virtualmachines/restart
- virtualmachines/addvolume
- virtualmachines/removevolume
- virtualmachines/migrate
- virtualmachines/memorydump
verbs:
- update
@@ -7601,7 +7634,6 @@ rules:
- virtualmachineinstances
- virtualmachineinstancepresets
- virtualmachineinstancereplicasets
- virtualmachineinstancemigrations
verbs:
- get
- delete
@@ -7610,6 +7642,14 @@ rules:
- patch
- list
- watch
- apiGroups:
- kubevirt.io
resources:
- virtualmachineinstancemigrations
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.kubevirt.io
resources:
@@ -7788,6 +7828,25 @@ rules:
- get
- list
- watch
- apiGroups:
- subresources.kubevirt.io
resources:
- virtualmachines/migrate
verbs:
- update
- apiGroups:
- kubevirt.io
resources:
- virtualmachineinstancemigrations
verbs:
- get
- delete
- create
- update
- patch
- list
- watch
- deletecollection
- apiGroups:
- authentication.k8s.io
resources:
@@ -7833,6 +7892,8 @@ spec:
type: RollingUpdate
template:
metadata:
+annotations:
+openshift.io/required-scc: restricted-v2
labels:
kubevirt.io: virt-operator
name: virt-operator
@@ -7861,14 +7922,14 @@ spec:
- virt-operator
env:
- name: VIRT_OPERATOR_IMAGE
-value: quay.io/kubevirt/virt-operator:v1.4.0
+value: quay.io/kubevirt/virt-operator:v1.5.2
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.annotations['olm.targetNamespaces']
- name: KUBEVIRT_VERSION
-value: v1.4.0
-image: quay.io/kubevirt/virt-operator:v1.4.0
+value: v1.5.2
+image: quay.io/kubevirt/virt-operator:v1.5.2
imagePullPolicy: IfNotPresent
name: virt-operator
ports:

@@ -3,8 +3,8 @@ name: piraeus
description: |
  The Piraeus Operator manages software defined storage clusters using LINSTOR in Kubernetes.
type: application
-version: 2.8.1
-appVersion: "v2.8.1"
+version: 2.9.0
+appVersion: "v2.9.0"
maintainers:
  - name: Piraeus Datastore
    url: https://piraeus.io

Some files were not shown because too many files have changed in this diff.