Mirror of https://github.com/outbackdingo/cozystack.git — synced 2026-02-05 00:15:51 +00:00

Compare commits: v0.25.0...talos-firm (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | bcd3c5c356 |  |

Makefile (1 change)
@@ -36,7 +36,6 @@ assets:
	make -C packages/core/installer/ assets

test:
	test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
	make -C packages/core/testing apply
	make -C packages/core/testing test
	make -C packages/core/testing test-applications
dashboards/control-plane/kube-etcd3.json (new file, 1602 lines) — file diff suppressed because it is too large
(further large file diffs suppressed because they are too large)
@@ -21,7 +21,7 @@ fix_d8() {
}

swap_pvc_overview() {
  jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
  jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview")) | ( (.panels[] | select(.title=="PVC Detailed"))) as $b | del( .panels[] | select(.title=="PVC Detailed")) | (.panels[.panels|length]=($a|.gridPos.y=$b.gridPos.y)) | (.panels[.panels|length]=($b|.gridPos.y=$a.gridPos.y))'
}

deprectaed_remove_faq() {

@@ -68,7 +68,7 @@ modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/namespace/
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhost_detail.json
modules/402-ingress-nginx/monitoring/grafana-dashboards/ingress-nginx/vhost/vhosts.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/control-plane-status.json
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/kube-etcd3.json #TODO
modules/340-monitoring-kubernetes-control-plane/monitoring/grafana-dashboards/kubernetes-cluster/deprecated-resources.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/ntp.json #TODO
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//kubernetes-cluster/nodes/nodes.json

@@ -78,8 +78,6 @@ modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/pod.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespaces.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/namespace/namespace.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//main/capacity-planning/capacity-planning.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-control-plane.json
modules/340-monitoring-kubernetes/monitoring/grafana-dashboards//flux/flux-stats.json
EOT

@@ -111,3 +109,4 @@ done <<\EOT
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-namespaces.json
https://raw.githubusercontent.com/dotdc/grafana-dashboards-kubernetes/master/dashboards/k8s-views-pods.json
EOT
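For reference, the swap_pvc_overview() helper above is a self-contained jq program; below is a minimal sketch of running it by hand against a single dashboard file (the file name is illustrative, not taken from the repository):

```sh
#!/bin/sh
# Illustrative only: apply a swap_pvc_overview-style jq program to one
# dashboard file and print the resulting top-level panel order.
in=pvc-dashboard.json   # assumed local copy of a Grafana dashboard
tmp=$(mktemp)
jq '(.panels[] | select(.title=="PVC Detailed") | .panels[] | select(.title=="Overview")) as $a
    | del(.panels[] | select(.title=="PVC Detailed").panels[] | select(.title=="Overview"))
    | ((.panels[] | select(.title=="PVC Detailed"))) as $b
    | del(.panels[] | select(.title=="PVC Detailed"))
    | (.panels[.panels|length] = ($a | .gridPos.y = $b.gridPos.y))
    | (.panels[.panels|length] = ($b | .gridPos.y = $a.gridPos.y))' "$in" > "$tmp"
jq -r '.panels[].title' "$tmp"   # the two swapped panels should now appear last
mv "$tmp" "$in"
```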
@@ -68,7 +68,7 @@ spec:
      serviceAccountName: cozystack
      containers:
      - name: cozystack
        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.25.0"
        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.23.1"
        env:
        - name: KUBERNETES_SERVICE_HOST
          value: localhost
@@ -86,12 +86,13 @@ spec:
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
      - name: assets
        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.25.0"
      - name: darkhttpd
        image: "ghcr.io/aenix-io/cozystack/cozystack:v0.23.1"
        command:
        - /usr/bin/cozystack-assets-server
        - "-dir=/cozystack/assets"
        - "-address=:8123"
        - /usr/bin/darkhttpd
        - /cozystack/assets
        - --port
        - "8123"
        ports:
        - name: http
          containerPort: 8123
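Both variants of the second container serve the installer assets on container port 8123 (cozystack-assets-server in one case, darkhttpd in the other). A quick, hedged way to confirm the port answers — the namespace and deployment name are assumptions based on the manifests elsewhere in this diff:

```sh
# Illustrative check that the assets container answers on port 8123.
kubectl -n cozy-system port-forward deploy/cozystack 8123:8123 &
sleep 2
curl -sI http://127.0.0.1:8123/ | head -n 1   # expect an HTTP status line
kill %1
```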
@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:f30e9bd4b8a2a4362b88ac8974b12ed1ded631c4eeea87831bb446bb302b2e29
ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:a3c25199acb8e8426e6952658ccc4acaadb50fe2cfa6359743b64e5166b3fc70

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.0@sha256:73701e37727eedaafdf9efe4baefcf0835f064ee8731219f0c0186c0d0781a5c
ghcr.io/aenix-io/cozystack/cluster-autoscaler:0.15.0@sha256:538ee308f16c9e627ed16ee7c4aaa65919c2e6c4c2778f964a06e4797610d1cd

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.0@sha256:5efee8f76d0948936255c4cabad58057523502f77ceb40216ffae7e02841f85b
ghcr.io/aenix-io/cozystack/kubevirt-cloud-provider:0.15.0@sha256:7716c88947d13dc90ccfcc3e60bfdd6e6fa9b201339a75e9c84bf825c76e2b1f

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.15.0@sha256:daac6de8ad25de9d30eb5e928271cebaaa59193889231a54fd4f60e8a1c5f51a
ghcr.io/aenix-io/cozystack/kubevirt-csi-driver:0.15.0@sha256:be5e0eef92dada3ace5cddda5c68b30c9fe4682774c5e6e938ed31efba11ebbf

@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:7bacb7853e656b6149c3a38c5cd925891011eb94358d4d11e154082ab8b3a643
ghcr.io/aenix-io/cozystack/ubuntu-container-disk:v1.30.1@sha256:8392f00a7182294ce6fd417d254f7c2aa09fb9203d829dec70344a8050369430
@@ -109,15 +109,13 @@ virtual-machine 0.4.0 4746d51
virtual-machine 0.5.0 cad9cde
virtual-machine 0.6.0 0e728870
virtual-machine 0.7.0 af58018a
virtual-machine 0.7.1 05857b95
virtual-machine 0.8.0 HEAD
virtual-machine 0.7.1 HEAD
vm-disk 0.1.0 HEAD
vm-instance 0.1.0 ced8e5b9
vm-instance 0.2.0 4f767ee3
vm-instance 0.3.0 0e728870
vm-instance 0.4.0 af58018a
vm-instance 0.4.1 05857b95
vm-instance 0.5.0 HEAD
vm-instance 0.4.1 HEAD
vpn 0.1.0 f642698
vpn 0.2.0 7151424
vpn 0.3.0 a2bcf100
@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.8.0
version: 0.7.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.8.0"
appVersion: "0.7.1"
@@ -8,4 +8,3 @@ generate:
	&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
	yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
	yq -i -o json '.properties.systemDisk.properties.image.enum = ["ubuntu", "cirros", "alpine", "fedora", "talos"]' values.schema.json
	yq -i -o json '.properties.externalMethod.enum = ["wholeIP", "PortList"]' values.schema.json
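The generate target edits values.schema.json in place with yq. The sketch below repeats one such edit on its own and prints the resulting property; it assumes you run it from a chart directory that already contains a values.schema.json:

```sh
# Illustrative: constrain a schema property to an enum, then inspect it.
yq -i -o json '.properties.externalMethod.enum = ["wholeIP", "PortList"]' values.schema.json
yq -o json '.properties.externalMethod' values.schema.json
```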
@@ -39,7 +39,6 @@ virtctl ssh <user>@<vm>
| Name | Description | Value |
| --- | --- | --- |
| `external` | Enable external access from outside the cluster | `false` |
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
| `running` | Determines if the virtual machine should be running | `true` |
| `instanceType` | Virtual Machine instance type | `u1.medium` |
@@ -6,10 +6,6 @@ metadata:
  name: {{ include "virtual-machine.fullname" . }}
  labels:
    {{- include "virtual-machine.labels" . | nindent 4 }}
  {{- if eq .Values.externalMethod "WholeIP" }}
  annotations:
    networking.cozystack.io/wholeIP: "true"
  {{- end }}
spec:
  type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
  externalTrafficPolicy: Local
@@ -17,13 +13,9 @@ spec:
  selector:
    {{- include "virtual-machine.labels" . | nindent 4 }}
  ports:
  {{- if eq .Values.externalMethod "WholeIP" }}
  - port: 65535
  {{- else }}
  {{- range .Values.externalPorts }}
  - name: port-{{ . }}
    port: {{ . }}
    targetPort: {{ . }}
  {{- end }}
  {{- end }}
{{- end }}
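The template above switches between a single port 65535 entry for WholeIP and one named port per entry of externalPorts for PortList. A hedged way to see the rendered difference locally — the chart path and template file name are assumptions about the repository layout:

```sh
# Illustrative: render the Service in PortList mode with two forwarded ports.
helm template my-vm packages/apps/virtual-machine \
  --set external=true \
  --set externalMethod=PortList \
  --set 'externalPorts={22,80}' \
  --show-only templates/service.yaml
```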
@@ -7,15 +7,6 @@
      "description": "Enable external access from outside the cluster",
      "default": false
    },
    "externalMethod": {
      "type": "string",
      "description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
      "default": "WholeIP",
      "enum": [
        "wholeIP",
        "PortList"
      ]
    },
    "externalPorts": {
      "type": "array",
      "description": "Specify ports to forward from outside the cluster",
@@ -1,10 +1,8 @@
## @section Common parameters

## @param external Enable external access from outside the cluster
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
## @param externalPorts [array] Specify ports to forward from outside the cluster
external: false
externalMethod: WholeIP
externalPorts:
- 22
@@ -17,10 +17,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.5.0
version: 0.4.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.5.0"
appVersion: "0.4.1"
@@ -8,4 +8,3 @@ generate:
	PREFERENCES=$$(yq e '.metadata.name' -o=json -r ../../system/kubevirt-instancetypes/templates/preferences.yaml | yq 'split(" ") | . + [""]' -o json) \
	&& yq -i -o json ".properties.instanceProfile.optional=true | .properties.instanceProfile.enum = $${PREFERENCES}" values.schema.json
	yq -i -o json '.properties.externalPorts.items.type = "integer"' values.schema.json
	yq -i -o json '.properties.externalMethod.enum = ["WholeIP", "PortList"]' values.schema.json
@@ -36,19 +36,18 @@ virtctl ssh <user>@<vm>
|
||||
|
||||
### Common parameters
|
||||
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalMethod` | specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList` | `WholeIP` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
|
||||
| `disks` | List of disks to attach | `[]` |
|
||||
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
|
||||
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
|
||||
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
|
||||
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
|
||||
| Name | Description | Value |
|
||||
| ------------------ | ---------------------------------------------------------------------------------- | ---------------- |
|
||||
| `external` | Enable external access from outside the cluster | `false` |
|
||||
| `externalPorts` | Specify ports to forward from outside the cluster | `[]` |
|
||||
| `running` | Determines if the virtual machine should be running | `true` |
|
||||
| `instanceType` | Virtual Machine instance type | `u1.medium` |
|
||||
| `instanceProfile` | Virtual Machine prefferences profile | `ubuntu` |
|
||||
| `disks` | List of disks to attach | `[]` |
|
||||
| `resources.cpu` | The number of CPU cores allocated to the virtual machine | `""` |
|
||||
| `resources.memory` | The amount of memory allocated to the virtual machine | `""` |
|
||||
| `sshKeys` | List of SSH public keys for authentication. Can be a single key or a list of keys. | `[]` |
|
||||
| `cloudInit` | cloud-init user data config. See cloud-init documentation for more details. | `#cloud-config
|
||||
` |
|
||||
|
||||
## U Series
|
||||
|
||||
@@ -6,10 +6,6 @@ metadata:
  name: {{ include "virtual-machine.fullname" . }}
  labels:
    {{- include "virtual-machine.labels" . | nindent 4 }}
  {{- if eq .Values.externalMethod "WholeIP" }}
  annotations:
    networking.cozystack.io/wholeIP: "true"
  {{- end }}
spec:
  type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
  externalTrafficPolicy: Local
@@ -17,13 +13,9 @@ spec:
  selector:
    {{- include "virtual-machine.labels" . | nindent 4 }}
  ports:
  {{- if eq .Values.externalMethod "WholeIP" }}
  - port: 65535
  {{- else }}
  {{- range .Values.externalPorts }}
  - name: port-{{ . }}
    port: {{ . }}
    targetPort: {{ . }}
  {{- end }}
  {{- end }}
{{- end }}
@@ -12,7 +12,7 @@ metadata:
  labels:
    {{- include "virtual-machine.labels" . | nindent 4 }}
spec:
  running: {{ .Values.running }}
  running: {{ .Values.running | default "true" }}
  {{- with .Values.instanceType }}
  instancetype:
    kind: VirtualMachineClusterInstancetype
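The two running: lines above differ only in the `| default "true"` fallback; the rendered output can be compared locally. The chart path and template file name below are assumptions, and the render may need additional values set:

```sh
# Illustrative: compare the rendered running: field with and without an explicit value.
helm template vm packages/apps/vm-instance --show-only templates/vm.yaml | grep 'running:'
helm template vm packages/apps/vm-instance --set running=false \
  --show-only templates/vm.yaml | grep 'running:'
```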
@@ -7,15 +7,6 @@
      "description": "Enable external access from outside the cluster",
      "default": false
    },
    "externalMethod": {
      "type": "string",
      "description": "specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`",
      "default": "WholeIP",
      "enum": [
        "WholeIP",
        "PortList"
      ]
    },
    "externalPorts": {
      "type": "array",
      "description": "Specify ports to forward from outside the cluster",
@@ -1,10 +1,8 @@
## @section Common parameters

## @param external Enable external access from outside the cluster
## @param externalMethod specify method to passthrough the traffic to the virtual machine. Allowed values: `WholeIP` and `PortList`
## @param externalPorts [array] Specify ports to forward from outside the cluster
external: false
externalMethod: WholeIP
externalPorts:
- 22
@@ -1,3 +1,3 @@
talos:
  imager:
    image: ghcr.io/siderolabs/imager:v1.9.3
    image: ghcr.io/siderolabs/imager:v1.9.2
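The pinned imager builds the Talos artifacts described by the profiles that follow. A minimal sketch of invoking it directly — the output directory and profile file name are assumptions, the tag should match whichever version this file actually pins, and the imager is commonly fed a profile on stdin:

```sh
# Illustrative: feed one of the profiles below to the Sidero Labs imager on stdin.
mkdir -p _out
docker run --rm -i -v "$PWD/_out:/out" \
  ghcr.io/siderolabs/imager:v1.9.3 - < profiles/nocloud.yaml
ls _out   # expect the generated image artifact(s)
```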
@@ -3,14 +3,14 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.3
|
||||
version: v1.9.2
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.2
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
@@ -19,8 +19,8 @@ input:
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.2
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.2
|
||||
output:
|
||||
kind: initramfs
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,14 +3,14 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.3
|
||||
version: v1.9.2
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.2
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
@@ -19,8 +19,8 @@ input:
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.2
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.2
|
||||
output:
|
||||
kind: installer
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,14 +3,14 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.3
|
||||
version: v1.9.2
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.2
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
@@ -19,8 +19,8 @@ input:
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.2
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.2
|
||||
output:
|
||||
kind: iso
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,14 +3,14 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.3
|
||||
version: v1.9.2
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.2
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
@@ -19,8 +19,8 @@ input:
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.2
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.2
|
||||
output:
|
||||
kind: kernel
|
||||
imageOptions: {}
|
||||
|
||||
@@ -3,14 +3,14 @@
|
||||
arch: amd64
|
||||
platform: metal
|
||||
secureboot: false
|
||||
version: v1.9.3
|
||||
version: v1.9.2
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.2
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
@@ -19,8 +19,8 @@ input:
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.2
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.2
|
||||
output:
|
||||
kind: image
|
||||
imageOptions: { diskSize: 1306525696, diskFormat: raw }
|
||||
|
||||
@@ -3,14 +3,14 @@
|
||||
arch: amd64
|
||||
platform: nocloud
|
||||
secureboot: false
|
||||
version: v1.9.3
|
||||
version: v1.9.2
|
||||
input:
|
||||
kernel:
|
||||
path: /usr/install/amd64/vmlinuz
|
||||
initramfs:
|
||||
path: /usr/install/amd64/initramfs.xz
|
||||
baseInstaller:
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.3
|
||||
imageRef: ghcr.io/siderolabs/installer:v1.9.2
|
||||
systemExtensions:
|
||||
- imageRef: ghcr.io/siderolabs/amd-ucode:20250109
|
||||
- imageRef: ghcr.io/siderolabs/amdgpu-firmware:20241110
|
||||
@@ -19,8 +19,8 @@ input:
|
||||
- imageRef: ghcr.io/siderolabs/intel-ice-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/intel-ucode:20241112
|
||||
- imageRef: ghcr.io/siderolabs/qlogic-firmware:20250109
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.3
|
||||
- imageRef: ghcr.io/siderolabs/drbd:9.2.12-v1.9.2
|
||||
- imageRef: ghcr.io/siderolabs/zfs:2.2.7-v1.9.2
|
||||
output:
|
||||
kind: image
|
||||
imageOptions: { diskSize: 1306525696, diskFormat: raw }
|
||||
|
||||
@@ -1,2 +1,2 @@
cozystack:
  image: ghcr.io/aenix-io/cozystack/cozystack:v0.25.0@sha256:86860fd1ddcd9709cb2a756409bc4983c44cd09b961d3b677469767fc93a7e36
  image: ghcr.io/aenix-io/cozystack/cozystack:v0.23.1@sha256:dfa803a3e02ec9ea221029d361aa9d7aef0b5eb0a36d66c949b265d4ac4fc114
@@ -31,13 +31,6 @@ releases:
|
||||
autoDirectNodeRoutes: true
|
||||
routingMode: native
|
||||
|
||||
- name: cozy-proxy
|
||||
releaseName: cozystack
|
||||
chart: cozy-cozy-proxy
|
||||
namespace: cozy-system
|
||||
optional: true
|
||||
dependsOn: [cilium]
|
||||
|
||||
- name: cert-manager-crds
|
||||
releaseName: cert-manager-crds
|
||||
chart: cozy-cert-manager-crds
|
||||
@@ -82,10 +75,6 @@ releases:
|
||||
privileged: true
|
||||
optional: true
|
||||
dependsOn: [cilium,victoria-metrics-operator]
|
||||
values:
|
||||
scrapeRules:
|
||||
etcd:
|
||||
enabled: true
|
||||
|
||||
- name: metallb
|
||||
releaseName: metallb
|
||||
|
||||
@@ -58,10 +58,6 @@ releases:
|
||||
privileged: true
|
||||
optional: true
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
values:
|
||||
scrapeRules:
|
||||
etcd:
|
||||
enabled: true
|
||||
|
||||
- name: etcd-operator
|
||||
releaseName: etcd-operator
|
||||
|
||||
@@ -50,12 +50,6 @@ releases:
|
||||
SVC_CIDR: "{{ index $cozyConfig.data "ipv4-svc-cidr" }}"
|
||||
JOIN_CIDR: "{{ index $cozyConfig.data "ipv4-join-cidr" }}"
|
||||
|
||||
- name: cozy-proxy
|
||||
releaseName: cozystack
|
||||
chart: cozy-cozy-proxy
|
||||
namespace: cozy-system
|
||||
dependsOn: [cilium,kubeovn]
|
||||
|
||||
- name: cert-manager-crds
|
||||
releaseName: cert-manager-crds
|
||||
chart: cozy-cert-manager-crds
|
||||
@@ -103,10 +97,6 @@ releases:
|
||||
namespace: cozy-monitoring
|
||||
privileged: true
|
||||
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
|
||||
values:
|
||||
scrapeRules:
|
||||
etcd:
|
||||
enabled: true
|
||||
|
||||
- name: kubevirt-operator
|
||||
releaseName: kubevirt-operator
|
||||
|
||||
@@ -70,10 +70,6 @@ releases:
|
||||
namespace: cozy-monitoring
|
||||
privileged: true
|
||||
dependsOn: [victoria-metrics-operator]
|
||||
values:
|
||||
scrapeRules:
|
||||
etcd:
|
||||
enabled: true
|
||||
|
||||
- name: etcd-operator
|
||||
releaseName: etcd-operator
|
||||
|
||||
@@ -1,2 +1,2 @@
e2e:
  image: ghcr.io/aenix-io/cozystack/e2e-sandbox:v0.25.0@sha256:c9165c329fd2536e0342345792586d3e6f4007a6e6bd2cce133f17088fd3e785
  image: ghcr.io/aenix-io/cozystack/e2e-sandbox:v0.23.1@sha256:0f4ffa7f23d6cdc633c0c4a0b852fde9710edbce96486fd9bd29c7d0d7710380
@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/matchbox:v0.25.0@sha256:ffcfc340f19c7c6bc50f49d75b41c802df9bddc71d9e6fdeb9b9ad7dc242a5d1
ghcr.io/aenix-io/cozystack/matchbox:v0.23.1
@@ -3,4 +3,4 @@ name: etcd
description: Storage for Kubernetes clusters
icon: /logos/etcd.svg
type: application
version: 2.5.0
version: 2.4.0
@@ -40,12 +40,6 @@ spec:
        labels:
          cozystack.io/service: etcd
    spec:
      containers:
      - name: etcd
        ports:
        - name: metrics
          containerPort: 2381
          protocol: TCP
      topologySpreadConstraints:
      - maxSkew: 1
        topologyKey: "kubernetes.io/hostname"
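With the metrics containerPort shown above, the etcd pods expose Prometheus metrics on 2381 over plain HTTP. A hedged spot check — the namespace and StatefulSet name are assumptions:

```sh
# Illustrative: pull etcd's Prometheus metrics through the new "metrics" port.
kubectl -n tenant-root port-forward sts/etcd 2381:2381 &
sleep 2
curl -s http://127.0.0.1:2381/metrics | grep '^etcd_server_has_leader'
kill %1
```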
@@ -1,11 +0,0 @@
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
  name: etcd-pod-scrape
spec:
  podMetricsEndpoints:
  - port: metrics
    scheme: http
  selector:
    matchLabels:
      app.kubernetes.io/name: etcd
@@ -1,132 +0,0 @@
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: PrometheusRule
|
||||
metadata:
|
||||
name: etcd-rules
|
||||
spec:
|
||||
groups:
|
||||
- name: etcd
|
||||
rules:
|
||||
- alert: etcdInsufficientMembers
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': insufficient members '{{`{{ $value }}`}}'."
|
||||
expr: |
|
||||
sum(up{job=~".*etcd.*"} == bool 1) by (job) < ((count(up{job=~".*etcd.*"}) by (job) + 1) / 2)
|
||||
for: 3m
|
||||
labels:
|
||||
severity: critical
|
||||
|
||||
- alert: etcdNoLeader
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': member '{{`{{ $labels.instance }}`}}' has no leader."
|
||||
expr: |
|
||||
etcd_server_has_leader{job=~".*etcd.*"} == 0
|
||||
for: 1m
|
||||
labels:
|
||||
severity: critical
|
||||
|
||||
- alert: etcdHighNumberOfLeaderChanges
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': instance '{{`{{ $labels.instance }}`}}' has seen '{{`{{ $value }}`}}' leader changes within the last hour."
|
||||
expr: |
|
||||
rate(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}[15m]) > 3
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- alert: etcdHighNumberOfFailedGRPCRequests
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': '{{`{{ $value }}`}}' of requests for '{{`{{ $labels.grpc_method }}`}}' failed on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method)
|
||||
/
|
||||
sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method)
|
||||
> 1
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- alert: etcdHighNumberOfFailedGRPCRequests
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': '{{`{{ $value }}`}}' of requests for '{{`{{ $labels.grpc_method }}`}}' failed on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method)
|
||||
/
|
||||
sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method)
|
||||
> 5
|
||||
for: 5m
|
||||
labels:
|
||||
severity: critical
|
||||
|
||||
- alert: etcdGRPCRequestsSlow
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': gRPC requests to '{{`{{ $labels.grpc_method }}`}}' are taking '{{`{{ $value }}`}}' on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_type="unary"}[5m])) by (job, instance, grpc_service, grpc_method, le))
|
||||
> 0.15
|
||||
for: 10m
|
||||
labels:
|
||||
severity: critical
|
||||
|
||||
- alert: etcdMemberCommunicationSlow
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': member communication with '{{`{{ $labels.To }}`}}' is taking '{{`{{ $value }}`}}' on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m]))
|
||||
> 0.15
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- alert: etcdHighNumberOfFailedProposals
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}': '{{`{{ $value }}`}}' proposal failures within the last hour on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
|
||||
for: 15m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- alert: etcdHighNumberOfFailedHTTPRequests
|
||||
annotations:
|
||||
summary: "'{{`{{ $value }}`}}' of requests for '{{`{{ $labels.method }}`}}' failed on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) BY (method) > 0.01
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- alert: etcdHighNumberOfFailedHTTPRequests
|
||||
annotations:
|
||||
summary: "'{{`{{ $value }}`}}' of requests for '{{`{{ $labels.method }}`}}' failed on etcd instance '{{`{{ $labels.instance }}`}}'."
|
||||
expr: |
|
||||
sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) BY (method) > 0.05
|
||||
for: 10m
|
||||
labels:
|
||||
severity: critical
|
||||
|
||||
- alert: etcdHTTPRequestsSlow
|
||||
annotations:
|
||||
summary: "etcd instance '{{`{{ $labels.instance }}`}}' HTTP requests to '{{`{{ $labels.method }}`}}' are slow."
|
||||
expr: |
|
||||
histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m]))
|
||||
> 0.15
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
|
||||
- alert: etcdMembersDown
|
||||
annotations:
|
||||
summary: "etcd cluster '{{`{{ $labels.job }}`}}' members are down."
|
||||
description: 'etcd cluster "{{`{{ $labels.job }}`}}": members are down {{`{{ $value }}`}}.'
|
||||
expr: |
|
||||
max without (endpoint) (
|
||||
sum without (instance, pod) (up{job=~".*etcd.*"} == bool 0)
|
||||
or
|
||||
count without (To) (
|
||||
sum without (instance, pod) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01
|
||||
)
|
||||
)
|
||||
> 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: critical
|
||||
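For context on the rules removed above: etcdInsufficientMembers compares the number of members reporting up against quorum, so for a three-member cluster it fires once fewer than (3+1)/2 = 2 members are up. A rule file of this shape can be syntax-checked before moving or deleting it — the file name is assumed, and promtool expects the bare groups: document rather than the wrapping PrometheusRule object:

```sh
# Illustrative: syntax-check an extracted groups: file with promtool.
promtool check rules etcd-rules.yaml
```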
@@ -30,7 +30,5 @@ main/nodes
control-plane/control-plane-status
control-plane/deprecated-resources
control-plane/dns-coredns
control-plane/kube-etcd
control-plane/kube-etcd3
kubevirt/kubevirt-control-plane
flux/flux-control-plane
flux/flux-stats
@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/grafana:1.8.0@sha256:0377abd3cb2c6e27b12ac297f1859aa4d550f1aa14989f824f2315d0dfd1a5b2
ghcr.io/aenix-io/cozystack/grafana:latest@sha256:0377abd3cb2c6e27b12ac297f1859aa4d550f1aa14989f824f2315d0dfd1a5b2
@@ -5,8 +5,7 @@ etcd 2.0.1 6fc1cc7d
etcd 2.1.0 2b00fcf8
etcd 2.2.0 5ca8823
etcd 2.3.0 b908400d
etcd 2.4.0 cb7b8158
etcd 2.5.0 HEAD
etcd 2.4.0 HEAD
ingress 1.0.0 f642698
ingress 1.1.0 838bee5d
ingress 1.2.0 ced8e5b
@@ -1,3 +1,3 @@
apiVersion: v2
name: cozy-bootbox
name: cozy-smee
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
@@ -1 +1 @@
ghcr.io/aenix-io/cozystack/s3manager:v0.5.0@sha256:e4a4ab82290e6fc940beb799d5c15c7ed76f9752d3fce21fae6c2596a5da20b0
ghcr.io/aenix-io/cozystack/s3manager:v0.5.0@sha256:35e9a8ba7e1a3b0cee634f6d2bd92d2b08c47c7ed3316559c9ea25ff733eb5d5
@@ -79,7 +79,7 @@ annotations:
    Pod IP Pool\n description: |\n CiliumPodIPPool defines an IP pool that can
    be used for pooled IPAM (i.e. the multi-pool IPAM mode).\n"
apiVersion: v2
appVersion: 1.16.6
appVersion: 1.16.5
description: eBPF-based Networking, Security, and Observability
home: https://cilium.io/
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
@@ -95,4 +95,4 @@ kubeVersion: '>= 1.21.0-0'
name: cilium
sources:
  - https://github.com/cilium/cilium
version: 1.16.6
version: 1.16.5
@@ -1,6 +1,6 @@
|
||||
# cilium
|
||||
|
||||
 
|
||||
 
|
||||
|
||||
Cilium is open source software for providing and transparently securing
|
||||
network connectivity and loadbalancing between application workloads such as
|
||||
@@ -83,7 +83,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
|
||||
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect only if authentication.mutual.spire.enabled is true |
|
||||
| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
|
||||
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:71b79694b71639e633452f57fd9de40595d524de308349218d9a6a144b40be02","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server |
|
||||
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:d75b758a4fea99ffff4db799e16f853bbde8643671b5b72464a8ba94cbe3dbe3","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server |
|
||||
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
|
||||
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
|
||||
| authentication.mutual.spire.install.server.annotations | object | `{}` | SPIRE server annotations |
|
||||
@@ -182,7 +182,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
|
||||
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
|
||||
| clustermesh.apiserver.healthPort | int | `9880` | TCP port for the clustermesh-apiserver health API. |
|
||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:ab2070ea48a52a55d961b81b7b5fbac7d40a3f428be9b1b6b9071d47f194456a","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.16.6","useDigest":true}` | Clustermesh API server image. |
|
||||
| clustermesh.apiserver.image | object | `{"digest":"sha256:37a7fdbef806b78ef63df9f1a9828fdddbf548d1f0e43b8eb10a6bdc8fa03958","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.16.5","useDigest":true}` | Clustermesh API server image. |
|
||||
| clustermesh.apiserver.kvstoremesh.enabled | bool | `true` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
|
||||
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
|
||||
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
|
||||
@@ -353,7 +353,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| envoy.extraVolumes | list | `[]` | Additional envoy volumes. |
|
||||
| envoy.healthPort | int | `9878` | TCP port for the health API. |
|
||||
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
|
||||
| envoy.image | object | `{"digest":"sha256:a69dfe0e54b24b0ff747385c8feeae0612cfbcae97bfcc8ee42a773bb3f69c88","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.30.9-1737073743-40a016d11c0d863b772961ed0168eea6fe6b10a5","useDigest":true}` | Envoy container image. |
|
||||
| envoy.image | object | `{"digest":"sha256:709c08ade3d17d52da4ca2af33f431360ec26268d288d9a6cd1d98acc9a1dced","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.30.8-1733837904-eaae5aca0fb988583e5617170a65ac5aa51c0aa8","useDigest":true}` | Envoy container image. |
|
||||
| envoy.initialFetchTimeoutSeconds | int | `30` | Time in seconds after which the initial fetch on an xDS stream is considered timed out |
|
||||
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
|
||||
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
|
||||
@@ -485,7 +485,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
|
||||
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
|
||||
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
|
||||
| hubble.relay.image | object | `{"digest":"sha256:ca8dcaa5a81a37743b1397ba2221d16d5d63e4a47607584f1bf50a3b0882bf3b","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.16.6","useDigest":true}` | Hubble-relay container image. |
|
||||
| hubble.relay.image | object | `{"digest":"sha256:6cfae1d1afa566ba941f03d4d7e141feddd05260e5cd0a1509aba1890a45ef00","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.16.5","useDigest":true}` | Hubble-relay container image. |
|
||||
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
|
||||
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
|
||||
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
@@ -591,7 +591,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
|
||||
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). |
|
||||
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
|
||||
| image | object | `{"digest":"sha256:1e0896b1c4c188b4812c7e0bed7ec3f5631388ca88325c1391a0ef9172c448da","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.16.6","useDigest":true}` | Agent container image. |
|
||||
| image | object | `{"digest":"sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.16.5","useDigest":true}` | Agent container image. |
|
||||
| imagePullSecrets | list | `[]` | Configure image pull secrets for pulling container images |
|
||||
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
|
||||
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
|
||||
@@ -718,7 +718,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| operator.hostNetwork | bool | `true` | HostNetwork setting |
|
||||
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
|
||||
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:0e3c7fbcb6bde9a247cd2dd3d25230e2859d40d2eb58aba6265a2aab216775a9","awsDigest":"sha256:d11ee1cfa3465defe2df7ec1c6e8a77bcaf280b44d2c61aa7496c58b29550f6d","azureDigest":"sha256:0a05d7aea760923897aabd715213ab11a706051673d41fab3874a37f897c1bdd","genericDigest":"sha256:13d32071d5a52c069fb7c35959a56009c6914439adc73e99e098917646d154fc","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.16.6","useDigest":true}` | cilium-operator image. |
|
||||
| operator.image | object | `{"alibabacloudDigest":"sha256:c0edf4c8d089e76d6565d3c57128b98bc6c73d14bb4590126ee746aeaedba5e0","awsDigest":"sha256:97e1fe0c2b522583033138eb10c170919d8de49d2788ceefdcff229a92210476","azureDigest":"sha256:265e2b78f572c76b523f91757083ea5f0b9b73b82f2d9714e5a8fb848e4048f9","genericDigest":"sha256:f7884848483bbcd7b1e0ccfd34ba4546f258b460cb4b7e2f06a1bcc96ef88039","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.16.5","useDigest":true}` | cilium-operator image. |
|
||||
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
|
||||
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
|
||||
@@ -768,7 +768,7 @@ contributors across the globe, there is almost always someone available to help.
|
||||
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
|
||||
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
|
||||
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
|
||||
| preflight.image | object | `{"digest":"sha256:1e0896b1c4c188b4812c7e0bed7ec3f5631388ca88325c1391a0ef9172c448da","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.16.6","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.image | object | `{"digest":"sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.16.5","useDigest":true}` | Cilium pre-flight image. |
|
||||
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
|
||||
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
|
||||
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
|
||||
|
||||
@@ -0,0 +1,471 @@
|
||||
{
|
||||
"node": {
|
||||
"id": "host~127.0.0.1~no-id~localdomain",
|
||||
"cluster": "ingress-cluster"
|
||||
},
|
||||
"staticResources": {
|
||||
"listeners": [
|
||||
{{- if .Values.envoy.prometheus.enabled }}
|
||||
{
|
||||
"name": "envoy-prometheus-metrics-listener",
|
||||
"address": {
|
||||
"socket_address": {
|
||||
"address": "0.0.0.0",
|
||||
"port_value": {{ .Values.envoy.prometheus.port }}
|
||||
}
|
||||
},
|
||||
"filter_chains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.http_connection_manager",
|
||||
"typed_config": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
|
||||
"stat_prefix": "envoy-prometheus-metrics-listener",
|
||||
"route_config": {
|
||||
"virtual_hosts": [
|
||||
{
|
||||
"name": "prometheus_metrics_route",
|
||||
"domains": [
|
||||
"*"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"name": "prometheus_metrics_route",
|
||||
"match": {
|
||||
"prefix": "/metrics"
|
||||
},
|
||||
"route": {
|
||||
"cluster": "/envoy-admin",
|
||||
"prefix_rewrite": "/stats/prometheus"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"http_filters": [
|
||||
{
|
||||
"name": "envoy.filters.http.router",
|
||||
"typed_config": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
|
||||
}
|
||||
}
|
||||
],
|
||||
"internal_address_config": {
|
||||
"cidr_ranges": [
|
||||
{
|
||||
"address_prefix": "10.0.0.0",
|
||||
"prefix_len": 8
|
||||
},
|
||||
{
|
||||
"address_prefix": "172.16.0.0",
|
||||
"prefix_len": 12
|
||||
},
|
||||
{
|
||||
"address_prefix": "192.168.0.0",
|
||||
"prefix_len": 16
|
||||
},
|
||||
{
|
||||
"address_prefix": "127.0.0.1",
|
||||
"prefix_len": 32
|
||||
},
|
||||
{
|
||||
"address_prefix": "::1",
|
||||
"prefix_len": 128
|
||||
}
|
||||
]
|
||||
},
|
||||
"stream_idle_timeout": "0s"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{{- end }}
|
||||
{{- if and .Values.envoy.debug.admin.enabled }}
|
||||
{
|
||||
"name": "envoy-admin-listener",
|
||||
"address": {
|
||||
"socket_address": {
|
||||
"address": {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }},
|
||||
"port_value": {{ .Values.envoy.debug.admin.port }}
|
||||
}
|
||||
},
|
||||
{{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
|
||||
"additional_addresses": [
|
||||
{
|
||||
"address": {
|
||||
"socket_address": {
|
||||
"address": "::1",
|
||||
"port_value": {{ .Values.envoy.debug.admin.port }}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
{{- end }}
|
||||
"filter_chains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.http_connection_manager",
|
||||
"typed_config": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
|
||||
"stat_prefix": "envoy-admin-listener",
|
||||
"route_config": {
|
||||
"virtual_hosts": [
|
||||
{
|
||||
"name": "admin_route",
|
||||
"domains": [
|
||||
"*"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"name": "admin_route",
|
||||
"match": {
|
||||
"prefix": "/"
|
||||
},
|
||||
"route": {
|
||||
"cluster": "/envoy-admin",
|
||||
"prefix_rewrite": "/"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"http_filters": [
|
||||
{
|
||||
"name": "envoy.filters.http.router",
|
||||
"typed_config": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
|
||||
}
|
||||
}
|
||||
],
|
||||
"internal_address_config": {
|
||||
"cidr_ranges": [
|
||||
{
|
||||
"address_prefix": "10.0.0.0",
|
||||
"prefix_len": 8
|
||||
},
|
||||
{
|
||||
"address_prefix": "172.16.0.0",
|
||||
"prefix_len": 12
|
||||
},
|
||||
{
|
||||
"address_prefix": "192.168.0.0",
|
||||
"prefix_len": 16
|
||||
},
|
||||
{
|
||||
"address_prefix": "127.0.0.1",
|
||||
"prefix_len": 32
|
||||
},
|
||||
{
|
||||
"address_prefix": "::1",
|
||||
"prefix_len": 128
|
||||
}
|
||||
]
|
||||
},
|
||||
"stream_idle_timeout": "0s"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{{- end }}
|
||||
{
|
||||
"name": "envoy-health-listener",
|
||||
"address": {
|
||||
"socket_address": {
|
||||
"address": {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }},
|
||||
"port_value": {{ .Values.envoy.healthPort }}
|
||||
}
|
||||
},
|
||||
{{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
|
||||
"additional_addresses": [
|
||||
{
|
||||
"address": {
|
||||
"socket_address": {
|
||||
"address": "::1",
|
||||
"port_value": {{ .Values.envoy.healthPort }}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
{{- end }}
|
||||
"filter_chains": [
|
||||
{
|
||||
"filters": [
|
||||
{
|
||||
"name": "envoy.filters.network.http_connection_manager",
|
||||
"typed_config": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
|
||||
"stat_prefix": "envoy-health-listener",
|
||||
"route_config": {
|
||||
"virtual_hosts": [
|
||||
{
|
||||
"name": "health",
|
||||
"domains": [
|
||||
"*"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"name": "health",
|
||||
"match": {
|
||||
"prefix": "/healthz"
|
||||
},
|
||||
"route": {
|
||||
"cluster": "/envoy-admin",
|
||||
"prefix_rewrite": "/ready"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"http_filters": [
|
||||
{
|
||||
"name": "envoy.filters.http.router",
|
||||
"typed_config": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
|
||||
}
|
||||
}
|
||||
],
|
||||
"internal_address_config": {
|
||||
"cidr_ranges": [
|
||||
{
|
||||
"address_prefix": "10.0.0.0",
|
||||
"prefix_len": 8
|
||||
},
|
||||
{
|
||||
"address_prefix": "172.16.0.0",
|
||||
"prefix_len": 12
|
||||
},
|
||||
{
|
||||
"address_prefix": "192.168.0.0",
|
||||
"prefix_len": 16
|
||||
},
|
||||
{
|
||||
"address_prefix": "127.0.0.1",
|
||||
"prefix_len": 32
|
||||
},
|
||||
{
|
||||
"address_prefix": "::1",
|
||||
"prefix_len": 128
|
||||
}
|
||||
]
|
||||
},
|
||||
"stream_idle_timeout": "0s"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"clusters": [
|
||||
{
|
||||
"name": "ingress-cluster",
|
||||
"type": "ORIGINAL_DST",
|
||||
"connectTimeout": "{{ .Values.envoy.connectTimeoutSeconds }}s",
|
||||
"lbPolicy": "CLUSTER_PROVIDED",
|
||||
"typedExtensionProtocolOptions": {
|
||||
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
|
||||
"commonHttpProtocolOptions": {
|
||||
"idleTimeout": "{{ .Values.envoy.idleTimeoutDurationSeconds }}s",
|
||||
"maxConnectionDuration": "{{ .Values.envoy.maxConnectionDurationSeconds }}s",
|
||||
"maxRequestsPerConnection": {{ .Values.envoy.maxRequestsPerConnection }}
|
||||
},
|
||||
"useDownstreamProtocolConfig": {}
|
||||
}
|
||||
},
|
||||
"cleanupInterval": "{{ .Values.envoy.connectTimeoutSeconds }}.500s"
|
||||
},
|
||||
{
|
||||
"name": "egress-cluster-tls",
|
||||
"type": "ORIGINAL_DST",
|
||||
"connectTimeout": "{{ .Values.envoy.connectTimeoutSeconds }}s",
|
||||
"lbPolicy": "CLUSTER_PROVIDED",
|
||||
"typedExtensionProtocolOptions": {
|
||||
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
|
||||
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
|
||||
"commonHttpProtocolOptions": {
|
||||
"idleTimeout": "{{ .Values.envoy.idleTimeoutDurationSeconds }}s",
|
||||
"maxConnectionDuration": "{{ .Values.envoy.maxConnectionDurationSeconds }}s",
|
||||
"maxRequestsPerConnection": {{ .Values.envoy.maxRequestsPerConnection }}
|
||||
},
|
||||
"upstreamHttpProtocolOptions": {},
|
||||
"useDownstreamProtocolConfig": {}
|
||||
}
|
||||
},
|
||||
"cleanupInterval": "{{ .Values.envoy.connectTimeoutSeconds }}.500s",
|
||||
"transportSocket": {
"name": "cilium.tls_wrapper",
"typedConfig": {
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
}
}
},
{
"name": "egress-cluster",
"type": "ORIGINAL_DST",
"connectTimeout": "{{ .Values.envoy.connectTimeoutSeconds }}s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "{{ .Values.envoy.idleTimeoutDurationSeconds }}s",
"maxConnectionDuration": "{{ .Values.envoy.maxConnectionDurationSeconds }}s",
"maxRequestsPerConnection": {{ .Values.envoy.maxRequestsPerConnection }}
},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "{{ .Values.envoy.connectTimeoutSeconds }}.500s"
},
{
"name": "ingress-cluster-tls",
"type": "ORIGINAL_DST",
"connectTimeout": "{{ .Values.envoy.connectTimeoutSeconds }}s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "{{ .Values.envoy.idleTimeoutDurationSeconds }}s",
"maxConnectionDuration": "{{ .Values.envoy.maxConnectionDurationSeconds }}s",
"maxRequestsPerConnection": {{ .Values.envoy.maxRequestsPerConnection }}
},
"upstreamHttpProtocolOptions": {},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "{{ .Values.envoy.connectTimeoutSeconds }}.500s",
"transportSocket": {
"name": "cilium.tls_wrapper",
"typedConfig": {
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
}
}
},
{
"name": "xds-grpc-cilium",
"type": "STATIC",
"connectTimeout": "{{ .Values.envoy.connectTimeoutSeconds }}s",
"loadAssignment": {
"clusterName": "xds-grpc-cilium",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/xds.sock"
}
}
}
}
]
}
]
},
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"explicitHttpConfig": {
"http2ProtocolOptions": {}
}
}
}
},
{
"name": "/envoy-admin",
"type": "STATIC",
"connectTimeout": "{{ .Values.envoy.connectTimeoutSeconds }}s",
"loadAssignment": {
"clusterName": "/envoy-admin",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/admin.sock"
}
}
}
}
]
}
]
}
}
]
},
"dynamicResources": {
"ldsConfig": {
"initialFetchTimeout": "{{ .Values.envoy.initialFetchTimeoutSeconds }}s",
"apiConfigSource": {
"apiType": "GRPC",
"transportApiVersion": "V3",
"grpcServices": [
{
"envoyGrpc": {
"clusterName": "xds-grpc-cilium"
}
}
],
"setNodeOnFirstMessageOnly": true
},
"resourceApiVersion": "V3"
},
"cdsConfig": {
"initialFetchTimeout": "{{ .Values.envoy.initialFetchTimeoutSeconds }}s",
"apiConfigSource": {
"apiType": "GRPC",
"transportApiVersion": "V3",
"grpcServices": [
{
"envoyGrpc": {
"clusterName": "xds-grpc-cilium"
}
}
],
"setNodeOnFirstMessageOnly": true
},
"resourceApiVersion": "V3"
}
},
"bootstrapExtensions": [
{
"name": "envoy.bootstrap.internal_listener",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"
}
}
],
"overload_manager": {
"resource_monitors": [
{
"name": "envoy.resource_monitors.global_downstream_max_connections",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig",
"max_active_downstream_connections": "50000"
}
}
]
},
"admin": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/admin.sock"
}
}
}
}
@@ -1,280 +0,0 @@
node:
id: "host~127.0.0.1~no-id~localdomain"
cluster: "ingress-cluster"
staticResources:
listeners:
{{- if .Values.envoy.prometheus.enabled }}
- name: "envoy-prometheus-metrics-listener"
address:
socketAddress:
address: "0.0.0.0"
portValue: {{ .Values.envoy.prometheus.port }}
filterChains:
- filters:
- name: "envoy.filters.network.http_connection_manager"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
statPrefix: "envoy-prometheus-metrics-listener"
routeConfig:
virtualHosts:
- name: "prometheus_metrics_route"
domains:
- "*"
routes:
- name: "prometheus_metrics_route"
match:
prefix: "/metrics"
route:
cluster: "/envoy-admin"
prefixRewrite: "/stats/prometheus"
httpFilters:
- name: "envoy.filters.http.router"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
internalAddressConfig:
cidrRanges:
{{- if .Values.ipv4.enabled }}
- addressPrefix: "10.0.0.0"
prefixLen: 8
- addressPrefix: "172.16.0.0"
prefixLen: 12
- addressPrefix: "192.168.0.0"
prefixLen: 16
- addressPrefix: "127.0.0.1"
prefixLen: 32
{{- end }}
{{- if .Values.ipv6.enabled }}
- addressPrefix: "::1"
prefixLen: 128
{{- end }}
streamIdleTimeout: "0s"
{{- end }}
{{- if and .Values.envoy.debug.admin.enabled }}
- name: "envoy-admin-listener"
address:
socketAddress:
address: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
portValue: {{ .Values.envoy.debug.admin.port }}
{{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
additionalAddresses:
- address:
socketAddress:
address: "::1"
portValue: {{ .Values.envoy.debug.admin.port }}
{{- end }}
filterChains:
- filters:
- name: "envoy.filters.network.http_connection_manager"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
statPrefix: "envoy-admin-listener"
routeConfig:
virtual_hosts:
- name: "admin_route"
domains:
- "*"
routes:
- name: "admin_route"
match:
prefix: "/"
route:
cluster: "/envoy-admin"
prefixRewrite: "/"
httpFilters:
- name: "envoy.filters.http.router"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
internalAddressConfig:
cidrRanges:
{{- if .Values.ipv4.enabled }}
- addressPrefix: "10.0.0.0"
prefixLen: 8
- addressPrefix: "172.16.0.0"
prefixLen: 12
- addressPrefix: "192.168.0.0"
prefixLen: 16
- addressPrefix: "127.0.0.1"
prefixLen: 32
{{- end }}
{{- if .Values.ipv6.enabled }}
- addressPrefix: "::1"
prefixLen: 128
{{- end }}
streamIdleTimeout: "0s"
{{- end }}
- name: "envoy-health-listener"
address:
socketAddress:
address: {{ .Values.ipv4.enabled | ternary "127.0.0.1" "::1" | quote }}
portValue: {{ .Values.envoy.healthPort }}
{{- if and .Values.ipv4.enabled .Values.ipv6.enabled }}
additionalAddresses:
- address:
socketAddress:
address: "::1"
portValue: {{ .Values.envoy.healthPort }}
{{- end }}
filterChains:
- filters:
- name: "envoy.filters.network.http_connection_manager"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
statPrefix: "envoy-health-listener"
routeConfig:
virtual_hosts:
- name: "health"
domains:
- "*"
routes:
- name: "health"
match:
prefix: "/healthz"
route:
cluster: "/envoy-admin"
prefixRewrite: "/ready"
httpFilters:
- name: "envoy.filters.http.router"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
internalAddressConfig:
cidrRanges:
{{- if .Values.ipv4.enabled }}
- addressPrefix: "10.0.0.0"
prefixLen: 8
- addressPrefix: "172.16.0.0"
prefixLen: 12
- addressPrefix: "192.168.0.0"
prefixLen: 16
- addressPrefix: "127.0.0.1"
prefixLen: 32
{{- end }}
{{- if .Values.ipv6.enabled }}
- addressPrefix: "::1"
prefixLen: 128
{{- end }}
streamIdleTimeout: "0s"
clusters:
- name: "ingress-cluster"
type: "ORIGINAL_DST"
connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s"
lbPolicy: "CLUSTER_PROVIDED"
typedExtensionProtocolOptions:
envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions"
commonHttpProtocolOptions:
idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s"
maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s"
maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }}
useDownstreamProtocolConfig: {}
cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s"
- name: "egress-cluster-tls"
type: "ORIGINAL_DST"
connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s"
lbPolicy: "CLUSTER_PROVIDED"
typedExtensionProtocolOptions:
envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions"
commonHttpProtocolOptions:
idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s"
maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s"
maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }}
upstreamHttpProtocolOptions: {}
useDownstreamProtocolConfig: {}
cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s"
transportSocket:
name: "cilium.tls_wrapper"
typedConfig:
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
- name: "egress-cluster"
type: "ORIGINAL_DST"
connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s"
lbPolicy: "CLUSTER_PROVIDED"
typedExtensionProtocolOptions:
envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions"
commonHttpProtocolOptions:
idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s"
maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s"
maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }}
useDownstreamProtocolConfig: {}
cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s"
- name: "ingress-cluster-tls"
type: "ORIGINAL_DST"
connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s"
lbPolicy: "CLUSTER_PROVIDED"
typedExtensionProtocolOptions:
envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions"
commonHttpProtocolOptions:
idleTimeout: "{{ .Values.envoy.idleTimeoutDurationSeconds }}s"
maxConnectionDuration: "{{ .Values.envoy.maxConnectionDurationSeconds }}s"
maxRequestsPerConnection: {{ .Values.envoy.maxRequestsPerConnection }}
upstreamHttpProtocolOptions: {}
useDownstreamProtocolConfig: {}
cleanupInterval: "{{ .Values.envoy.connectTimeoutSeconds }}.500s"
transportSocket:
name: "cilium.tls_wrapper"
typedConfig:
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
- name: "xds-grpc-cilium"
type: "STATIC"
connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s"
loadAssignment:
clusterName: "xds-grpc-cilium"
endpoints:
- lbEndpoints:
- endpoint:
address:
pipe:
path: "/var/run/cilium/envoy/sockets/xds.sock"
typedExtensionProtocolOptions:
envoy.extensions.upstreams.http.v3.HttpProtocolOptions:
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions"
explicitHttpConfig:
http2ProtocolOptions: {}
- name: "/envoy-admin"
type: "STATIC"
connectTimeout: "{{ .Values.envoy.connectTimeoutSeconds }}s"
loadAssignment:
clusterName: "/envoy-admin"
endpoints:
- lbEndpoints:
- endpoint:
address:
pipe:
path: "/var/run/cilium/envoy/sockets/admin.sock"
dynamicResources:
ldsConfig:
initialFetchTimeout: "{{ .Values.envoy.initialFetchTimeoutSeconds }}s"
apiConfigSource:
apiType: "GRPC"
transportApiVersion: "V3"
grpcServices:
- envoyGrpc:
clusterName: "xds-grpc-cilium"
setNodeOnFirstMessageOnly: true
resourceApiVersion: "V3"
cdsConfig:
initialFetchTimeout: "{{ .Values.envoy.initialFetchTimeoutSeconds }}s"
apiConfigSource:
apiType: "GRPC"
transportApiVersion: "V3"
grpcServices:
- envoyGrpc:
clusterName: "xds-grpc-cilium"
setNodeOnFirstMessageOnly: true
resourceApiVersion: "V3"
bootstrapExtensions:
- name: "envoy.bootstrap.internal_listener"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"
overloadManager:
resourceMonitors:
- name: "envoy.resource_monitors.global_downstream_max_connections"
typedConfig:
"@type": "type.googleapis.com/envoy.extensions.resource_monitors.downstream_connections.v3.DownstreamConnectionsConfig"
max_active_downstream_connections: "50000"
admin:
address:
pipe:
path: "/var/run/cilium/envoy/sockets/admin.sock"
@@ -315,9 +315,13 @@ spec:
{{- end}}
- name: cilium-run
mountPath: /var/run/cilium
{{- /* mount the directory if socketLB.enabled is true and socketLB.terminatePodConnections is not explicitly set to false */ -}}
{{- if or (and (kindIs "invalid" .Values.socketLB.terminatePodConnections) .Values.socketLB.enabled)
(and .Values.socketLB.enabled .Values.socketLB.terminatePodConnections) }}
- name: cilium-netns
mountPath: /var/run/cilium/netns
mountPropagation: HostToContainer
{{- end}}
- name: etc-cni-netd
mountPath: {{ .Values.cni.hostConfDirMountPath }}
{{- if .Values.etcd.enabled }}
@@ -793,11 +797,14 @@ spec:
hostPath:
path: {{ .Values.daemon.runPath }}
type: DirectoryOrCreate
{{- if or (and (kindIs "invalid" .Values.socketLB.terminatePodConnections) .Values.socketLB.enabled)
(and .Values.socketLB.enabled .Values.socketLB.terminatePodConnections) }}
# To exec into pod network namespaces
- name: cilium-netns
hostPath:
path: /var/run/netns
type: DirectoryOrCreate
{{- end }}
{{- if .Values.bpf.autoMount.enabled }}
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
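The two hunks above gate the cilium-netns volume and its mount on the same condition: socket-LB connection termination is treated as on when socketLB.enabled is true and socketLB.terminatePodConnections is either unset or explicitly true. A rough, non-authoritative way to check the rendered output is sketched below; the chart directory path is an assumption, the value names come from the hunks.

# Sketch only: render the chart and see whether the netns volume shows up.
helm template cilium ./charts/cilium --set socketLB.enabled=true \
  | grep -B1 -A2 'name: cilium-netns'
# Explicitly disabling terminatePodConnections should drop the volume again.
helm template cilium ./charts/cilium \
  --set socketLB.enabled=true \
  --set socketLB.terminatePodConnections=false \
  | grep 'name: cilium-netns' || echo "netns volume omitted as expected"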

@@ -513,10 +513,10 @@ data:
subnet-ids-filter: {{ .Values.eni.subnetIDsFilter | join " " | quote }}
{{- end }}
{{- if .Values.eni.subnetTagsFilter }}
subnet-tags-filter: {{ .Values.eni.subnetTagsFilter | join "," | quote }}
subnet-tags-filter: {{ .Values.eni.subnetTagsFilter | join " " | quote }}
{{- end }}
{{- if .Values.eni.instanceTagsFilter }}
instance-tags-filter: {{ .Values.eni.instanceTagsFilter | join "," | quote }}
instance-tags-filter: {{ .Values.eni.instanceTagsFilter | join " " | quote }}
{{- end }}
{{- end }}
{{ if .Values.eni.gcInterval }}
@@ -718,6 +718,8 @@ data:
{{- end }}
{{- if hasKey $socketLB "terminatePodConnections" }}
bpf-lb-sock-terminate-pod-connections: {{ $socketLB.terminatePodConnections | quote }}
{{- else if hasKey $socketLB "enabled" }}
bpf-lb-sock-terminate-pod-connections: {{ $socketLB.enabled | quote }}
{{- end }}
{{- if hasKey $socketLB "tracing" }}
trace-sock: {{ $socketLB.tracing | quote }}

@@ -12,7 +12,6 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
data:
# Keep the key name as bootstrap-config.json to avoid breaking changes
bootstrap-config.json: |
{{- (tpl (.Files.Get "files/cilium-envoy/configmap/bootstrap-config.yaml") .) | fromYaml | toJson | nindent 4 }}
{{- (tpl (.Files.Glob "files/cilium-envoy/configmap/bootstrap-config.json").AsConfig .) | nindent 2 }}

{{- end }}
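With this change the ConfigMap keeps the bootstrap-config.json key but renders it from the YAML source through fromYaml | toJson, so the stored value should still parse as JSON. A quick, hedged way to verify the rendered key is sketched below; the chart path, the template path and the PyYAML dependency are assumptions.

helm template cilium ./charts/cilium --show-only templates/cilium-envoy/configmap.yaml > /tmp/envoy-cm.yaml
python3 - <<'EOF'
import json, yaml  # PyYAML assumed to be available
cm = yaml.safe_load(open('/tmp/envoy-cm.yaml'))
json.loads(cm['data']['bootstrap-config.json'])  # raises if the rendered value is not valid JSON
print('bootstrap-config.json renders as valid JSON')
EOF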

@@ -13,12 +13,24 @@ server {
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;

# CORS
add_header Access-Control-Allow-Methods "GET, POST, PUT, HEAD, DELETE, OPTIONS";
add_header Access-Control-Allow-Origin *;
add_header Access-Control-Max-Age 1728000;
add_header Access-Control-Expose-Headers content-length,grpc-status,grpc-message;
add_header Access-Control-Allow-Headers range,keep-alive,user-agent,cache-control,content-type,content-transfer-encoding,x-accept-content-transfer-encoding,x-accept-response-streaming,x-user-agent,x-grpc-web,grpc-timeout;
if ($request_method = OPTIONS) {
return 204;
}
# /CORS

location {{ .Values.hubble.ui.baseUrl }}api {
{{- if not (eq .Values.hubble.ui.baseUrl "/") }}
rewrite ^{{ (trimSuffix "/" .Values.hubble.ui.baseUrl) }}(/.*)$ $1 break;
{{- end }}
proxy_http_version 1.1;
proxy_pass_request_headers on;
proxy_hide_header Access-Control-Allow-Origin;
{{- if eq .Values.hubble.ui.baseUrl "/" }}
proxy_pass http://127.0.0.1:8090;
{{- else }}
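The added nginx block answers browser preflight requests for the Hubble UI backend: OPTIONS requests return 204 and every response carries the Access-Control-* headers. An illustrative check against a port-forwarded UI service follows; the service name, namespace and port are assumptions.

kubectl -n kube-system port-forward svc/hubble-ui 8081:80 &
sleep 2
curl -s -o /dev/null -D - -X OPTIONS 'http://127.0.0.1:8081/api/' \
  -H 'Origin: http://example.com' \
  -H 'Access-Control-Request-Method: POST'
# Expect HTTP 204 plus the Access-Control-Allow-* headers configured above.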

@@ -153,10 +153,10 @@ image:
# @schema
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.16.6"
tag: "v1.16.5"
pullPolicy: "IfNotPresent"
# cilium-digest
digest: "sha256:1e0896b1c4c188b4812c7e0bed7ec3f5631388ca88325c1391a0ef9172c448da"
digest: "sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d"
useDigest: true
# -- Affinity for cilium-agent.
affinity:
@@ -1314,9 +1314,9 @@ hubble:
# @schema
override: ~
repository: "quay.io/cilium/hubble-relay"
tag: "v1.16.6"
tag: "v1.16.5"
# hubble-relay-digest
digest: "sha256:ca8dcaa5a81a37743b1397ba2221d16d5d63e4a47607584f1bf50a3b0882bf3b"
digest: "sha256:6cfae1d1afa566ba941f03d4d7e141feddd05260e5cd0a1509aba1890a45ef00"
useDigest: true
pullPolicy: "IfNotPresent"
# -- Specifies the resources for the hubble-relay pods
@@ -2165,9 +2165,9 @@ envoy:
# @schema
override: ~
repository: "quay.io/cilium/cilium-envoy"
tag: "v1.30.9-1737073743-40a016d11c0d863b772961ed0168eea6fe6b10a5"
tag: "v1.30.8-1733837904-eaae5aca0fb988583e5617170a65ac5aa51c0aa8"
pullPolicy: "IfNotPresent"
digest: "sha256:a69dfe0e54b24b0ff747385c8feeae0612cfbcae97bfcc8ee42a773bb3f69c88"
digest: "sha256:709c08ade3d17d52da4ca2af33f431360ec26268d288d9a6cd1d98acc9a1dced"
useDigest: true
# -- Additional containers added to the cilium Envoy DaemonSet.
extraContainers: []
@@ -2480,15 +2480,15 @@ operator:
# @schema
override: ~
repository: "quay.io/cilium/operator"
tag: "v1.16.6"
tag: "v1.16.5"
# operator-generic-digest
genericDigest: "sha256:13d32071d5a52c069fb7c35959a56009c6914439adc73e99e098917646d154fc"
genericDigest: "sha256:f7884848483bbcd7b1e0ccfd34ba4546f258b460cb4b7e2f06a1bcc96ef88039"
# operator-azure-digest
azureDigest: "sha256:0a05d7aea760923897aabd715213ab11a706051673d41fab3874a37f897c1bdd"
azureDigest: "sha256:265e2b78f572c76b523f91757083ea5f0b9b73b82f2d9714e5a8fb848e4048f9"
# operator-aws-digest
awsDigest: "sha256:d11ee1cfa3465defe2df7ec1c6e8a77bcaf280b44d2c61aa7496c58b29550f6d"
awsDigest: "sha256:97e1fe0c2b522583033138eb10c170919d8de49d2788ceefdcff229a92210476"
# operator-alibabacloud-digest
alibabacloudDigest: "sha256:0e3c7fbcb6bde9a247cd2dd3d25230e2859d40d2eb58aba6265a2aab216775a9"
alibabacloudDigest: "sha256:c0edf4c8d089e76d6565d3c57128b98bc6c73d14bb4590126ee746aeaedba5e0"
useDigest: true
pullPolicy: "IfNotPresent"
suffix: ""
@@ -2762,9 +2762,9 @@ preflight:
# @schema
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.16.6"
tag: "v1.16.5"
# cilium-digest
digest: "sha256:1e0896b1c4c188b4812c7e0bed7ec3f5631388ca88325c1391a0ef9172c448da"
digest: "sha256:758ca0793f5995bb938a2fa219dcce63dc0b3fa7fc4ce5cc851125281fb7361d"
useDigest: true
pullPolicy: "IfNotPresent"
# -- The priority class to use for the preflight pod.
@@ -2911,9 +2911,9 @@ clustermesh:
# @schema
override: ~
repository: "quay.io/cilium/clustermesh-apiserver"
tag: "v1.16.6"
tag: "v1.16.5"
# clustermesh-apiserver-digest
digest: "sha256:ab2070ea48a52a55d961b81b7b5fbac7d40a3f428be9b1b6b9071d47f194456a"
digest: "sha256:37a7fdbef806b78ef63df9f1a9828fdddbf548d1f0e43b8eb10a6bdc8fa03958"
useDigest: true
pullPolicy: "IfNotPresent"
# -- TCP port for the clustermesh-apiserver health API.
@@ -3412,7 +3412,7 @@ authentication:
override: ~
repository: "docker.io/library/busybox"
tag: "1.36.1"
digest: "sha256:71b79694b71639e633452f57fd9de40595d524de308349218d9a6a144b40be02"
digest: "sha256:d75b758a4fea99ffff4db799e16f853bbde8643671b5b72464a8ba94cbe3dbe3"
useDigest: true
pullPolicy: "IfNotPresent"
# SPIRE agent configuration

@@ -1,2 +1,2 @@
ARG VERSION=v1.16.6
ARG VERSION=v1.16.5
FROM quay.io/cilium/cilium:${VERSION}

@@ -12,7 +12,7 @@ cilium:
mode: "kubernetes"
image:
repository: ghcr.io/aenix-io/cozystack/cilium
tag: 1.16.6
digest: "sha256:cf64df62897b071d5a9a005564ecbfb9124aa82a96957e329ce28a187864f113"
tag: 1.16.5
digest: "sha256:eae9d5531c115f8946990a731bfaaebc905b020a2957559b3c9f2ce1c655a834"
envoy:
enabled: false

@@ -1,3 +0,0 @@
apiVersion: v2
name: cozy-cozy-proxy
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process
@@ -1,11 +0,0 @@
NAME=cozy-proxy
NAMESPACE=cozy-system

include ../../../scripts/common-envs.mk
include ../../../scripts/package.mk

update:
rm -rf charts
tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/aenix-io/cozy-proxy | awk -F'[/^]' 'END{print $$3}') && \
curl -sSL https://github.com/aenix-io/cozy-proxy/archive/refs/tags/$${tag}.tar.gz | \
tar xzvf - --strip 1 cozy-proxy-$${tag#*v}/charts
@@ -1,6 +0,0 @@
apiVersion: v2
name: cozy-proxy
description: A simple kube-proxy addon for 1:1 NAT services in Kubernetes using an NFT backend
type: application
version: 0.1.0
appVersion: 0.1.0
@@ -1,24 +0,0 @@
{{- define "cozy-proxy.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "cozy-proxy.fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- if eq .Release.Name $name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{- define "cozy-proxy.labels" -}}
helm.sh/chart: {{ include "cozy-proxy.name" . }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/name: {{ include "cozy-proxy.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: {{ include "cozy-proxy.fullname" . }}
labels:
{{- include "cozy-proxy.labels" . | nindent 4 }}
spec:
selector:
matchLabels:
app: {{ include "cozy-proxy.name" . }}
template:
metadata:
labels:
app: {{ include "cozy-proxy.name" . }}
annotations:
{{- toYaml .Values.daemonset.podAnnotations | nindent 8 }}
spec:
serviceAccountName: {{ include "cozy-proxy.fullname" . }}
hostNetwork: {{ .Values.daemonset.hostNetwork }}
containers:
- name: cozy-proxy
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
privileged: true
capabilities:
add: ["NET_ADMIN"]
@@ -1,12 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "cozy-proxy.fullname" . }}
labels:
{{- include "cozy-proxy.labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["services", "endpoints"]
verbs: ["get", "list", "watch"]
{{- end }}
@@ -1,16 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "cozy-proxy.fullname" . }}
labels:
{{- include "cozy-proxy.labels" . | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "cozy-proxy.fullname" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "cozy-proxy.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
@@ -1,8 +0,0 @@
{{- if .Values.rbac.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "cozy-proxy.fullname" . }}
labels:
{{- include "cozy-proxy.labels" . | nindent 4 }}
{{- end }}
@@ -1,12 +0,0 @@
image:
repository: ghcr.io/aenix-io/cozystack/cozy-proxy
tag: v0.1.1
pullPolicy: IfNotPresent

daemonset:
hostNetwork: true
podAnnotations: {}
podLabels: {}

rbac:
create: true
@@ -1,2 +0,0 @@
cozy-proxy:
fullnameOverride: cozy-proxy
@@ -1,2 +1,2 @@
cozystackAPI:
image: ghcr.io/aenix-io/cozystack/cozystack-api:v0.25.0@sha256:513c49f8e2c1e2489faff03aacfcd609324ed5dc31ac594f9dd8a6feec0e7884
image: ghcr.io/aenix-io/cozystack/cozystack-api:v0.23.1@sha256:b25faba99a8b98c1d3576b47986266c4f391c1998d89b599e9139f43727c5b4c

@@ -1,5 +1,5 @@
cozystackController:
image: ghcr.io/aenix-io/cozystack/cozystack-controller:v0.25.0@sha256:9fe9369a88f7bf317bf84cf05fe733f50879467bf7b2ab58f50d914ff1b253cc
image: ghcr.io/aenix-io/cozystack/cozystack-controller:v0.23.1@sha256:ca7801e33fbd38e01b3abe9645956bb235ba7b0f2381bd622d18d4dc5e280020
debug: false
disableTelemetry: false
cozystackVersion: "v0.25.0"
cozystackVersion: "v0.23.1"

@@ -76,7 +76,7 @@ data:
"kubeappsNamespace": {{ .Release.Namespace | quote }},
"helmGlobalNamespace": {{ include "kubeapps.helmGlobalPackagingNamespace" . | quote }},
"carvelGlobalNamespace": {{ .Values.kubeappsapis.pluginConfig.kappController.packages.v1alpha1.globalPackagingNamespace | quote }},
"appVersion": "v0.25.0",
"appVersion": "v0.23.1",
"authProxyEnabled": {{ .Values.authProxy.enabled }},
"oauthLoginURI": {{ .Values.authProxy.oauthLoginURI | quote }},
"oauthLogoutURI": {{ .Values.authProxy.oauthLogoutURI | quote }},

@@ -40,14 +40,14 @@ kubeapps:
image:
registry: ghcr.io/aenix-io/cozystack
repository: dashboard
tag: v0.25.0
tag: v0.23.1
digest: "sha256:81e7b625c667bce5fc339eb97c8e115eafb82f66df4501550b3677ac53f6e234"
kubeappsapis:
image:
registry: ghcr.io/aenix-io/cozystack
repository: kubeapps-apis
tag: v0.25.0
digest: "sha256:72308ae00344d48e7ed58c5b1383874e84bcd82ac53b76857172b9ef510d53a6"
tag: v0.23.1
digest: "sha256:d3767354cf6c785447f30e87bb2017ec45843edfc02635f526d2ecacc82f5d26"
pluginConfig:
flux:
packages:

@@ -1,6 +1,6 @@
dependencies:
- name: kamaji-etcd
repository: https://clastix.github.io/charts
version: 0.8.0
digest: sha256:525b0eb2b5bae709d62de9328312d42c54b5219c6df67061de0da79eeca04fb3
generated: "2024-08-25T08:44:24.92211307+02:00"
version: 0.8.1
digest: sha256:381d8ef9619c2daeea37e40c6a9772ae3e5cee80887148879db04e887d5364ad
generated: "2024-10-25T19:28:40.880766186+02:00"

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: v1.0.0
appVersion: v0.0.0
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
home: https://github.com/clastix/kamaji
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
@@ -17,11 +17,11 @@ name: kamaji
sources:
- https://github.com/clastix/kamaji
type: application
version: 2.0.0
version: 0.0.0
dependencies:
- name: kamaji-etcd
repository: https://clastix.github.io/charts
version: ">=0.7.0"
version: ">=0.8.1"
condition: kamaji-etcd.deploy
annotations:
catalog.cattle.io/certified: partner

@@ -1,6 +1,6 @@
# kamaji

  
  

Kamaji is the Hosted Control Plane Manager for Kubernetes.

@@ -22,7 +22,7 @@ Kubernetes: `>=1.21.0-0`

| Repository | Name | Version |
|------------|------|---------|
| https://clastix.github.io/charts | kamaji-etcd | >=0.7.0 |
| https://clastix.github.io/charts | kamaji-etcd | >=0.8.1 |

[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
This Helm Chart starting from v0.1.1 provides the installation of an internal `etcd` in order to streamline the local test. If you'd like to use an externally managed etcd instance, you can specify the overrides and by setting the value `etcd.deploy=false`.
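For reference, a minimal install that points Kamaji at an externally managed etcd roughly looks like the following. The exact value key depends on the chart version (the README wording above uses etcd.deploy, while this chart wires the dependency condition to kamaji-etcd.deploy), so treat this as a sketch rather than the canonical command.

helm repo add clastix https://clastix.github.io/charts
helm upgrade --install kamaji clastix/kamaji \
  --namespace kamaji-system --create-namespace \
  --set kamaji-etcd.deploy=false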
@@ -70,7 +70,7 @@ Here the values you can override:
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Kubernetes affinity rules to apply to Kamaji controller pods |
| defaultDatastoreName | string | `"default"` | Specify the default DataStore name for the Kamaji instance. |
| defaultDatastoreName | string | `"default"` | If specified, all the Kamaji instances with an unassigned DataStore will inherit this default value. |
| extraArgs | list | `[]` | A list of extra arguments to add to the kamaji controller default ones |
| fullnameOverride | string | `""` | |
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |

@@ -66,7 +66,6 @@ spec:
metadata:
type: object
spec:
description: TenantControlPlaneSpec defines the desired state of TenantControlPlane.
properties:
addons:
description: Addons contain which addons are enabled
@@ -6413,10 +6412,23 @@ spec:
type: object
dataStore:
description: |-
DataStore allows to specify a DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane.
This parameter is optional and acts as an override over the default one which is used by the Kamaji Operator.
Migration from a different DataStore to another one is not yet supported and the reconciliation will be blocked.
DataStore specifies the DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane.
When Kamaji runs with the default DataStore flag, all empty values will inherit the default value.
By leaving it empty and running Kamaji with no default DataStore flag, it is possible to achieve automatic assignment to a specific DataStore object.

Migration from one DataStore to another backed by the same Driver is possible. See: https://kamaji.clastix.io/guides/datastore-migration/
Migration from one DataStore to another backed by a different Driver is not supported.
type: string
dataStoreSchema:
description: |-
DataStoreSchema allows to specify the name of the database (for relational DataStores) or the key prefix (for etcd). This
value is optional and immutable. Note that Kamaji currently doesn't ensure that DataStoreSchema values are unique. It's up
to the user to avoid clashes between different TenantControlPlanes. If not set upon creation, Kamaji will default the
DataStoreSchema by concatenating the namespace and name of the TenantControlPlane.
type: string
x-kubernetes-validations:
- message: changing the dataStoreSchema is not supported
rule: self == oldSelf
kubernetes:
description: Kubernetes specification for tenant control plane
properties:
@@ -6539,15 +6551,47 @@ spec:
items:
type: string
type: array
clusterDomain:
default: cluster.local
description: The default domain name used for DNS resolution within the cluster.
pattern: .*\..*
type: string
x-kubernetes-validations:
- message: changing the cluster domain is not supported
rule: self == oldSelf
dnsServiceIPs:
default:
- 10.96.0.10
description: |-
The DNS Service for internal resolution, it must match the Service CIDR.
In case of an empty value, it is automatically computed according to the Service CIDR, e.g.:
Service CIDR 10.96.0.0/16, the resulting DNS Service IP will be 10.96.0.10 for IPv4,
for IPv6 from the CIDR 2001:db8:abcd::/64 the resulting DNS Service IP will be 2001:db8:abcd::10.
items:
type: string
type: array
loadBalancerClass:
description: |-
Specify the LoadBalancer class in case of multiple load balancer implementations.
Field supported only for Tenant Control Plane instances exposed using a LoadBalancer Service.
minLength: 1
type: string
x-kubernetes-validations:
- message: LoadBalancerClass is immutable
rule: self == oldSelf
loadBalancerSourceRanges:
description: |-
LoadBalancerSourceRanges restricts the IP ranges that can access
the LoadBalancer type Service. This field defines a list of IP
address ranges (in CIDR format) that are allowed to access the service.
If left empty, the service will allow traffic from all IP ranges (0.0.0.0/0).
This feature is useful for restricting access to API servers or services
to specific networks for security purposes.
Example: {"192.168.1.0/24", "10.0.0.0/8"}
items:
type: string
type: array
podCidr:
default: 10.244.0.0/16
description: CIDR for Kubernetes Pods
description: 'CIDR for Kubernetes Pods: if empty, defaulted to 10.244.0.0/16.'
type: string
port:
default: 6443
@@ -6556,13 +6600,24 @@ spec:
type: integer
serviceCidr:
default: 10.96.0.0/16
description: Kubernetes Service
description: 'CIDR for Kubernetes Services: if empty, defaulted to 10.96.0.0/16.'
type: string
type: object
required:
- controlPlane
- kubernetes
type: object
x-kubernetes-validations:
- message: unsetting the dataStore is not supported
rule: '!has(oldSelf.dataStore) || has(self.dataStore)'
- message: unsetting the dataStoreSchema is not supported
rule: '!has(oldSelf.dataStoreSchema) || has(self.dataStoreSchema)'
- message: LoadBalancer source ranges are supported only with LoadBalancer service type
rule: '!has(self.networkProfile.loadBalancerSourceRanges) || (size(self.networkProfile.loadBalancerSourceRanges) == 0 || self.controlPlane.service.serviceType == ''LoadBalancer'')'
- message: LoadBalancerClass is supported only with LoadBalancer service type
rule: '!has(self.networkProfile.loadBalancerClass) || self.controlPlane.service.serviceType == ''LoadBalancer'''
- message: LoadBalancerClass cannot be set or unset at runtime
rule: self.controlPlane.service.serviceType != 'LoadBalancer' || (oldSelf.controlPlane.service.serviceType != 'LoadBalancer' && self.controlPlane.service.serviceType == 'LoadBalancer') || has(self.networkProfile.loadBalancerClass) == has(oldSelf.networkProfile.loadBalancerClass)
status:
description: TenantControlPlaneStatus defines the observed state of TenantControlPlane.
properties:

@@ -33,8 +33,9 @@ spec:
- --leader-elect
- --metrics-bind-address={{ .Values.metricsBindAddress }}
- --tmp-directory={{ .Values.temporaryDirectoryPath }}
{{- $datastoreName := .Values.defaultDatastoreName | required ".Values.defaultDatastoreName is required!" }}
- --datastore={{ $datastoreName }}
{{- if not (eq .Values.defaultDatastoreName "") }}
- --datastore={{ .Values.defaultDatastoreName }}
{{- end }}
{{- if .Values.telemetry.disabled }}
- --disable-telemetry
{{- end }}
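The hunk above drops the hard required check, so an empty defaultDatastoreName now simply omits the --datastore flag instead of failing the render. A rough way to confirm both branches is sketched below; the chart path is an assumption, the flag name comes from the hunk.

helm template kamaji ./charts/kamaji --set defaultDatastoreName="" | grep -- '--datastore=' \
  || echo '--datastore flag omitted when defaultDatastoreName is empty'
helm template kamaji ./charts/kamaji --set defaultDatastoreName=default | grep -- '--datastore=default'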

@@ -95,7 +95,7 @@ loggingDevel:
# -- Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false)
enable: false

# -- Specify the default DataStore name for the Kamaji instance.
# -- If specified, all the Kamaji instances with an unassigned DataStore will inherit this default value.
defaultDatastoreName: default

kamaji-etcd:

@@ -1,7 +1,7 @@
# Build the manager binary
FROM golang:1.23 as builder

ARG VERSION=edge-24.9.2
ARG VERSION=edge-24.12.1
ARG TARGETOS TARGETARCH

WORKDIR /workspace

@@ -3,7 +3,7 @@ kamaji:
deploy: false
image:
pullPolicy: IfNotPresent
tag: v0.25.0@sha256:948a496a23b4b3158517473afd74a693380bfecb9f5346c214c8beef6deb405a
tag: v0.23.1@sha256:87166056685e4dab9de030ad9389ce58f0d96e7f6c191674fe93483fbe99490f
repository: ghcr.io/aenix-io/cozystack/kamaji
resources:
limits:

@@ -22,4 +22,4 @@ global:
images:
kubeovn:
repository: kubeovn
tag: v1.13.2@sha256:fccaf6b1f6514378f4ba2e17399af5b0c20ecf13f78142409848486b0992e5fd
tag: v1.13.2@sha256:ee658a003cd77a1f7b9df1d108255a8b5a69e67dd59fa6a6161c869b00207d4f

@@ -1,138 +0,0 @@
{{- if .Values.scrapeRules.etcd.enabled }}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-rbac-proxy
namespace: cozy-monitoring
labels:
app: kube-rbac-proxy
spec:
selector:
matchLabels:
app: kube-rbac-proxy
template:
metadata:
labels:
app: kube-rbac-proxy
spec:
serviceAccountName: kube-rbac-proxy
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/control-plane: ""
containers:
- name: kube-rbac-proxy
image: quay.io/brancz/kube-rbac-proxy:v0.11.0
args:
- "--secure-listen-address=$(NODE_IP):9443"
- "--upstream=http://127.0.0.1:2381/"
env:
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
ports:
- containerPort: 9443
name: etcd-metrics
securityContext:
runAsUser: 1000
runAsNonRoot: true

---

apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-rbac-proxy
namespace: cozy-monitoring

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kube-rbac-proxy-auth
rules:
- apiGroups: ["authentication.k8s.io"]
resources: ["tokenreviews"]
verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-rbac-proxy-auth-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kube-rbac-proxy-auth
subjects:
- kind: ServiceAccount
name: kube-rbac-proxy
namespace: cozy-monitoring

---

apiVersion: v1
kind: ServiceAccount
metadata:
name: vm-scrape
namespace: cozy-monitoring

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: etcd-metrics-reader
rules:
- nonResourceURLs: ["/metrics"]
verbs: ["get"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: etcd-metrics-reader
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: etcd-metrics-reader
subjects:
- kind: ServiceAccount
name: vm-scrape
namespace: cozy-monitoring

---

apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: vm-token
annotations:
kubernetes.io/service-account.name: vm-scrape

---

apiVersion: operator.victoriametrics.com/v1beta1
kind: VMPodScrape
metadata:
name: etcd-managment-scrape
spec:
podMetricsEndpoints:
- port: etcd-metrics
scheme: https
tlsConfig:
insecureSkipVerify: true
bearerTokenSecret:
name: vm-token
key: token
selector:
matchLabels:
app: kube-rbac-proxy
{{- end }}
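The removed manifest above fronted the local etcd metrics endpoint (127.0.0.1:2381) with kube-rbac-proxy on node port 9443, authenticated by a service-account token. Scraping it by hand looked roughly like this; the secret name and namespace come from the manifest, the node-address selection is an assumption, and the command only sketches what the VMPodScrape automated.

TOKEN=$(kubectl -n cozy-monitoring get secret vm-token -o jsonpath='{.data.token}' | base64 -d)
NODE_IP=$(kubectl get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[0].address}')
curl -sk -H "Authorization: Bearer ${TOKEN}" "https://${NODE_IP}:9443/metrics" | head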
34
packages/system/monitoring-agents/templates/etcd-scrape.yaml
Normal file
@@ -0,0 +1,34 @@
#---
#apiVersion: operator.victoriametrics.com/v1beta1
#kind: VMNodeScrape
#metadata:
#  name: kube-etcd
#  namespace: cozy-monitoring
#spec:
#  selector:
#    node-role.kubernetes.io/control-plane: ""
#  bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
#  honorLabels: true
#  metricRelabelConfigs:
#    - action: labeldrop
#      regex: (uid)
#    - action: labeldrop
#      regex: (id|name)
#    - action: drop
#      regex: (rest_client_request_duration_seconds_bucket|rest_client_request_duration_seconds_sum|rest_client_request_duration_seconds_count)
#      source_labels:
#        - __name__
#  port: "2379"
#  relabelConfigs:
#    - action: labelmap
#      regex: __meta_kubernetes_node_label_(.+)
#    - sourceLabels:
#        - __metrics_path__
#      targetLabel: metrics_path
#    - replacement: etcd
#      targetLabel: job
#  scheme: https
#  scrapeTimeout: 5s
#  tlsConfig:
#    caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
#    insecureSkipVerify: true
@@ -359,7 +359,3 @@ fluent-bit:
Name modify
Match *
Add cluster root-cluster

scrapeRules:
etcd:
enabled: false
