update monitoring-agents stack

kklinch0
2025-03-10 12:04:50 +03:00
parent 40aa65caba
commit 6354b564b4
35 changed files with 308 additions and 67 deletions

View File

@@ -1,2 +1,2 @@
cozystack:
image: ghcr.io/aenix-io/cozystack/cozystack:v0.27.0@sha256:aac04571e99e13653f08e6ccc2b2214032455af547f9a887d01f1483e30d2915
image: kklinch0/cozystack:0.26.1.0@sha256:7b98ee8e218acc93638fc8afc0e8e454e5d5c88812555faaa723b3e3ac9327fc

View File

@@ -1,9 +1,9 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: "Updated Fluent Bit OCI image to v3.1.9"
description: "Updated Fluent Bit OCI image to v3.2.8."
apiVersion: v1
appVersion: 3.1.9
appVersion: 3.2.8
description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD
family operating systems.
home: https://fluentbit.io/
@@ -24,4 +24,4 @@ maintainers:
name: fluent-bit
sources:
- https://github.com/fluent/fluent-bit/
version: 0.47.10
version: 0.48.9

View File

@@ -27,7 +27,7 @@ Fluent Bit allows us to build filter to modify the incoming records using custom
### How to use Lua scripts with this Chart
First, you should add your Lua scripts to `luaScripts` in values.yaml, for example:
First, you should add your Lua scripts to `luaScripts` in values.yaml; templating is supported.
```yaml
luaScripts:
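  # A minimal sketch of the rest of the example, mirroring the chart's CI values
  # added in this commit (script names and bodies are illustrative):
  filter_example.lua: |
    function filter_name(tag, timestamp, record)
        -- put your lua code here.
    end
  filter_with_templating_example.lua: |
    local log_level = {{ .Values.logLevel | quote }}
    function filter_with_templating_name(tag, timestamp, record)
        -- put your lua code here.
    end
```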

View File

@@ -3,6 +3,41 @@ testFramework:
logLevel: debug
extraVolumeMounts:
- name: extra-volume
mountPath: /extra-volume-path
- name: another-extra-volume
mountPath: /another-extra-volume-path
extraVolumes:
- name: extra-volume
emptyDir: {}
- name: another-extra-volume
emptyDir: {}
dashboards:
enabled: true
deterministicUid: true
luaScripts:
filter_example.lua: |
function filter_name(tag, timestamp, record)
-- put your lua code here.
end
filter_with_templating_example.lua: |
local log_level = {{ .Values.logLevel | quote }}
function filter_with_templating_name(tag, timestamp, record)
-- put your lua code here.
end
config:
outputs: |
[OUTPUT]
name stdout
match *
hotReload:
enabled: true
extraWatchVolumes:
- extra-volume
- another-extra-volume

View File

@@ -2,4 +2,5 @@ Get Fluent Bit build information by running these commands:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "fluent-bit.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 2020:2020
curl http://127.0.0.1:2020
curl http://127.0.0.1:2020

View File

@@ -108,11 +108,18 @@ containers:
- {{ printf "-webhook-url=http://localhost:%s/api/v2/reload" (toString .Values.metricsPort) }}
- -volume-dir=/watch/config
- -volume-dir=/watch/scripts
{{- range $idx, $val := .Values.hotReload.extraWatchVolumes }}
- {{ printf "-volume-dir=/watch/extra-%d" (int $idx) }}
{{- end }}
volumeMounts:
- name: config
mountPath: /watch/config
- name: luascripts
mountPath: /watch/scripts
{{- range $idx, $val := .Values.hotReload.extraWatchVolumes }}
- name: {{ $val }}
mountPath: {{ printf "/watch/extra-%d" (int $idx) }}
{{- end }}
{{- with .Values.hotReload.resources }}
resources:
{{- toYaml . | nindent 12 }}
@@ -132,7 +139,7 @@ volumes:
{{- if or .Values.luaScripts .Values.hotReload.enabled }}
- name: luascripts
configMap:
name: {{ include "fluent-bit.fullname" . }}-luascripts
name: {{ include "fluent-bit.fullname" . }}-luascripts
{{- end }}
{{- if eq .Values.kind "DaemonSet" }}
{{- toYaml .Values.daemonSetVolumes | nindent 2 }}

View File

@@ -8,6 +8,6 @@ metadata:
{{- include "fluent-bit.labels" . | nindent 4 }}
data:
{{ range $key, $value := .Values.luaScripts }}
{{ $key }}: {{ $value | quote }}
{{ $key }}: {{ (tpl $value $) | quote }}
{{ end }}
{{- end -}}

View File

@@ -20,12 +20,15 @@ spec:
hostNetwork: {{ .Values.hostNetwork }}
hostIPC: false
hostPID: false
{{- with .Values.podSecurityPolicy.runAsUser }}
runAsUser:
# TODO: Require the container to run without root privileges.
rule: 'RunAsAny'
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.podSecurityPolicy.seLinux }}
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: 'RunAsAny'
{{- toYaml . | nindent 4 }}
{{- end }}
supplementalGroups:
rule: 'MustRunAs'
ranges:

View File

@@ -24,10 +24,14 @@ forbiddenSysctls:
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
{{- with .Values.openShift.securityContextConstraints.runAsUser }}
runAsUser:
type: RunAsAny
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.openShift.securityContextConstraints.seLinuxContext }}
seLinuxContext:
type: MustRunAs
{{- toYaml . | nindent 4 }}
{{- end }}
supplementalGroups:
type: RunAsAny
volumes:

View File

@@ -17,7 +17,7 @@ spec:
image: {{ include "fluent-bit.image" .Values.testFramework.image | quote }}
imagePullPolicy: {{ .Values.testFramework.image.pullPolicy }}
command: ["sh"]
args: ["-c", "wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
args: ["-c", "sleep 5s && wget -O- {{ include "fluent-bit.fullname" . }}:{{ .Values.service.port }}"]
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 4 }}

View File

@@ -15,7 +15,7 @@ spec:
containerPolicies:
- containerName: {{ .Chart.Name }}
{{- with .Values.autoscaling.vpa.controlledResources }}
controlledResources:
controlledResources:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.autoscaling.vpa.maxAllowed }}

View File

@@ -45,6 +45,11 @@ rbac:
podSecurityPolicy:
create: false
annotations: {}
runAsUser:
rule: RunAsAny
seLinux:
# This policy assumes the nodes are using AppArmor rather than SELinux.
rule: RunAsAny
# OpenShift-specific configuration
openShift:
@@ -54,6 +59,10 @@ openShift:
create: true
name: ""
annotations: {}
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
# Use an existing SCC in the cluster rather than creating a new one
existingName: ""
@@ -98,14 +107,13 @@ service:
# nodePort: 30020
# clusterIP: 172.16.10.1
annotations: {}
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
# prometheus.io/path: "/api/v1/metrics/prometheus"
# prometheus.io/port: "2020"
# prometheus.io/scrape: "true"
externalIPs: []
# externalIPs:
# - 2.2.2.2
serviceMonitor:
enabled: false
# namespace: monitoring
@@ -362,6 +370,7 @@ networkPolicy:
# ingress:
# from: []
# See Lua script configuration example in README.md
luaScripts: {}
## https://docs.fluentbit.io/manual/administration/configuring-fluent-bit/classic-mode/configuration-file
@@ -506,7 +515,8 @@ hotReload:
enabled: false
image:
repository: ghcr.io/jimmidyson/configmap-reload
tag: v0.11.1
tag: v0.14.0
digest:
pullPolicy: IfNotPresent
resources: {}
extraWatchVolumes: []
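For reference, a minimal sketch of enabling hot reload together with extra watched volumes, mirroring the CI values added earlier in this commit (the volume names are illustrative):
```yaml
extraVolumes:
  - name: extra-volume
    emptyDir: {}
extraVolumeMounts:
  - name: extra-volume
    mountPath: /extra-volume-path
hotReload:
  enabled: true
  extraWatchVolumes:
    - extra-volume
```
The reloader sidecar then watches `/watch/extra-0` in addition to the config and Lua script volumes.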

View File

@@ -4,7 +4,7 @@ annotations:
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: 2.13.0
appVersion: 2.15.0
description: Install kube-state-metrics to generate and expose cluster-level metrics
home: https://github.com/kubernetes/kube-state-metrics/
keywords:
@@ -15,12 +15,15 @@ keywords:
maintainers:
- email: tariq.ibrahim@mulesoft.com
name: tariq1890
url: https://github.com/tariq1890
- email: manuel@rueg.eu
name: mrueg
url: https://github.com/mrueg
- email: david@0xdc.me
name: dotdc
url: https://github.com/dotdc
name: kube-state-metrics
sources:
- https://github.com/kubernetes/kube-state-metrics/
type: application
version: 5.26.0
version: 5.30.1

View File

@@ -66,11 +66,22 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- if .Values.env }}
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
{{ else }}
{{- if .Values.env }}
env:
{{- toYaml .Values.env | nindent 8 }}
{{- end }}
{{- end }}
args:
{{- if .Values.extraArgs }}
{{- .Values.extraArgs | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.kubeRBACProxy.enabled }}
- --host=127.0.0.1
{{- end }}
- --port={{ $servicePort }}
{{- if .Values.collectors }}
- --resources={{ .Values.collectors | join "," }}
@@ -163,8 +174,13 @@ spec:
value: {{ $header.value }}
{{- end }}
path: /healthz
{{- if .Values.kubeRBACProxy.enabled }}
port: {{ .Values.service.port | default 8080 }}
scheme: HTTPS
{{- else }}
port: {{ $servicePort }}
scheme: {{ upper .Values.startupProbe.httpGet.scheme }}
{{- end }}
initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.startupProbe.periodSeconds }}
successThreshold: {{ .Values.startupProbe.successThreshold }}
@@ -182,8 +198,13 @@ spec:
value: {{ $header.value }}
{{- end }}
path: /livez
{{- if .Values.kubeRBACProxy.enabled }}
port: {{ .Values.service.port | default 8080 }}
scheme: HTTPS
{{- else }}
port: {{ $servicePort }}
scheme: {{ upper .Values.livenessProbe.httpGet.scheme }}
{{- end }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
@@ -200,8 +221,13 @@ spec:
value: {{ $header.value }}
{{- end }}
path: /readyz
port: {{ $servicePort }}
{{- if .Values.kubeRBACProxy.enabled }}
port: {{ .Values.selfMonitor.telemetryPort | default 8081 }}
scheme: HTTPS
{{- else }}
port: {{ $telemetryPort }}
scheme: {{ upper .Values.readinessProbe.httpGet.scheme }}
{{- end }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
@@ -299,7 +325,11 @@ spec:
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- if kindIs "map" .Values.affinity }}
{{- toYaml .Values.affinity | nindent 8 }}
{{- else }}
{{- tpl .Values.affinity $ | nindent 8 }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:

View File

@@ -105,6 +105,30 @@ rules:
- networkpolicies
verbs: ["list", "watch"]
{{ end -}}
{{ if has "ingressclasses" $.Values.collectors }}
- apiGroups: ["networking.k8s.io"]
resources:
- ingressclasses
verbs: ["list", "watch"]
{{ end -}}
{{ if has "clusterrolebindings" $.Values.collectors }}
- apiGroups: ["rbac.authorization.k8s.io"]
resources:
- clusterrolebindings
verbs: ["list", "watch"]
{{ end -}}
{{ if has "clusterroles" $.Values.collectors }}
- apiGroups: ["rbac.authorization.k8s.io"]
resources:
- clusterroles
verbs: ["list", "watch"]
{{ end -}}
{{ if has "roles" $.Values.collectors }}
- apiGroups: ["rbac.authorization.k8s.io"]
resources:
- roles
verbs: ["list", "watch"]
{{ end -}}
{{ if has "nodes" $.Values.collectors }}
- apiGroups: [""]
resources:

View File

@@ -106,7 +106,7 @@ kubeRBACProxy:
image:
registry: quay.io
repository: brancz/kube-rbac-proxy
tag: v0.18.0
tag: v0.18.2
sha: ""
pullPolicy: IfNotPresent
@@ -160,6 +160,13 @@ serviceAccount:
# If false then the user will opt out of automounting API credentials.
automountServiceAccountToken: true
# Additional Environment variables
env: {}
# - name: GOMAXPROCS
# valueFrom:
# resourceFieldRef:
# resource: limits.cpu
prometheus:
monitor:
enabled: false
@@ -297,8 +304,16 @@ containerSecurityContext:
nodeSelector: {}
## Affinity settings for pod assignment
## Can be defined as either a dict or string. String is useful for `tpl` templating.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
affinity: {}
# affinity: |
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchLabels:
# {{- include "kube-state-metrics.selectorLabels" . | indent 10 }}
# topologyKey: kubernetes.io/hostname
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
@@ -382,6 +397,10 @@ collectors:
- storageclasses
- validatingwebhookconfigurations
- volumeattachments
# - ingressclasses
# - clusterrolebindings
# - clusterroles
# - roles
# Enabling kubeconfig will pass the --kubeconfig argument to the container
kubeconfig:
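As a hedged aside, the new collectors stay disabled until they are listed under `collectors`; a sketch of opting in to the RBAC resources added above (the rest of the default collector list is elided):
```yaml
collectors:
  # ... default collectors ...
  - ingressclasses
  - clusterrolebindings
  - clusterroles
  - roles
```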

View File

@@ -19,3 +19,5 @@
.project
.idea/
*.tmproj
ci/

View File

@@ -4,7 +4,7 @@ annotations:
- name: Chart Source
url: https://github.com/prometheus-community/helm-charts
apiVersion: v2
appVersion: 1.8.2
appVersion: 1.9.0
description: A Helm chart for prometheus node-exporter
home: https://github.com/prometheus/node_exporter/
keywords:
@@ -14,12 +14,15 @@ keywords:
maintainers:
- email: gianrubio@gmail.com
name: gianrubio
url: https://github.com/gianrubio
- email: zanhsieh@gmail.com
name: zanhsieh
url: https://github.com/zanhsieh
- email: rootsandtrees@posteo.de
name: zeritti
url: https://github.com/zeritti
name: prometheus-node-exporter
sources:
- https://github.com/prometheus/node_exporter/
type: application
version: 4.40.0
version: 4.44.1

View File

@@ -50,7 +50,7 @@ kubectl delete daemonset -l app=prometheus-node-exporter
helm upgrade -i prometheus-node-exporter prometheus-community/prometheus-node-exporter
```
If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#podmonitor), please make sure to update their `selector` fields to match the new labels.
If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor), please make sure to update their `selector` fields to match the new labels.
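For example, a custom ServiceMonitor might be adjusted along these lines (a sketch; the exact label values depend on your release name and are assumptions here):
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: node-exporter
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: prometheus-node-exporter
      app.kubernetes.io/instance: prometheus-node-exporter
  endpoints:
    - port: metrics
```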
### From 2.x to 3.x

View File

@@ -1,4 +0,0 @@
---
commonLabels:
foo: bar
baz: '{{ include "prometheus-node-exporter.fullname" . }}'

View File

@@ -1,5 +0,0 @@
networkPolicy:
enabled: true
ingress:
- ports:
- port: 9100

View File

@@ -1,4 +0,0 @@
---
podLabels:
foo: bar
baz: '{{ .Chart.AppVersion }}'

View File

@@ -1,3 +0,0 @@
service:
targetPort: 9102
port: 9102

View File

@@ -1,5 +0,0 @@
---
service:
labels:
foo: bar
baz: quux

View File

@@ -26,4 +26,4 @@ rules:
verbs:
- get
```
{{- end }}
{{- end }}

View File

@@ -200,3 +200,38 @@ labelValueLengthLimit: {{ . }}
{{- end }}
{{- end }}
{{- end }}
{{/*
The default node affinity to exclude
- AWS Fargate
- Azure virtual nodes
*/}}
{{- define "prometheus-node-exporter.defaultAffinity" -}}
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: eks.amazonaws.com/compute-type
operator: NotIn
values:
- fargate
- key: type
operator: NotIn
values:
- virtual-kubelet
{{- end -}}
{{- define "prometheus-node-exporter.mergedAffinities" -}}
{{- $defaultAffinity := include "prometheus-node-exporter.defaultAffinity" . | fromYaml -}}
{{- with .Values.affinity -}}
{{- if .nodeAffinity -}}
{{- $_ := set $defaultAffinity "nodeAffinity" (mergeOverwrite $defaultAffinity.nodeAffinity .nodeAffinity) -}}
{{- end -}}
{{- if .podAffinity -}}
{{- $_ := set $defaultAffinity "podAffinity" .podAffinity -}}
{{- end -}}
{{- if .podAntiAffinity -}}
{{- $_ := set $defaultAffinity "podAntiAffinity" .podAntiAffinity -}}
{{- end -}}
{{- end -}}
{{- toYaml $defaultAffinity -}}
{{- end -}}
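In effect, any `affinity` supplied in values.yaml is merged into this default rather than replacing it wholesale; a minimal sketch of adding a pod anti-affinity while keeping the default node affinity (the selector labels are illustrative):
```yaml
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app.kubernetes.io/name: prometheus-node-exporter
        topologyKey: kubernetes.io/hostname
```
A user-supplied `nodeAffinity` is merged over the default with `mergeOverwrite`, so the Fargate and virtual-kubelet exclusions remain unless explicitly overwritten.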

View File

@@ -178,6 +178,9 @@ spec:
- name: {{ .name }}
mountPath: {{ .mountPath }}
{{- end }}
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- range .Values.sidecars }}
{{- $overwrites := dict "volumeMounts" (concat (include "prometheus-node-exporter.sidecarVolumeMounts" $ | fromYamlArray) (.volumeMounts | default list) | default list) }}
{{- $defaults := dict "image" (include "prometheus-node-exporter.image" $) "securityContext" $.Values.containerSecurityContext "imagePullPolicy" $.Values.image.pullPolicy }}
@@ -193,9 +196,24 @@ spec:
- --upstream=http://127.0.0.1:{{ $servicePort }}/
- --proxy-endpoints-port={{ .Values.kubeRBACProxy.proxyEndpointsPort }}
- --config-file=/etc/kube-rbac-proxy-config/config-file.yaml
{{- if and .Values.kubeRBACProxy.tls.enabled .Values.tlsSecret.enabled }}
- --tls-cert-file=/tls/private/{{ .Values.tlsSecret.certItem }}
- --tls-private-key-file=/tls/private/{{ .Values.tlsSecret.keyItem }}
{{- if and .Values.kubeRBACProxy.tls.tlsClientAuth .Values.tlsSecret.caItem }}
- --client-ca-file=/tls/private/{{ .Values.tlsSecret.caItem }}
{{- end }}
{{- end }}
volumeMounts:
- name: kube-rbac-proxy-config
mountPath: /etc/kube-rbac-proxy-config
{{- if and .Values.kubeRBACProxy.tls.enabled .Values.tlsSecret.enabled }}
- name: {{ tpl .Values.tlsSecret.volumeName . | quote }}
mountPath: /tls/private
readOnly: true
{{- end }}
{{- with .Values.kubeRBACProxy.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
imagePullPolicy: {{ .Values.kubeRBACProxy.image.pullPolicy }}
{{- if .Values.kubeRBACProxy.image.sha }}
image: "{{ .Values.global.imageRegistry | default .Values.kubeRBACProxy.image.registry}}/{{ .Values.kubeRBACProxy.image.repository }}:{{ .Values.kubeRBACProxy.image.tag }}@sha256:{{ .Values.kubeRBACProxy.image.sha }}"
@@ -249,10 +267,8 @@ spec:
hostNetwork: {{ .Values.hostNetwork }}
hostPID: {{ .Values.hostPID }}
hostIPC: {{ .Values.hostIPC }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- include "prometheus-node-exporter.mergedAffinities" . | nindent 8 }}
{{- with .Values.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
@@ -313,3 +329,20 @@ spec:
configMap:
name: {{ template "prometheus-node-exporter.fullname" . }}-rbac-config
{{- end }}
{{- if .Values.tlsSecret.enabled }}
- name: {{ tpl .Values.tlsSecret.volumeName . | quote }}
secret:
secretName: {{ tpl .Values.tlsSecret.secretName . | quote }}
items:
- key: {{ required "Value tlsSecret.certItem must be set." .Values.tlsSecret.certItem | quote }}
path: {{ .Values.tlsSecret.certItem | quote }}
- key: {{ required "Value tlsSecret.keyItem must be set." .Values.tlsSecret.keyItem | quote }}
path: {{ .Values.tlsSecret.keyItem | quote }}
{{- if .Values.tlsSecret.caItem }}
- key: {{ .Values.tlsSecret.caItem | quote }}
path: {{ .Values.tlsSecret.caItem | quote }}
{{- end }}
{{- end }}
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -3,7 +3,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp-{{ include "prometheus-node-exporter.fullname" . }}
labels:
labels:
{{- include "prometheus-node-exporter.labels" . | nindent 4 }}
rules:
- apiGroups: ['extensions']

View File

@@ -13,4 +13,4 @@ data:
resource: services
subresource: {{ template "prometheus-node-exporter.fullname" . }}
name: {{ template "prometheus-node-exporter.fullname" . }}
{{- end }}
{{- end }}

View File

@@ -4,7 +4,7 @@ kind: ServiceAccount
metadata:
name: {{ include "prometheus-node-exporter.serviceAccountName" . }}
namespace: {{ include "prometheus-node-exporter.namespace" . }}
labels:
labels:
{{- include "prometheus-node-exporter.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:

View File

@@ -16,6 +16,10 @@ spec:
podTargetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.prometheus.monitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
selector:
matchLabels:
{{- with .Values.prometheus.monitor.selectorOverride }}

View File

@@ -45,7 +45,7 @@ kubeRBACProxy:
image:
registry: quay.io
repository: brancz/kube-rbac-proxy
tag: v0.18.0
tag: v0.18.2
sha: ""
pullPolicy: IfNotPresent
@@ -84,6 +84,37 @@ kubeRBACProxy:
# cpu: 10m
# memory: 32Mi
## Additional volume mounts in the kube-rbac-proxy container
## See extraVolumes below
extraVolumeMounts: []
# - name: extra-volume
# mountPath: /extra
# readOnly: true
## tls enables using TLS resources mounted from the secret referred to in tlsSecret below.
## When enabling tlsClientAuth, the client CA certificate must be set in tlsSecret.caItem.
## Ref. https://github.com/brancz/kube-rbac-proxy/issues/187
tls:
enabled: false
tlsClientAuth: false
## tlsSecret refers to an existing secret holding TLS items: client CA certificate, private key and certificate.
## secretName and volumeName can be templated.
## If enabled, a volume named volumeName is created from the secret secretName.
## The volume's resources will be used by kube-rbac-proxy if kubeRBACProxy.tls.enabled is set.
tlsSecret:
enabled: false
## Key with client CA certificate (optional)
caItem: ""
## Key with certificate
certItem: tls.crt
## Key with private key
keyItem: tls.key
## Name of an existing secret
secretName: prometheus-node-exporter-tls
## Name of the volume to be created
volumeName: prometheus-node-exporter-tls
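Putting these together, a hedged sketch of terminating TLS in kube-rbac-proxy from an existing secret (the secret name here is an assumption):
```yaml
kubeRBACProxy:
  enabled: true
  tls:
    enabled: true
    tlsClientAuth: false
tlsSecret:
  enabled: true
  secretName: node-exporter-tls
  volumeName: node-exporter-tls
  certItem: tls.crt
  keyItem: tls.key
```
With both flags enabled, the daemonset template above mounts the secret at `/tls/private` and passes `--tls-cert-file` and `--tls-private-key-file` to the proxy.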
## Service configuration
service:
## Creating a service is enabled by default
@@ -148,9 +179,13 @@ prometheus:
jobLabel: ""
# List of pod labels to add to node exporter metrics
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
podTargetLabels: []
# List of target labels to add to node exporter metrics
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
targetLabels: []
scheme: http
basicAuth: {}
bearerTokenFile:
@@ -197,7 +232,7 @@ prometheus:
labelValueLengthLimit: 0
# PodMonitor defines monitoring for a set of pods.
# ref. https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PodMonitor
# ref. https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmonitor
# Using a PodMonitor may be preferred in some environments where there is a very large number
# of Node Exporter endpoints (1000+) behind a single service.
# The PodMonitor is disabled by default. When switching from ServiceMonitor to PodMonitor,
@@ -238,10 +273,10 @@ prometheus:
# TLS configuration to use when scraping the endpoint.
tlsConfig: {}
# Authorization section for this endpoint.
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.SafeAuthorization
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#safeauthorization
authorization: {}
# OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer.
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.OAuth2
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#oauth2
oauth2: {}
# ProxyURL eg http://proxyserver:2195. Directs scrapes through proxy to this endpoint.
@@ -373,7 +408,8 @@ hostSysFsMount:
mountPropagation: ""
## Assign a group of affinity scheduling rules
##
## The default nodeAffinity excludes Fargate nodes and virtual kubelets from scheduling
## unless overridden by a hard node affinity set in this field.
affinity: {}
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
@@ -461,9 +497,11 @@ extraHostVolumeMounts: []
configmaps: []
# - name: <configMapName>
# mountPath: <mountPath>
secrets: []
# - name: <secretName>
# mountPath: <mountPath>
## Override the deployment namespace
##
namespaceOverride: ""
@@ -562,5 +600,19 @@ extraManifests: []
# data:
# extra-data: "value"
## Extra volumes to become available in the pod
extraVolumes: []
# - name: extra-volume
# secret:
# defaultMode: 420
# optional: false
# secretName: node-exporter-secret
## Extra volume mounts in the node-exporter container
extraVolumeMounts: []
# - name: extra-volume
# mountPath: /extra
# readOnly: true
# Override version of app, required if image.tag is defined and does not follow semver
version: ""