Compare commits


4 Commits

Author         SHA1        Message                                                                                  Date
Andrei Kvapil  5310bd3591  add extra bundles                                                                        2024-04-01 14:41:13 +02:00
Andrei Kvapil  88d2dcc3e1  match bundle-name from cozystack-config                                                  2024-04-01 11:48:10 +02:00
Andrei Kvapil  b53e264c5e  Allow overriding values by providing values-<release>: <json|yaml> in cozystack-config   2024-04-01 10:36:17 +02:00
Andrei Kvapil  0c92d25669  bundles                                                                                  2024-04-01 10:13:26 +02:00
62 changed files with 6385 additions and 32700 deletions
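
The new layout reads everything from the `cozystack` ConfigMap in `cozy-system`: `bundle-name` selects a bundle file, `bundle-disable` skips components, and `values-<release>` keys inject per-release overrides. Below is a minimal sketch of that ConfigMap, assuming only the keys that appear in this diff; the bundle name and all concrete values are illustrative and not taken from the changed files:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  # picks bundles/<bundle-name>.yaml inside the cozystack chart
  bundle-name: "example-bundle"            # hypothetical name; bundle file names are not visible in this diff
  # comma-separated release names to leave out of the rendered HelmReleases
  bundle-disable: "linstor,piraeus-operator"
  # consumed by the kubeovn release values in the bundle
  ipv4-pod-cidr: "10.244.0.0/16"           # example CIDR
  ipv4-pod-gateway: "10.244.0.1"           # example gateway
  ipv4-svc-cidr: "10.96.0.0/16"            # example CIDR
  ipv4-join-cidr: "100.64.0.0/16"          # example CIDR
  # values-<release>: YAML (or JSON) merged into that release's values
  values-monitoring: |
    grafana:
      replicas: 2                          # illustrative override
```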


@@ -16,4 +16,4 @@ namespaces-apply:
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl apply -f-
diff:
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) -s templates/namespaces.yaml | kubectl diff -f-
helm template -n $(NAMESPACE) $(NAME) . --dry-run=server $(API_VERSIONS_FLAGS) | kubectl diff -f-


@@ -0,0 +1,96 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: cilium
releaseName: cilium
chart: cozy-cilium
namespace: cozy-cilium
privileged: true
dependsOn: []
- name: fluxcd
releaseName: fluxcd
chart: cozy-fluxcd
namespace: cozy-fluxcd
dependsOn: [cilium]
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: [cilium]
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cilium,cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cilium,cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [cilium,victoria-metrics-operator]
- name: metallb
releaseName: metallb
chart: cozy-metallb
namespace: cozy-metallb
privileged: true
dependsOn: [cilium]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: [cilium]
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [cilium,cert-manager,victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cilium,cert-manager]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: [cilium]
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: [cilium]
- name: piraeus-operator
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cilium,cert-manager]
- name: linstor
releaseName: linstor
chart: cozy-linstor
namespace: cozy-linstor
privileged: true
dependsOn: [piraeus-operator,cilium,cert-manager]
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: [kubeovn]


@@ -0,0 +1,177 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: cilium
releaseName: cilium
chart: cozy-cilium
namespace: cozy-cilium
privileged: true
dependsOn: []
- name: kubeovn
releaseName: kubeovn
chart: cozy-kubeovn
namespace: cozy-kubeovn
privileged: true
dependsOn: [cilium]
values:
cozystack:
nodesHash: {{ include "cozystack.master-node-ips" . | sha256sum }}
kube-ovn:
ipv4:
POD_CIDR: "{{ index $cozyConfig.data "ipv4-pod-cidr" }}"
POD_GATEWAY: "{{ index $cozyConfig.data "ipv4-pod-gateway" }}"
SVC_CIDR: "{{ index $cozyConfig.data "ipv4-svc-cidr" }}"
JOIN_CIDR: "{{ index $cozyConfig.data "ipv4-join-cidr" }}"
- name: fluxcd
releaseName: fluxcd
chart: cozy-fluxcd
namespace: cozy-fluxcd
dependsOn: [cilium,kubeovn]
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: [cilium,kubeovn]
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cilium,kubeovn,cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cilium,kubeovn,cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [cilium,kubeovn,victoria-metrics-operator]
- name: kubevirt-operator
releaseName: kubevirt-operator
chart: cozy-kubevirt-operator
namespace: cozy-kubevirt
dependsOn: [cilium,kubeovn]
- name: kubevirt
releaseName: kubevirt
chart: cozy-kubevirt
namespace: cozy-kubevirt
privileged: true
dependsOn: [cilium,kubeovn,kubevirt-operator]
- name: kubevirt-cdi-operator
releaseName: kubevirt-cdi-operator
chart: cozy-kubevirt-cdi-operator
namespace: cozy-kubevirt-cdi
dependsOn: [cilium,kubeovn]
- name: kubevirt-cdi
releaseName: kubevirt-cdi
chart: cozy-kubevirt-cdi
namespace: cozy-kubevirt-cdi
dependsOn: [cilium,kubeovn,kubevirt-cdi-operator]
- name: metallb
releaseName: metallb
chart: cozy-metallb
namespace: cozy-metallb
privileged: true
dependsOn: [cilium,kubeovn]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: [cilium,kubeovn]
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [cilium,kubeovn,cert-manager,victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cilium,kubeovn,cert-manager]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: [cilium,kubeovn]
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: [cilium,kubeovn]
- name: piraeus-operator
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cilium,kubeovn,cert-manager]
- name: linstor
releaseName: linstor
chart: cozy-linstor
namespace: cozy-linstor
privileged: true
dependsOn: [piraeus-operator,cilium,kubeovn,cert-manager]
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: [cilium,kubeovn]
- name: dashboard
releaseName: dashboard
chart: cozy-dashboard
namespace: cozy-dashboard
dependsOn: [cilium,kubeovn]
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
values:
kubeapps:
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
- name: kamaji
releaseName: kamaji
chart: cozy-kamaji
namespace: cozy-kamaji
dependsOn: [cilium,kubeovn,cert-manager]
- name: capi-operator
releaseName: capi-operator
chart: cozy-capi-operator
namespace: cozy-cluster-api
privileged: true
dependsOn: [cilium,kubeovn,cert-manager]
- name: capi-providers
releaseName: capi-providers
chart: cozy-capi-providers
namespace: cozy-cluster-api
privileged: true
dependsOn: [cilium,kubeovn,capi-operator]


@@ -0,0 +1,69 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: fluxcd
releaseName: fluxcd
chart: cozy-fluxcd
namespace: cozy-fluxcd
dependsOn: []
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: []
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [victoria-metrics-operator]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: []
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cert-manager]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: []
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: []
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: []


@@ -0,0 +1,95 @@
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
releases:
- name: fluxcd
releaseName: fluxcd
chart: cozy-fluxcd
namespace: cozy-fluxcd
dependsOn: []
- name: cert-manager
releaseName: cert-manager
chart: cozy-cert-manager
namespace: cozy-cert-manager
dependsOn: []
- name: cert-manager-issuers
releaseName: cert-manager-issuers
chart: cozy-cert-manager-issuers
namespace: cozy-cert-manager
dependsOn: [cert-manager]
- name: victoria-metrics-operator
releaseName: victoria-metrics-operator
chart: cozy-victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
dependsOn: [cert-manager]
- name: monitoring
releaseName: monitoring
chart: cozy-monitoring
namespace: cozy-monitoring
privileged: true
dependsOn: [victoria-metrics-operator]
- name: grafana-operator
releaseName: grafana-operator
chart: cozy-grafana-operator
namespace: cozy-grafana-operator
dependsOn: []
- name: mariadb-operator
releaseName: mariadb-operator
chart: cozy-mariadb-operator
namespace: cozy-mariadb-operator
dependsOn: [cert-manager,victoria-metrics-operator]
- name: postgres-operator
releaseName: postgres-operator
chart: cozy-postgres-operator
namespace: cozy-postgres-operator
dependsOn: [cert-manager]
- name: rabbitmq-operator
releaseName: rabbitmq-operator
chart: cozy-rabbitmq-operator
namespace: cozy-rabbitmq-operator
dependsOn: []
- name: redis-operator
releaseName: redis-operator
chart: cozy-redis-operator
namespace: cozy-redis-operator
dependsOn: []
- name: piraeus-operator
releaseName: piraeus-operator
chart: cozy-piraeus-operator
namespace: cozy-linstor
dependsOn: [cert-manager]
- name: telepresence
releaseName: traffic-manager
chart: cozy-telepresence
namespace: cozy-telepresence
dependsOn: []
- name: dashboard
releaseName: dashboard
chart: cozy-dashboard
namespace: cozy-dashboard
dependsOn: []
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
values:
kubeapps:
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,7 +1,7 @@
{{/*
Get IP-addresses of master nodes
*/}}
{{- define "master.nodeIPs" -}}
{{- define "cozystack.master-node-ips" -}}
{{- $nodes := lookup "v1" "Node" "" "" -}}
{{- $ips := list -}}
{{- range $node := $nodes.items -}}


@@ -1,38 +1,27 @@
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cilium
namespace: cozy-cilium
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: cilium
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-cilium
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
{{- $dependencyNamespaces := dict }}
{{- $disabledComponents := splitList "," ((index $cozyConfig.data "bundle-disable") | default "") }}
{{/* collect dependency namespaces from releases */}}
{{- range $x := $bundle.releases }}
{{- $_ := set $dependencyNamespaces $x.name $x.namespace }}
{{- end }}
{{- range $x := $bundle.releases }}
{{- if not (has $x.name $disabledComponents) }}
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
name: kubeovn
namespace: cozy-kubeovn
name: {{ $x.name }}
namespace: {{ $x.namespace }}
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubeovn
releaseName: {{ $x.releaseName | default $x.name }}
install:
remediation:
retries: -1
@@ -41,718 +30,31 @@ spec:
retries: -1
chart:
spec:
chart: cozy-kubeovn
chart: {{ $x.chart }}
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
{{- $values := dict }}
{{- with $x.values }}
{{- $values = merge . $values }}
{{- end }}
{{- with index $cozyConfig.data (printf "values-%s" $x.name) }}
{{- $values = merge (fromYaml .) $values }}
{{- end }}
{{- with $values }}
values:
cozystack:
configHash: {{ index (lookup "v1" "ConfigMap" "cozy-system" "cozystack") "data" | toJson | sha256sum }}
nodesHash: {{ include "master.nodeIPs" . | sha256sum }}
{{- toYaml . | nindent 4}}
{{- end }}
{{- with $x.dependsOn }}
dependsOn:
- name: cilium
namespace: cozy-cilium
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cozy-fluxcd
namespace: cozy-fluxcd
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: fluxcd
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-fluxcd
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager
namespace: cozy-cert-manager
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: cert-manager
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-cert-manager
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: cert-manager-issuers
namespace: cozy-cert-manager
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: cert-manager-issuers
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-cert-manager-issuers
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: victoria-metrics-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-victoria-metrics-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: monitoring
namespace: cozy-monitoring
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: monitoring
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-monitoring
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt-operator
namespace: cozy-kubevirt
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt
namespace: cozy-kubevirt
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: kubevirt-operator
namespace: cozy-kubevirt
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt-cdi-operator
namespace: cozy-kubevirt-cdi
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt-cdi-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt-cdi-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kubevirt-cdi
namespace: cozy-kubevirt-cdi
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kubevirt-cdi
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kubevirt-cdi
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: kubevirt-cdi-operator
namespace: cozy-kubevirt-cdi
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: metallb
namespace: cozy-metallb
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: metallb
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-metallb
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: grafana-operator
namespace: cozy-grafana-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: grafana-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-grafana-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: mariadb-operator
namespace: cozy-mariadb-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: mariadb-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-mariadb-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
- name: victoria-metrics-operator
namespace: cozy-victoria-metrics-operator
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: postgres-operator
namespace: cozy-postgres-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: postgres-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-postgres-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: rabbitmq-operator
namespace: cozy-rabbitmq-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: rabbitmq-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-rabbitmq-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: redis-operator
namespace: cozy-redis-operator
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: redis-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-redis-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: piraeus-operator
namespace: cozy-linstor
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: piraeus-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-piraeus-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: linstor
namespace: cozy-linstor
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: linstor
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-linstor
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: piraeus-operator
namespace: cozy-linstor
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: telepresence
namespace: cozy-telepresence
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: traffic-manager
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-telepresence
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: dashboard
namespace: cozy-dashboard
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: dashboard
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-dashboard
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
{{- if .Capabilities.APIVersions.Has "source.toolkit.fluxcd.io/v1beta2" }}
{{- with (lookup "source.toolkit.fluxcd.io/v1beta2" "HelmRepository" "cozy-public" "").items }}
values:
kubeapps:
redis:
master:
podAnnotations:
{{- range $index, $repo := . }}
{{- with (($repo.status).artifact).revision }}
repository.cozystack.io/{{ $repo.metadata.name }}: {{ quote . }}
{{- end }}
{{- end }}
{{- range $dep := . }}
{{- if not (has $dep $disabledComponents) }}
- name: {{ $dep }}
namespace: {{ index $dependencyNamespaces $dep }}
{{- end }}
{{- end }}
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: kamaji
namespace: cozy-kamaji
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: kamaji
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-kamaji
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: capi-operator
namespace: cozy-cluster-api
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: capi-operator
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-capi-operator
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
- name: cert-manager
namespace: cozy-cert-manager
---
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: capi-providers
namespace: cozy-cluster-api
labels:
cozystack.io/repository: system
spec:
interval: 1m
releaseName: capi-providers
install:
remediation:
retries: -1
upgrade:
remediation:
retries: -1
chart:
spec:
chart: cozy-capi-providers
reconcileStrategy: Revision
sourceRef:
kind: HelmRepository
name: cozystack-system
namespace: cozy-system
dependsOn:
- name: capi-operator
namespace: cozy-cluster-api
- name: cilium
namespace: cozy-cilium
- name: kubeovn
namespace: cozy-kubeovn
{{- end }}
{{- end }}
{{- end }}
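
The rewritten template above renders one HelmRelease per bundle entry instead of the long hand-written list it replaces. As a sanity check, here is a sketch of what the loop would emit for the `fluxcd` entry of a bundle that includes kubeovn, assuming no `values-fluxcd` override and nothing disabled (so no `values:` block is produced); this is an illustrative rendering, not a file from the diff:

```yaml
apiVersion: helm.toolkit.fluxcd.io/v2beta2
kind: HelmRelease
metadata:
  name: fluxcd
  namespace: cozy-fluxcd
  labels:
    cozystack.io/repository: system
spec:
  interval: 1m
  releaseName: fluxcd
  install:
    remediation:
      retries: -1
  upgrade:
    remediation:
      retries: -1
  chart:
    spec:
      chart: cozy-fluxcd
      reconcileStrategy: Revision
      sourceRef:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
  dependsOn:
    - name: cilium
      namespace: cozy-cilium
    - name: kubeovn
      namespace: cozy-kubeovn
```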


@@ -1,13 +1,29 @@
{{- range $ns := .Values.namespaces }}
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
{{- $bundleName := index $cozyConfig.data "bundle-name" }}
{{- $bundle := tpl (.Files.Get (printf "bundles/%s.yaml" $bundleName)) . | fromYaml }}
{{- $namespaces := dict }}
{{/* collect namespaces from releases */}}
{{- range $x := $bundle.releases }}
{{- if not (hasKey $namespaces $x.namespace) }}
{{- $_ := set $namespaces $x.namespace false }}
{{- end }}
{{/* if at least one release requires a privileged namespace, then it should be privileged */}}
{{- if or $x.privileged (index $namespaces $x.namespace) }}
{{- $_ := set $namespaces $x.namespace true }}
{{- end }}
{{- end }}
{{- range $namespace, $privileged := $namespaces }}
---
apiVersion: v1
kind: Namespace
metadata:
annotations:
"helm.sh/resource-policy": keep
{{- if $ns.privileged }}
{{- if $privileged }}
labels:
pod-security.kubernetes.io/enforce: privileged
{{- end }}
name: {{ $ns.name }}
name: {{ $namespace }}
{{- end }}
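
With this change the namespace list is no longer hard-coded in values; it is derived from the bundle's releases, and a namespace is marked privileged when at least one release in it requires that. A sketch of the result for `cozy-monitoring` (privileged via the `monitoring` release in the bundles above), keeping the field order of the template:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  annotations:
    "helm.sh/resource-policy": keep
  labels:
    pod-security.kubernetes.io/enforce: privileged
  name: cozy-monitoring
```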


@@ -1,30 +0,0 @@
namespaces:
- name: cozy-public
- name: cozy-system
privileged: true
- name: cozy-cert-manager
- name: cozy-cilium
privileged: true
- name: cozy-fluxcd
- name: cozy-grafana-operator
- name: cozy-kamaji
- name: cozy-cluster-api
privileged: true # for capk only
- name: cozy-dashboard
- name: cozy-kubeovn
privileged: true
- name: cozy-kubevirt
privileged: true
- name: cozy-kubevirt-cdi
- name: cozy-linstor
privileged: true
- name: cozy-mariadb-operator
- name: cozy-metallb
privileged: true
- name: cozy-monitoring
privileged: true
- name: cozy-postgres-operator
- name: cozy-rabbitmq-operator
- name: cozy-redis-operator
- name: cozy-telepresence
- name: cozy-victoria-metrics-operator


@@ -14,4 +14,3 @@ update:
rm -rf charts && mkdir -p charts/kube-ovn
curl -sSL https://github.com/kubeovn/kube-ovn/archive/refs/heads/master.tar.gz | \
tar -C charts/kube-ovn -xzvf - --strip 2 kube-ovn-master/charts
patch -p4 < patches/cozyconfig.diff


@@ -0,0 +1,24 @@
apiVersion: v2
name: kube-ovn
description: Helm chart for Kube-OVN
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.13.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.13.0"


@@ -0,0 +1,42 @@
# Kube-OVN-helm
Currently supported version: 1.9
Installation:
```bash
$ kubectl label node -lbeta.kubernetes.io/os=linux kubernetes.io/os=linux --overwrite
$ kubectl label node -lnode-role.kubernetes.io/control-plane kube-ovn/role=master --overwrite
$ kubectl label node -lovn.kubernetes.io/ovs_dp_type!=userspace ovn.kubernetes.io/ovs_dp_type=kernel --overwrite
# standard install
$ helm install --debug kubeovn ./charts/kube-ovn --set MASTER_NODES=${Node0}
# high availability install
$ helm install --debug kubeovn ./charts/kube-ovn --set MASTER_NODES=${Node0},${Node1},${Node2}
# upgrade to this version
$ helm upgrade --debug kubeovn ./charts/kube-ovn --set MASTER_NODES=${Node0},${Node1},${Node2}
```
If `MASTER_NODES` is unspecified, Helm will take the internal IPs of nodes with the `kube-ovn/role=master` label.
### Talos Linux
To install Kube-OVN on Talos Linux, declare the openvswitch module in the machine config:
```
machine:
  kernel:
    modules:
      - name: openvswitch
```
and use the following options to install this Helm chart:
```
--set cni_conf.MOUNT_LOCAL_BIN_DIR=false
--set OPENVSWITCH_DIR=/var/lib/openvswitch
--set OVN_DIR=/var/lib/ovn
--set DISABLE_MODULES_MANAGEMENT=true
```
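
The same options can also be kept in a values file and passed with `-f`; the key layout below mirrors the `--set` paths above and is an assumption, since the chart's values.yaml is not shown in this diff:

```yaml
# talos-values.yaml (assumed layout matching the --set paths above)
cni_conf:
  MOUNT_LOCAL_BIN_DIR: false
OPENVSWITCH_DIR: /var/lib/openvswitch
OVN_DIR: /var/lib/ovn
DISABLE_MODULES_MANAGEMENT: true
```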


@@ -0,0 +1,54 @@
{{/*
Get IP-addresses of master nodes
*/}}
{{- define "kubeovn.nodeIPs" -}}
{{- $nodes := lookup "v1" "Node" "" "" -}}
{{- $ips := list -}}
{{- range $node := $nodes.items -}}
{{- $label := splitList "=" $.Values.MASTER_NODES_LABEL }}
{{- $key := index $label 0 }}
{{- $val := "" }}
{{- if eq (len $label) 2 }}
{{- $val = index $label 1 }}
{{- end }}
{{- if eq (index $node.metadata.labels $key) $val -}}
{{- range $address := $node.status.addresses -}}
{{- if eq $address.type "InternalIP" -}}
{{- $ips = append $ips $address.address -}}
{{- break -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{ join "," $ips }}
{{- end -}}
{{/*
Number of master nodes
*/}}
{{- define "kubeovn.nodeCount" -}}
{{- len (split "," (.Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .))) }}
{{- end -}}
{{- define "kubeovn.ovs-ovn.updateStrategy" -}}
{{- $ds := lookup "apps/v1" "DaemonSet" $.Values.namespace "ovs-ovn" -}}
{{- if $ds -}}
{{- if eq $ds.spec.updateStrategy.type "RollingUpdate" -}}
RollingUpdate
{{- else -}}
{{- $imageVersion := (index $ds.spec.template.spec.containers 0).image | splitList ":" | last | trimPrefix "v" -}}
{{- $versionRegex := `^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)` -}}
{{- if regexMatch $versionRegex $imageVersion -}}
{{- if regexFind $versionRegex $imageVersion | semverCompare ">= 1.12.0" -}}
RollingUpdate
{{- else -}}
OnDelete
{{- end -}}
{{- else -}}
OnDelete
{{- end -}}
{{- end -}}
{{- else -}}
RollingUpdate
{{- end -}}
{{- end -}}
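
These helpers feed the deployments below: `kubeovn.nodeIPs` collects the InternalIP of every node matching `MASTER_NODES_LABEL`, `kubeovn.nodeCount` sizes the control-plane replicas from that list, and the update-strategy helper keeps `OnDelete` for existing pre-1.12 `ovs-ovn` daemonsets. An illustrative fragment of the values they read; the actual defaults live in the chart's values.yaml, which is not part of this excerpt:

```yaml
# assumed inputs for kubeovn.nodeIPs / kubeovn.nodeCount
MASTER_NODES: ""                           # e.g. "10.0.0.10,10.0.0.11,10.0.0.12"; empty means derive from the label below
MASTER_NODES_LABEL: "kube-ovn/role=master" # split on "=" into label key and value
```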


@@ -0,0 +1,161 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-central
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
OVN components: northd, nb and sb.
spec:
replicas: {{ include "kubeovn.nodeCount" . }}
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: ovn-central
template:
metadata:
labels:
app: ovn-central
component: network
type: infra
spec:
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn-ovs
hostNetwork: true
containers:
- name: ovn-central
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- /kube-ovn/start-db.sh
securityContext:
capabilities:
add: ["SYS_NICE"]
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: NODE_IPS
value: "{{ .Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .) }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: ENABLE_BIND_LOCAL_IP
value: "{{- .Values.func.ENABLE_BIND_LOCAL_IP }}"
- name: PROBE_INTERVAL
value: "{{ .Values.networking.PROBE_INTERVAL }}"
- name: OVN_NORTHD_PROBE_INTERVAL
value: "{{ .Values.networking.OVN_NORTHD_PROBE_INTERVAL}}"
- name: OVN_LEADER_PROBE_INTERVAL
value: "{{ .Values.networking.OVN_LEADER_PROBE_INTERVAL }}"
- name: OVN_NORTHD_N_THREADS
value: "{{ .Values.networking.OVN_NORTHD_N_THREADS }}"
- name: ENABLE_COMPACT
value: "{{ .Values.networking.ENABLE_COMPACT }}"
{{- if include "kubeovn.ovs-ovn.updateStrategy" . | eq "OnDelete" }}
- name: OVN_VERSION_COMPATIBILITY
value: "21.06"
{{- end }}
resources:
requests:
cpu: {{ index .Values "ovn-central" "requests" "cpu" }}
memory: {{ index .Values "ovn-central" "requests" "memory" }}
limits:
cpu: {{ index .Values "ovn-central" "limits" "cpu" }}
memory: {{ index .Values "ovn-central" "limits" "memory" }}
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-healthcheck.sh
periodSeconds: 15
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 15
failureThreshold: 5
timeoutSeconds: 45
nodeSelector:
kubernetes.io/os: "linux"
{{- with splitList "=" .Values.MASTER_NODES_LABEL }}
{{ index . 0 }}: "{{ if eq (len .) 2 }}{{ index . 1 }}{{ end }}"
{{- end }}
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-config-openvswitch
hostPath:
path: {{ .Values.OPENVSWITCH_DIR }}
- name: host-config-ovn
hostPath:
path: {{ .Values.OVN_DIR }}
- name: host-log-ovs
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/openvswitch
- name: host-log-ovn
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls


@@ -0,0 +1,190 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-controller
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
kube-ovn controller
spec:
replicas: {{ include "kubeovn.nodeCount" . }}
selector:
matchLabels:
app: kube-ovn-controller
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-controller
component: network
type: infra
spec:
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: "ovn.kubernetes.io/ic-gw"
operator: NotIn
values:
- "true"
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-controller
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-controller
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- /kube-ovn/start-controller.sh
- --default-ls={{ .Values.networking.DEFAULT_SUBNET }}
- --default-cidr=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.POD_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.POD_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.POD_CIDR }}
{{- end }}
- --default-gateway=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.POD_GATEWAY }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.POD_GATEWAY }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.POD_GATEWAY }}
{{- end }}
- --default-gateway-check={{- .Values.func.CHECK_GATEWAY }}
- --default-logical-gateway={{- .Values.func.LOGICAL_GATEWAY }}
- --default-u2o-interconnection={{- .Values.func.U2O_INTERCONNECTION }}
- --default-exclude-ips={{- .Values.networking.EXCLUDE_IPS }}
- --cluster-router={{ .Values.networking.DEFAULT_VPC }}
- --node-switch={{ .Values.networking.NODE_SUBNET }}
- --node-switch-cidr=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.JOIN_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.JOIN_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.JOIN_CIDR }}
{{- end }}
- --service-cluster-ip-range=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.SVC_CIDR }}
{{- end }}
- --network-type={{- .Values.networking.NETWORK_TYPE }}
- --default-provider-name={{ .Values.networking.vlan.PROVIDER_NAME }}
- --default-interface-name={{- .Values.networking.vlan.VLAN_INTERFACE_NAME }}
- --default-exchange-link-name={{- .Values.networking.EXCHANGE_LINK_NAME }}
- --default-vlan-name={{- .Values.networking.vlan.VLAN_NAME }}
- --default-vlan-id={{- .Values.networking.vlan.VLAN_ID }}
- --ls-dnat-mod-dl-dst={{- .Values.func.LS_DNAT_MOD_DL_DST }}
- --ls-ct-skip-dst-lport-ips={{- .Values.func.LS_CT_SKIP_DST_LPORT_IPS }}
- --pod-nic-type={{- .Values.networking.POD_NIC_TYPE }}
- --enable-lb={{- .Values.func.ENABLE_LB }}
- --enable-np={{- .Values.func.ENABLE_NP }}
- --enable-eip-snat={{- .Values.networking.ENABLE_EIP_SNAT }}
- --enable-external-vpc={{- .Values.func.ENABLE_EXTERNAL_VPC }}
- --enable-ecmp={{- .Values.networking.ENABLE_ECMP }}
- --logtostderr=false
- --alsologtostderr=true
- --gc-interval={{- .Values.performance.GC_INTERVAL }}
- --inspect-interval={{- .Values.performance.INSPECT_INTERVAL }}
- --log_file=/var/log/kube-ovn/kube-ovn-controller.log
- --log_file_max_size=0
- --enable-lb-svc={{- .Values.func.ENABLE_LB_SVC }}
- --keep-vm-ip={{- .Values.func.ENABLE_KEEP_VM_IP }}
- --enable-metrics={{- .Values.networking.ENABLE_METRICS }}
- --node-local-dns-ip={{- .Values.networking.NODE_LOCAL_DNS_IP }}
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OVN_DB_IPS
value: "{{ .Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .) }}"
- name: POD_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: ENABLE_BIND_LOCAL_IP
value: "{{- .Values.func.ENABLE_BIND_LOCAL_IP }}"
volumeMounts:
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
# ovn-ic log directory
- mountPath: /var/log/ovn
name: ovn-log
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- /kube-ovn/kube-ovn-controller-healthcheck
periodSeconds: 3
timeoutSeconds: 45
livenessProbe:
exec:
command:
- /kube-ovn/kube-ovn-controller-healthcheck
initialDelaySeconds: 300
periodSeconds: 7
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
cpu: {{ index .Values "kube-ovn-controller" "requests" "cpu" }}
memory: {{ index .Values "kube-ovn-controller" "requests" "memory" }}
limits:
cpu: {{ index .Values "kube-ovn-controller" "limits" "cpu" }}
memory: {{ index .Values "kube-ovn-controller" "limits" "memory" }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn
- name: ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls


@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-controller
namespace: {{ .Values.namespace }}
labels:
app: kube-ovn-controller
spec:
selector:
app: kube-ovn-controller
ports:
- port: 10660
name: metrics
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}


@@ -0,0 +1,109 @@
{{- if .Values.func.ENABLE_IC }}
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-ic-controller
namespace: kube-system
annotations:
kubernetes.io/description: |
OVN IC Client
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: ovn-ic-controller
template:
metadata:
labels:
app: ovn-ic-controller
component: network
type: infra
spec:
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-ic-controller
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-ic-controller
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/kube-ovn/start-ic-controller.sh"]
args:
- --log_file=/var/log/kube-ovn/kube-ovn-ic-controller.log
- --log_file_max_size=0
- --logtostderr=false
- --alsologtostderr=true
securityContext:
capabilities:
add: ["SYS_NICE"]
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OVN_DB_IPS
value: "{{ .Values.MASTER_NODES }}"
resources:
requests:
cpu: 300m
memory: 200Mi
limits:
cpu: 3
memory: 1Gi
volumeMounts:
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
- mountPath: /var/run/tls
name: kube-ovn-tls
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
nodeSelector:
kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-config-ovn
hostPath:
path: /etc/origin/ovn
- name: host-log-ovn
hostPath:
path: /var/log/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-log
hostPath:
path: /var/log/kube-ovn
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
{{- end }}

File diff suppressed because it is too large


@@ -0,0 +1,139 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-monitor
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
Metrics for OVN components: northd, nb and sb.
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app: kube-ovn-monitor
template:
metadata:
labels:
app: kube-ovn-monitor
component: network
type: infra
spec:
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-monitor
topologyKey: kubernetes.io/hostname
priorityClassName: system-cluster-critical
serviceAccountName: kube-ovn-app
hostNetwork: true
containers:
- name: kube-ovn-monitor
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/kube-ovn/start-ovn-monitor.sh"]
args:
- --log_file=/var/log/kube-ovn/kube-ovn-monitor.log
- --logtostderr=false
- --alsologtostderr=true
- --log_file_max_size=0
securityContext:
runAsUser: 0
privileged: false
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: ENABLE_BIND_LOCAL_IP
value: "{{- .Values.func.ENABLE_BIND_LOCAL_IP }}"
resources:
requests:
cpu: {{ index .Values "kube-ovn-monitor" "requests" "cpu" }}
memory: {{ index .Values "kube-ovn-monitor" "requests" "memory" }}
limits:
cpu: {{ index .Values "kube-ovn-monitor" "limits" "cpu" }}
memory: {{ index .Values "kube-ovn-monitor" "limits" "memory" }}
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/ovn
name: host-log-ovn
readOnly: true
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /var/run/tls
name: kube-ovn-tls
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 7
successThreshold: 1
tcpSocket:
port: 10661
timeoutSeconds: 3
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 7
successThreshold: 1
tcpSocket:
port: 10661
timeoutSeconds: 3
nodeSelector:
kubernetes.io/os: "linux"
{{- with splitList "=" .Values.MASTER_NODES_LABEL }}
{{ index . 0 }}: "{{ if eq (len .) 2 }}{{ index . 1 }}{{ end }}"
{{- end }}
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-config-openvswitch
hostPath:
path: {{ .Values.OPENVSWITCH_DIR }}
- name: host-config-ovn
hostPath:
path: {{ .Values.OVN_DIR }}
- name: host-log-ovn
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
- name: kube-ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn


@@ -0,0 +1,18 @@
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-monitor
namespace: {{ .Values.namespace }}
labels:
app: kube-ovn-monitor
spec:
ports:
- name: metrics
port: 10661
type: ClusterIP
selector:
app: kube-ovn-monitor
sessionAffinity: None
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}


@@ -0,0 +1,19 @@
kind: Service
apiVersion: v1
metadata:
name: ovn-nb
namespace: {{ .Values.namespace }}
spec:
ports:
- name: ovn-nb
protocol: TCP
port: 6641
targetPort: 6641
type: ClusterIP
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}
selector:
app: ovn-central
ovn-nb-leader: "true"
sessionAffinity: None


@@ -0,0 +1,19 @@
kind: Service
apiVersion: v1
metadata:
name: ovn-northd
namespace: {{ .Values.namespace }}
spec:
ports:
- name: ovn-northd
protocol: TCP
port: 6643
targetPort: 6643
type: ClusterIP
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}
selector:
app: ovn-central
ovn-northd-leader: "true"
sessionAffinity: None


@@ -0,0 +1,256 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn
rules:
- apiGroups:
- "kubeovn.io"
resources:
- vpcs
- vpcs/status
- vpc-nat-gateways
- vpc-nat-gateways/status
- subnets
- subnets/status
- ippools
- ippools/status
- ips
- vips
- vips/status
- vlans
- vlans/status
- provider-networks
- provider-networks/status
- security-groups
- security-groups/status
- iptables-eips
- iptables-fip-rules
- iptables-dnat-rules
- iptables-snat-rules
- iptables-eips/status
- iptables-fip-rules/status
- iptables-dnat-rules/status
- iptables-snat-rules/status
- ovn-eips
- ovn-fips
- ovn-snat-rules
- ovn-eips/status
- ovn-fips/status
- ovn-snat-rules/status
- ovn-dnat-rules
- ovn-dnat-rules/status
- switch-lb-rules
- switch-lb-rules/status
- vpc-dnses
- vpc-dnses/status
- qos-policies
- qos-policies/status
verbs:
- "*"
- apiGroups:
- ""
resources:
- pods
- namespaces
verbs:
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/exec
verbs:
- create
- apiGroups:
- "k8s.cni.cncf.io"
resources:
- network-attachment-definitions
verbs:
- get
- apiGroups:
- ""
- networking.k8s.io
resources:
- networkpolicies
- configmaps
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
- apiGroups:
- ""
resources:
- services
- services/status
verbs:
- get
- list
- update
- create
- delete
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- update
- get
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
- deployments
- deployments/scale
verbs:
- get
- list
- create
- delete
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- "*"
- apiGroups:
- "kubevirt.io"
resources:
- virtualmachines
- virtualmachineinstances
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn-ovs
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- patch
- apiGroups:
- ""
resources:
- services
- endpoints
verbs:
- get
- apiGroups:
- apps
resources:
- controllerrevisions
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:kube-ovn-cni
rules:
- apiGroups:
- "kubeovn.io"
- ""
resources:
- subnets
- provider-networks
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
- "kubeovn.io"
resources:
- ovn-eips
- ovn-eips/status
- nodes
verbs:
- get
- list
- patch
- watch
- apiGroups:
- "kubeovn.io"
resources:
- ips
verbs:
- get
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:kube-ovn-app
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
verbs:
- get
- list
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get


@@ -0,0 +1,54 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn
roleRef:
name: system:ovn
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: {{ .Values.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn-ovs
roleRef:
name: system:ovn-ovs
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn-ovs
namespace: {{ .Values.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-ovn-cni
roleRef:
name: system:kube-ovn-cni
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: kube-ovn-cni
namespace: {{ .Values.namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-ovn-app
roleRef:
name: system:kube-ovn-app
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: kube-ovn-app
namespace: {{ .Values.namespace }}


@@ -0,0 +1,164 @@
{{- if .Values.HYBRID_DPDK }}
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn-dpdk
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
spec:
selector:
matchLabels:
app: ovs-dpdk
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: ovs-dpdk
component: network
type: infra
spec:
tolerations:
- operator: Exists
priorityClassName: system-node-critical
serviceAccountName: ovn-ovs
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}-dpdk
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/kube-ovn/start-ovs-dpdk-v2.sh"]
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: HW_OFFLOAD
value: "{{- .Values.func.HW_OFFLOAD }}"
- name: TUNNEL_TYPE
value: "{{- .Values.networking.TUNNEL_TYPE }}"
- name: DPDK_TUNNEL_IFACE
value: "{{- .Values.networking.DPDK_TUNNEL_IFACE }}"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OVN_DB_IPS
value: "{{ .Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .) }}"
- name: OVN_REMOTE_PROBE_INTERVAL
value: "{{ .Values.networking.OVN_REMOTE_PROBE_INTERVAL }}"
- name: OVN_REMOTE_OPENFLOW_INTERVAL
value: "{{ .Values.networking.OVN_REMOTE_OPENFLOW_INTERVAL }}"
volumeMounts:
- mountPath: /opt/ovs-config
name: host-config-ovs
- name: shareddir
mountPath: {{ .Values.kubelet_conf.KUBELET_DIR }}/pods
- name: hugepage
mountPath: /dev/hugepages
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /var/run/openvswitch
name: host-run-ovs
mountPropagation: HostToContainer
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /sys
name: host-sys
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /var/run/tls
name: kube-ovn-tls
readinessProbe:
exec:
command:
- bash
- -c
- LOG_ROTATE=true /kube-ovn/ovs-healthcheck.sh
periodSeconds: 5
timeoutSeconds: 45
livenessProbe:
exec:
command:
- bash
- /kube-ovn/ovs-healthcheck.sh
initialDelaySeconds: 60
periodSeconds: 5
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
cpu: {{ index .Values "ovs-ovn" "requests" "cpu" }}
memory: {{ index .Values "ovs-ovn" "requests" "memory" }}
limits:
cpu: {{ index .Values "ovs-ovn" "limits" "cpu" }}
{{.Values.HUGEPAGE_SIZE_TYPE}}: {{.Values.HUGEPAGES}}
memory: {{ index .Values "ovs-ovn" "limits" "memory" }}
nodeSelector:
kubernetes.io/os: "linux"
ovn.kubernetes.io/ovs_dp_type: "userspace"
volumes:
- name: host-config-ovs
hostPath:
path: /opt/ovs-config
type: DirectoryOrCreate
- name: shareddir
hostPath:
path: {{ .Values.kubelet_conf.KUBELET_DIR }}/pods
type: ''
- name: hugepage
emptyDir:
medium: HugePages
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: {{ .Values.OPENVSWITCH_DIR }}
- name: host-config-ovn
hostPath:
path: {{ .Values.OVN_DIR }}
- name: host-log-ovs
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/openvswitch
- name: host-log-ovn
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
{{- end }}


@@ -0,0 +1,34 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn
namespace: {{ .Values.namespace }}
{{- if .Values.global.registry.imagePullSecrets }}
imagePullSecrets:
{{- range $index, $secret := .Values.global.registry.imagePullSecrets }}
{{- if $secret }}
- name: {{ $secret | quote}}
{{- end }}
{{- end }}
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn-ovs
namespace: {{ .Values.namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-ovn-cni
namespace: {{ .Values.namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-ovn-app
namespace: {{ .Values.namespace }}

View File

@@ -0,0 +1,23 @@
{{- if .Values.networking.ENABLE_SSL }}
{{- $cn := "ovn" -}}
{{- $ca := genCA "ovn-ca" 3650 -}}
---
apiVersion: v1
kind: Secret
metadata:
name: kube-ovn-tls
namespace: {{ .Values.namespace }}
data:
{{- $existingSecret := lookup "v1" "Secret" .Values.namespace "kube-ovn-tls" }}
{{- if $existingSecret }}
cacert: {{ index $existingSecret.data "cacert" }}
cert: {{ index $existingSecret.data "cert" }}
key: {{ index $existingSecret.data "key" }}
{{- else }}
{{- with genSignedCert $cn nil nil 3650 $ca }}
cacert: {{ b64enc $ca.Cert }}
cert: {{ b64enc .Cert }}
key: {{ b64enc .Key }}
{{- end }}
{{- end }}
{{- end }}
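The secret template above only generates a fresh CA and server certificate when the lookup finds no existing kube-ovn-tls Secret, so the keypair survives chart upgrades. Roughly, the rendered object looks like the sketch below; the data values are placeholders rather than real material, and the namespace assumes the chart default:

apiVersion: v1
kind: Secret
metadata:
  name: kube-ovn-tls
  namespace: kube-system        # assumes the default .Values.namespace
data:
  cacert: <base64-encoded CA certificate>
  cert: <base64-encoded server certificate>
  key: <base64-encoded private key>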

View File

@@ -0,0 +1,206 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-cni
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn cni daemon.
spec:
selector:
matchLabels:
app: kube-ovn-cni
template:
metadata:
labels:
app: kube-ovn-cni
component: network
type: infra
spec:
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
priorityClassName: system-node-critical
serviceAccountName: kube-ovn-cni
hostNetwork: true
hostPID: true
initContainers:
- name: install-cni
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command: ["/kube-ovn/install-cni.sh"]
securityContext:
runAsUser: 0
privileged: true
volumeMounts:
- mountPath: /opt/cni/bin
name: cni-bin
{{- if .Values.cni_conf.MOUNT_LOCAL_BIN_DIR }}
- mountPath: /usr/local/bin
name: local-bin
{{- end }}
containers:
- name: cni-server
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- bash
- /kube-ovn/start-cniserver.sh
args:
- --enable-mirror={{- .Values.debug.ENABLE_MIRROR }}
- --mirror-iface={{- .Values.debug.MIRROR_IFACE }}
- --node-switch={{ .Values.networking.NODE_SUBNET }}
- --encap-checksum=true
- --service-cluster-ip-range=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.SVC_CIDR }}
{{- end }}
{{- if eq .Values.networking.NETWORK_TYPE "vlan" }}
- --iface=
{{- else}}
- --iface={{- .Values.networking.IFACE }}
{{- end }}
- --dpdk-tunnel-iface={{- .Values.networking.DPDK_TUNNEL_IFACE }}
- --network-type={{- .Values.networking.TUNNEL_TYPE }}
- --default-interface-name={{- .Values.networking.vlan.VLAN_INTERFACE_NAME }}
- --cni-conf-dir={{ .Values.cni_conf.CNI_CONF_DIR }}
- --cni-conf-file={{ .Values.cni_conf.CNI_CONF_FILE }}
- --cni-conf-name={{- .Values.cni_conf.CNI_CONFIG_PRIORITY -}}-kube-ovn.conflist
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-cni.log
- --log_file_max_size=0
- --enable-metrics={{- .Values.networking.ENABLE_METRICS }}
- --kubelet-dir={{ .Values.kubelet_conf.KUBELET_DIR }}
- --enable-tproxy={{ .Values.func.ENABLE_TPROXY }}
- --ovs-vsctl-concurrency={{ .Values.performance.OVS_VSCTL_CONCURRENCY }}
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_IPS
valueFrom:
fieldRef:
fieldPath: status.podIPs
- name: ENABLE_BIND_LOCAL_IP
value: "{{- .Values.func.ENABLE_BIND_LOCAL_IP }}"
- name: DBUS_SYSTEM_BUS_ADDRESS
value: "unix:path=/host/var/run/dbus/system_bus_socket"
volumeMounts:
- name: host-modules
mountPath: /lib/modules
readOnly: true
- name: shared-dir
mountPath: {{ .Values.kubelet_conf.KUBELET_DIR }}/pods
- mountPath: /etc/openvswitch
name: systemid
readOnly: true
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /run/openvswitch
name: host-run-ovs
mountPropagation: Bidirectional
- mountPath: /run/ovn
name: host-run-ovn
- mountPath: /host/var/run/dbus
name: host-dbus
mountPropagation: HostToContainer
- mountPath: /var/run/netns
name: host-ns
mountPropagation: HostToContainer
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
readOnly: true
readinessProbe:
failureThreshold: 3
periodSeconds: 7
successThreshold: 1
tcpSocket:
port: 10665
timeoutSeconds: 3
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 7
successThreshold: 1
tcpSocket:
port: 10665
timeoutSeconds: 3
resources:
requests:
cpu: {{ index .Values "kube-ovn-cni" "requests" "cpu" }}
memory: {{ index .Values "kube-ovn-cni" "requests" "memory" }}
limits:
cpu: {{ index .Values "kube-ovn-cni" "limits" "cpu" }}
memory: {{ index .Values "kube-ovn-cni" "limits" "memory" }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: shared-dir
hostPath:
path: {{ .Values.kubelet_conf.KUBELET_DIR }}/pods
- name: systemid
hostPath:
path: {{ .Values.OPENVSWITCH_DIR }}
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: cni-conf
hostPath:
path: {{ .Values.cni_conf.CNI_CONF_DIR }}
- name: cni-bin
hostPath:
path: {{ .Values.cni_conf.CNI_BIN_DIR }}
- name: host-ns
hostPath:
path: /var/run/netns
- name: host-dbus
hostPath:
path: /var/run/dbus
- name: kube-ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: host-log-ovs
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/openvswitch
- name: host-log-ovn
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
{{- if .Values.cni_conf.MOUNT_LOCAL_BIN_DIR }}
- name: local-bin
hostPath:
path: {{ .Values.cni_conf.MOUNT_LOCAL_BIN_DIR }}
{{- end }}

View File

@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-cni
namespace: {{ .Values.namespace }}
labels:
app: kube-ovn-cni
spec:
selector:
app: kube-ovn-cni
ports:
- port: 10665
name: metrics
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}

View File

@@ -0,0 +1,221 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
chart-version: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
selector:
matchLabels:
app: ovs
updateStrategy:
type: {{ include "kubeovn.ovs-ovn.updateStrategy" . }}
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
template:
metadata:
labels:
app: ovs
component: network
type: infra
annotations:
chart-version: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
tolerations:
- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
priorityClassName: system-node-critical
serviceAccountName: ovn-ovs
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
{{- if .Values.DPDK }}
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.dpdkRepository }}:{{ .Values.DPDK_VERSION }}-{{ .Values.global.images.kubeovn.tag }}
{{- else }}
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.DPDK }}
command: ["/kube-ovn/start-ovs-dpdk.sh"]
{{- else }}
command:
{{- if .Values.DISABLE_MODULES_MANAGEMENT }}
- /bin/sh
- -ec
- |
ln -sf /bin/true /usr/sbin/modprobe
ln -sf /bin/true /usr/sbin/modinfo
ln -sf /bin/true /usr/sbin/rmmod
exec /kube-ovn/start-ovs.sh
{{- else }}
- /kube-ovn/start-ovs.sh
{{- end }}
{{- end }}
securityContext:
runAsUser: 0
privileged: true
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: HW_OFFLOAD
value: "{{- .Values.func.HW_OFFLOAD }}"
- name: TUNNEL_TYPE
value: "{{- .Values.networking.TUNNEL_TYPE }}"
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: OVN_DB_IPS
value: "{{ .Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .) }}"
- name: OVN_REMOTE_PROBE_INTERVAL
value: "{{ .Values.networking.OVN_REMOTE_PROBE_INTERVAL }}"
- name: OVN_REMOTE_OPENFLOW_INTERVAL
value: "{{ .Values.networking.OVN_REMOTE_OPENFLOW_INTERVAL }}"
volumeMounts:
- mountPath: /var/run/netns
name: host-ns
mountPropagation: HostToContainer
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /etc/ovn
name: host-config-ovn
- mountPath: /var/log/openvswitch
name: host-log-ovs
- mountPath: /var/log/ovn
name: host-log-ovn
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /var/run/tls
name: kube-ovn-tls
- mountPath: /var/run/containerd
name: cruntime
readOnly: true
{{- if .Values.DPDK }}
- mountPath: /opt/ovs-config
name: host-config-ovs
- mountPath: /dev/hugepages
name: hugepage
{{- end }}
readinessProbe:
exec:
{{- if .Values.DPDK }}
command:
- bash
- /kube-ovn/ovs-dpdk-healthcheck.sh
{{- else }}
command:
- bash
- -c
- LOG_ROTATE=true /kube-ovn/ovs-healthcheck.sh
{{- end }}
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 45
livenessProbe:
exec:
{{- if .Values.DPDK }}
command:
- bash
- /kube-ovn/ovs-dpdk-healthcheck.sh
{{- else }}
command:
- bash
- /kube-ovn/ovs-healthcheck.sh
{{- end }}
initialDelaySeconds: 60
periodSeconds: 5
failureThreshold: 5
timeoutSeconds: 45
resources:
requests:
{{- if .Values.DPDK }}
cpu: {{ .Values.DPDK_CPU }}
memory: {{ .Values.DPDK_MEMORY }}
{{- else }}
cpu: {{ index .Values "ovs-ovn" "requests" "cpu" }}
memory: {{ index .Values "ovs-ovn" "requests" "memory" }}
{{- end }}
limits:
{{- if .Values.DPDK }}
cpu: {{ .Values.DPDK_CPU }}
memory: {{ .Values.DPDK_MEMORY }}
hugepages-1Gi: 1Gi
{{- else }}
cpu: {{ index .Values "ovs-ovn" "limits" "cpu" }}
memory: {{ index .Values "ovs-ovn" "limits" "memory" }}
{{- end }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-config-openvswitch
hostPath:
path: {{ .Values.OPENVSWITCH_DIR }}
- name: host-config-ovn
hostPath:
path: {{ .Values.OVN_DIR }}
- name: host-log-ovs
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/openvswitch
- name: host-log-ovn
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
- name: host-ns
hostPath:
path: /var/run/netns
- hostPath:
path: /var/run/containerd
name: cruntime
{{- if .Values.DPDK }}
- name: host-config-ovs
hostPath:
path: /opt/ovs-config
type: DirectoryOrCreate
- name: hugepage
emptyDir:
medium: HugePages
{{- end }}
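The ovs-ovn DaemonSet above switches image, entrypoint, probes, mounts, and resources on .Values.DPDK. A minimal values override that exercises the DPDK branch, using only keys that appear in values.yaml further below (the CPU, memory, and hugepage figures are illustrative defaults, not tuning advice):

# sketch of a values override enabling the DPDK code path
DPDK: true
DPDK_VERSION: "19.11"
DPDK_CPU: "1000m"
DPDK_MEMORY: "2Gi"
HUGEPAGE_SIZE_TYPE: hugepages-2Mi
HUGEPAGES: 1Gi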

View File

@@ -0,0 +1,137 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-pinger
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn pinger daemon.
spec:
selector:
matchLabels:
app: kube-ovn-pinger
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-pinger
component: network
type: infra
spec:
priorityClassName: system-node-critical
tolerations:

- effect: NoSchedule
operator: Exists
- effect: NoExecute
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: kube-ovn-app
hostPID: true
containers:
- name: pinger
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
command:
- /kube-ovn/kube-ovn-pinger
args:
- --external-address=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.PINGER_EXTERNAL_ADDRESS }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.PINGER_EXTERNAL_ADDRESS }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.PINGER_EXTERNAL_ADDRESS }}
{{- end }}
- --external-dns=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.PINGER_EXTERNAL_DOMAIN }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.PINGER_EXTERNAL_DOMAIN }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.PINGER_EXTERNAL_DOMAIN }}
{{- end }}
- --ds-namespace={{ .Values.namespace }}
- --logtostderr=false
- --alsologtostderr=true
- --log_file=/var/log/kube-ovn/kube-ovn-pinger.log
- --log_file_max_size=0
- --enable-metrics={{- .Values.networking.ENABLE_METRICS }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
runAsUser: 0
privileged: false
env:
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: HOST_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /var/run/ovn
name: host-run-ovn
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log-ovs
readOnly: true
- mountPath: /var/log/ovn
name: host-log-ovn
readOnly: true
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /etc/localtime
name: localtime
readOnly: true
- mountPath: /var/run/tls
name: kube-ovn-tls
resources:
requests:
cpu: {{ index .Values "kube-ovn-pinger" "requests" "cpu" }}
memory: {{ index .Values "kube-ovn-pinger" "requests" "memory" }}
limits:
cpu: {{ index .Values "kube-ovn-pinger" "limits" "cpu" }}
memory: {{ index .Values "kube-ovn-pinger" "limits" "memory" }}
nodeSelector:
kubernetes.io/os: "linux"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-run-ovn
hostPath:
path: /run/ovn
- name: host-config-openvswitch
hostPath:
path: {{ .Values.OPENVSWITCH_DIR }}
- name: host-log-ovs
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/openvswitch
- name: kube-ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn
- name: host-log-ovn
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/ovn
- name: localtime
hostPath:
path: /etc/localtime
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls

View File

@@ -0,0 +1,16 @@
kind: Service
apiVersion: v1
metadata:
name: kube-ovn-pinger
namespace: {{ .Values.namespace }}
labels:
app: kube-ovn-pinger
spec:
selector:
app: kube-ovn-pinger
ports:
- port: 8080
name: metrics
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}

View File

@@ -0,0 +1,123 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-ovn-pre-delete-hook
namespace: {{ .Values.namespace }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "1"
"helm.sh/hook-delete-policy": hook-succeeded
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "2"
"helm.sh/hook-delete-policy": hook-succeeded
name: system:kube-ovn-pre-delete-hook
rules:
- apiGroups:
- kubeovn.io
resources:
- subnets
verbs:
- get
- list
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kube-ovn-pre-delete-hook
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "3"
"helm.sh/hook-delete-policy": hook-succeeded
roleRef:
name: system:kube-ovn-pre-delete-hook
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: kube-ovn-pre-delete-hook
namespace: {{ .Values.namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ .Chart.Name }}-pre-delete-hook"
namespace: {{ .Values.namespace }}
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": pre-delete
"helm.sh/hook-weight": "4"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
completions: 1
template:
metadata:
name: "{{ .Release.Name }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
app: kube-ovn-pre-delete-hook
component: job
spec:
tolerations:
- key: ""
operator: "Exists"
effect: "NoSchedule"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- kube-ovn-pre-delete-hook
- key: component
operator: In
values:
- job
restartPolicy: Never
hostNetwork: true
nodeSelector:
kubernetes.io/os: "linux"
serviceAccount: kube-ovn-pre-delete-hook
serviceAccountName: kube-ovn-pre-delete-hook
containers:
- name: remove-subnet-finalizer
image: "{{ .Values.global.registry.address}}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- sh
- -c
- /kube-ovn/remove-subnet-finalizer.sh 2>&1 | tee -a /var/log/kube-ovn/remove-subnet-finalizer.log
volumeMounts:
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
volumes:
- name: kube-ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn

View File

@@ -0,0 +1,19 @@
kind: Service
apiVersion: v1
metadata:
name: ovn-sb
namespace: {{ .Values.namespace }}
spec:
ports:
- name: ovn-sb
protocol: TCP
port: 6642
targetPort: 6642
type: ClusterIP
{{- if eq .Values.networking.NET_STACK "dual_stack" }}
ipFamilyPolicy: PreferDualStack
{{- end }}
selector:
app: ovn-central
ovn-sb-leader: "true"
sessionAffinity: None

View File

@@ -0,0 +1,163 @@
{{- if eq (include "kubeovn.ovs-ovn.updateStrategy" .) "OnDelete" }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovs-ovn-upgrade
namespace: {{ .Values.namespace }}
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "1"
"helm.sh/hook-delete-policy": hook-succeeded
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "2"
"helm.sh/hook-delete-policy": hook-succeeded
name: system:ovs-ovn-upgrade
rules:
- apiGroups:
- apps
resources:
- daemonsets
resourceNames:
- ovs-ovn
verbs:
- get
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- ovn-central
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- get
- watch
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovs-ovn-upgrade
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "3"
"helm.sh/hook-delete-policy": hook-succeeded
roleRef:
name: system:ovs-ovn-upgrade
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovs-ovn-upgrade
namespace: {{ .Values.namespace }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: "{{ .Chart.Name }}-post-upgrade-hook"
namespace: {{ .Values.namespace }}
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
# This is what defines this resource as a hook. Without this line, the
# job is considered part of the release.
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "4"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
completions: 1
template:
metadata:
name: "{{ .Release.Name }}"
labels:
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
app: post-upgrade
component: job
spec:
tolerations:
- key: ""
operator: "Exists"
effect: "NoSchedule"
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: kubernetes.io/hostname
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- post-upgrade
- key: component
operator: In
values:
- job
restartPolicy: Never
hostNetwork: true
nodeSelector:
kubernetes.io/os: "linux"
serviceAccount: ovs-ovn-upgrade
serviceAccountName: ovs-ovn-upgrade
containers:
- name: ovs-ovn-upgrade
image: "{{ .Values.global.registry.address}}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_SSL
value: "{{ .Values.networking.ENABLE_SSL }}"
- name: OVN_DB_IPS
value: "{{ .Values.MASTER_NODES | default (include "kubeovn.nodeIPs" .) }}"
command:
- bash
- -eo
- pipefail
- -c
- /kube-ovn/upgrade-ovs.sh 2>&1 | tee -a /var/log/kube-ovn/upgrade-ovs.log
volumeMounts:
- mountPath: /var/log/kube-ovn
name: kube-ovn-log
- mountPath: /var/run/tls
name: kube-ovn-tls
volumes:
- name: kube-ovn-log
hostPath:
path: {{ .Values.log_conf.LOG_DIR }}/kube-ovn
- name: kube-ovn-tls
secret:
optional: true
secretName: kube-ovn-tls
{{ end }}

View File

@@ -0,0 +1,10 @@
kind: ConfigMap
apiVersion: v1
metadata:
name: ovn-vpc-nat-config
namespace: {{ .Values.namespace }}
annotations:
kubernetes.io/description: |
kube-ovn vpc-nat common config
data:
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.vpcRepository }}:{{ .Values.global.images.kubeovn.tag }}

View File

@@ -0,0 +1,181 @@
# Default values for kubeovn.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
registry:
address: docker.io/kubeovn
imagePullSecrets: []
images:
kubeovn:
repository: kube-ovn
dpdkRepository: kube-ovn-dpdk
vpcRepository: vpc-nat-gateway
tag: v1.13.0
support_arm: true
thirdparty: true
image:
pullPolicy: IfNotPresent
namespace: kube-system
MASTER_NODES: ""
MASTER_NODES_LABEL: "kube-ovn/role=master"
networking:
# NET_STACK could be dual_stack, ipv4, ipv6
NET_STACK: ipv4
ENABLE_SSL: false
# network type could be geneve or vlan
NETWORK_TYPE: geneve
# tunnel type could be geneve, vxlan or stt
TUNNEL_TYPE: geneve
IFACE: ""
DPDK_TUNNEL_IFACE: "br-phy"
EXCLUDE_IPS: ""
POD_NIC_TYPE: "veth-pair"
vlan:
PROVIDER_NAME: "provider"
VLAN_INTERFACE_NAME: ""
VLAN_NAME: "ovn-vlan"
VLAN_ID: "100"
EXCHANGE_LINK_NAME: false
ENABLE_EIP_SNAT: true
DEFAULT_SUBNET: "ovn-default"
DEFAULT_VPC: "ovn-cluster"
NODE_SUBNET: "join"
ENABLE_ECMP: false
ENABLE_METRICS: true
NODE_LOCAL_DNS_IP: ""
PROBE_INTERVAL: 180000
OVN_NORTHD_PROBE_INTERVAL: 5000
OVN_LEADER_PROBE_INTERVAL: 5
OVN_REMOTE_PROBE_INTERVAL: 10000
OVN_REMOTE_OPENFLOW_INTERVAL: 180
OVN_NORTHD_N_THREADS: 1
ENABLE_COMPACT: false
func:
ENABLE_LB: true
ENABLE_NP: true
ENABLE_EIP_SNAT: true
ENABLE_EXTERNAL_VPC: true
HW_OFFLOAD: false
ENABLE_LB_SVC: false
ENABLE_KEEP_VM_IP: true
LS_DNAT_MOD_DL_DST: true
LS_CT_SKIP_DST_LPORT_IPS: true
CHECK_GATEWAY: true
LOGICAL_GATEWAY: false
ENABLE_BIND_LOCAL_IP: true
U2O_INTERCONNECTION: false
ENABLE_TPROXY: false
ENABLE_IC: false
ipv4:
POD_CIDR: "10.16.0.0/16"
POD_GATEWAY: "10.16.0.1"
SVC_CIDR: "10.96.0.0/12"
JOIN_CIDR: "100.64.0.0/16"
PINGER_EXTERNAL_ADDRESS: "1.1.1.1"
PINGER_EXTERNAL_DOMAIN: "alauda.cn."
ipv6:
POD_CIDR: "fd00:10:16::/112"
POD_GATEWAY: "fd00:10:16::1"
SVC_CIDR: "fd00:10:96::/112"
JOIN_CIDR: "fd00:100:64::/112"
PINGER_EXTERNAL_ADDRESS: "2606:4700:4700::1111"
PINGER_EXTERNAL_DOMAIN: "google.com."
dual_stack:
POD_CIDR: "10.16.0.0/16,fd00:10:16::/112"
POD_GATEWAY: "10.16.0.1,fd00:10:16::1"
SVC_CIDR: "10.96.0.0/12,fd00:10:96::/112"
JOIN_CIDR: "100.64.0.0/16,fd00:100:64::/112"
PINGER_EXTERNAL_ADDRESS: "1.1.1.1,2606:4700:4700::1111"
PINGER_EXTERNAL_DOMAIN: "google.com."
performance:
GC_INTERVAL: 360
INSPECT_INTERVAL: 20
OVS_VSCTL_CONCURRENCY: 100
debug:
ENABLE_MIRROR: false
MIRROR_IFACE: "mirror0"
cni_conf:
CNI_CONFIG_PRIORITY: "01"
CNI_CONF_DIR: "/etc/cni/net.d"
CNI_BIN_DIR: "/opt/cni/bin"
CNI_CONF_FILE: "/kube-ovn/01-kube-ovn.conflist"
LOCAL_BIN_DIR: "/usr/local/bin"
MOUNT_LOCAL_BIN_DIR: false
kubelet_conf:
KUBELET_DIR: "/var/lib/kubelet"
log_conf:
LOG_DIR: "/var/log"
OPENVSWITCH_DIR: "/etc/origin/openvswitch"
OVN_DIR: "/etc/origin/ovn"
DISABLE_MODULES_MANAGEMENT: false
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
# hybrid dpdk
HYBRID_DPDK: false
HUGEPAGE_SIZE_TYPE: hugepages-2Mi # Default
HUGEPAGES: 1Gi
# DPDK
DPDK: false
DPDK_VERSION: "19.11"
DPDK_CPU: "1000m" # Default CPU configuration
DPDK_MEMORY: "2Gi" # Default Memory configuration
ovn-central:
requests:
cpu: "300m"
memory: "200Mi"
limits:
cpu: "3"
memory: "4Gi"
ovs-ovn:
requests:
cpu: "200m"
memory: "200Mi"
limits:
cpu: "2"
memory: "1000Mi"
kube-ovn-controller:
requests:
cpu: "200m"
memory: "200Mi"
limits:
cpu: "1000m"
memory: "1Gi"
kube-ovn-cni:
requests:
cpu: "100m"
memory: "100Mi"
limits:
cpu: "1000m"
memory: "1Gi"
kube-ovn-pinger:
requests:
cpu: "100m"
memory: "100Mi"
limits:
cpu: "200m"
memory: "400Mi"
kube-ovn-monitor:
requests:
cpu: "200m"
memory: "200Mi"
limits:
cpu: "200m"
memory: "200Mi"
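As a rough sketch of how these defaults combine, the override below switches the chart to dual-stack with SSL enabled; the CIDRs simply repeat the dual_stack defaults above and would normally be replaced with site-specific ranges:

networking:
  NET_STACK: dual_stack
  ENABLE_SSL: true
dual_stack:
  POD_CIDR: "10.16.0.0/16,fd00:10:16::/112"
  POD_GATEWAY: "10.16.0.1,fd00:10:16::1"
  SVC_CIDR: "10.96.0.0/12,fd00:10:96::/112"
  JOIN_CIDR: "100.64.0.0/16,fd00:100:64::/112"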

View File

@@ -52,19 +52,46 @@ spec:
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
- /kube-ovn/start-controller.sh
- --default-ls={{ .Values.networking.DEFAULT_SUBNET }}
- --default-cidr={{ index $cozyConfig.data "ipv4-pod-cidr" }}
- --default-gateway={{ index $cozyConfig.data "ipv4-pod-gateway" }}
- --default-cidr=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.POD_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.POD_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.POD_CIDR }}
{{- end }}
- --default-gateway=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.POD_GATEWAY }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.POD_GATEWAY }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.POD_GATEWAY }}
{{- end }}
- --default-gateway-check={{- .Values.func.CHECK_GATEWAY }}
- --default-logical-gateway={{- .Values.func.LOGICAL_GATEWAY }}
- --default-u2o-interconnection={{- .Values.func.U2O_INTERCONNECTION }}
- --default-exclude-ips={{- .Values.networking.EXCLUDE_IPS }}
- --cluster-router={{ .Values.networking.DEFAULT_VPC }}
- --node-switch={{ .Values.networking.NODE_SUBNET }}
- --node-switch-cidr={{ index $cozyConfig.data "ipv4-join-cidr" }}
- --service-cluster-ip-range={{ index $cozyConfig.data "ipv4-svc-cidr" }}
- --node-switch-cidr=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.JOIN_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.JOIN_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.JOIN_CIDR }}
{{- end }}
- --service-cluster-ip-range=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.SVC_CIDR }}
{{- end }}
- --network-type={{- .Values.networking.NETWORK_TYPE }}
- --default-provider-name={{ .Values.networking.vlan.PROVIDER_NAME }}
- --default-interface-name={{- .Values.networking.vlan.VLAN_INTERFACE_NAME }}

View File

@@ -51,12 +51,18 @@ spec:
- bash
- /kube-ovn/start-cniserver.sh
args:
{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
- --enable-mirror={{- .Values.debug.ENABLE_MIRROR }}
- --mirror-iface={{- .Values.debug.MIRROR_IFACE }}
- --node-switch={{ .Values.networking.NODE_SUBNET }}
- --encap-checksum=true
- --service-cluster-ip-range={{ index $cozyConfig.data "ipv4-svc-cidr" }}
- --service-cluster-ip-range=
{{- if eq .Values.networking.NET_STACK "dual_stack" -}}
{{ .Values.dual_stack.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv4" -}}
{{ .Values.ipv4.SVC_CIDR }}
{{- else if eq .Values.networking.NET_STACK "ipv6" -}}
{{ .Values.ipv6.SVC_CIDR }}
{{- end }}
{{- if eq .Values.networking.NETWORK_TYPE "vlan" }}
- --iface=
{{- else}}

View File

@@ -70,6 +70,10 @@ func:
ENABLE_TPROXY: false
ipv4:
POD_CIDR: "10.16.0.0/16"
POD_GATEWAY: "10.16.0.1"
SVC_CIDR: "10.96.0.0/12"
JOIN_CIDR: "100.64.0.0/16"
PINGER_EXTERNAL_ADDRESS: "1.1.1.1"
PINGER_EXTERNAL_DOMAIN: "alauda.cn."

View File

@@ -1,97 +0,0 @@
diff --git a/packages/system/kubeovn/charts/kube-ovn/templates/ovncni-ds.yaml b/packages/system/kubeovn/charts/kube-ovn/templates/ovncni-ds.yaml
index d9a9a67..b2e12dd 100644
--- a/packages/system/kubeovn/charts/kube-ovn/templates/ovncni-ds.yaml
+++ b/packages/system/kubeovn/charts/kube-ovn/templates/ovncni-ds.yaml
@@ -51,18 +51,12 @@ spec:
- bash
- /kube-ovn/start-cniserver.sh
args:
+ {{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
- --enable-mirror={{- .Values.debug.ENABLE_MIRROR }}
- --mirror-iface={{- .Values.debug.MIRROR_IFACE }}
- --node-switch={{ .Values.networking.NODE_SUBNET }}
- --encap-checksum=true
- - --service-cluster-ip-range=
- {{- if eq .Values.networking.NET_STACK "dual_stack" -}}
- {{ .Values.dual_stack.SVC_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv4" -}}
- {{ .Values.ipv4.SVC_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv6" -}}
- {{ .Values.ipv6.SVC_CIDR }}
- {{- end }}
+ - --service-cluster-ip-range={{ index $cozyConfig.data "ipv4-svc-cidr" }}
{{- if eq .Values.networking.NETWORK_TYPE "vlan" }}
- --iface=
{{- else}}
diff --git a/packages/system/kubeovn/charts/kube-ovn/templates/controller-deploy.yaml b/packages/system/kubeovn/charts/kube-ovn/templates/controller-deploy.yaml
index 0e69494..756eb7c 100644
--- a/packages/system/kubeovn/charts/kube-ovn/templates/controller-deploy.yaml
+++ b/packages/system/kubeovn/charts/kube-ovn/templates/controller-deploy.yaml
@@ -52,46 +52,19 @@ spec:
image: {{ .Values.global.registry.address }}/{{ .Values.global.images.kubeovn.repository }}:{{ .Values.global.images.kubeovn.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
+ {{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
- /kube-ovn/start-controller.sh
- --default-ls={{ .Values.networking.DEFAULT_SUBNET }}
- - --default-cidr=
- {{- if eq .Values.networking.NET_STACK "dual_stack" -}}
- {{ .Values.dual_stack.POD_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv4" -}}
- {{ .Values.ipv4.POD_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv6" -}}
- {{ .Values.ipv6.POD_CIDR }}
- {{- end }}
- - --default-gateway=
- {{- if eq .Values.networking.NET_STACK "dual_stack" -}}
- {{ .Values.dual_stack.POD_GATEWAY }}
- {{- else if eq .Values.networking.NET_STACK "ipv4" -}}
- {{ .Values.ipv4.POD_GATEWAY }}
- {{- else if eq .Values.networking.NET_STACK "ipv6" -}}
- {{ .Values.ipv6.POD_GATEWAY }}
- {{- end }}
+ - --default-cidr={{ index $cozyConfig.data "ipv4-pod-cidr" }}
+ - --default-gateway={{ index $cozyConfig.data "ipv4-pod-gateway" }}
- --default-gateway-check={{- .Values.func.CHECK_GATEWAY }}
- --default-logical-gateway={{- .Values.func.LOGICAL_GATEWAY }}
- --default-u2o-interconnection={{- .Values.func.U2O_INTERCONNECTION }}
- --default-exclude-ips={{- .Values.networking.EXCLUDE_IPS }}
- --cluster-router={{ .Values.networking.DEFAULT_VPC }}
- --node-switch={{ .Values.networking.NODE_SUBNET }}
- - --node-switch-cidr=
- {{- if eq .Values.networking.NET_STACK "dual_stack" -}}
- {{ .Values.dual_stack.JOIN_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv4" -}}
- {{ .Values.ipv4.JOIN_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv6" -}}
- {{ .Values.ipv6.JOIN_CIDR }}
- {{- end }}
- - --service-cluster-ip-range=
- {{- if eq .Values.networking.NET_STACK "dual_stack" -}}
- {{ .Values.dual_stack.SVC_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv4" -}}
- {{ .Values.ipv4.SVC_CIDR }}
- {{- else if eq .Values.networking.NET_STACK "ipv6" -}}
- {{ .Values.ipv6.SVC_CIDR }}
- {{- end }}
+ - --node-switch-cidr={{ index $cozyConfig.data "ipv4-join-cidr" }}
+ - --service-cluster-ip-range={{ index $cozyConfig.data "ipv4-svc-cidr" }}
- --network-type={{- .Values.networking.NETWORK_TYPE }}
- --default-provider-name={{ .Values.networking.vlan.PROVIDER_NAME }}
- --default-interface-name={{- .Values.networking.vlan.VLAN_INTERFACE_NAME }}
diff --git a/packages/system/kubeovn/charts/kube-ovn/values.yaml b/packages/system/kubeovn/charts/kube-ovn/values.yaml
index bfffc4d..b880749 100644
--- a/packages/system/kubeovn/charts/kube-ovn/values.yaml
+++ b/packages/system/kubeovn/charts/kube-ovn/values.yaml
@@ -70,10 +70,6 @@ func:
ENABLE_TPROXY: false
ipv4:
- POD_CIDR: "10.16.0.0/16"
- POD_GATEWAY: "10.16.0.1"
- SVC_CIDR: "10.96.0.0/12"
- JOIN_CIDR: "100.64.0.0/16"
PINGER_EXTERNAL_ADDRESS: "1.1.1.1"
PINGER_EXTERNAL_DOMAIN: "alauda.cn."

View File

@@ -12,6 +12,12 @@ kube-ovn:
func:
ENABLE_NP: false
ipv4:
POD_CIDR: "10.244.0.0/16"
POD_GATEWAY: "10.244.0.1"
SVC_CIDR: "10.96.0.0/16"
JOIN_CIDR: "100.64.0.0/16"
MASTER_NODES_LABEL: "node-role.kubernetes.io/control-plane"
networking:
ENABLE_SSL: true

View File

@@ -1,19 +1,17 @@
apiVersion: v2
appVersion: v0.0.27
appVersion: v0.0.22
description: Run and operate MariaDB in a cloud native way
home: https://github.com/mariadb-operator/mariadb-operator
icon: https://mariadb-operator.github.io/mariadb-operator/assets/mariadb_profile.svg
icon: https://mariadb-operator.github.io/mariadb-operator/assets/mariadb.png
keywords:
- mariadb
- mysql
- operator
- mariadb-operator
- database
- maxscale
kubeVersion: '>= 1.16.0-0'
maintainers:
- email: mariadb-operator@proton.me
name: mmontes11
name: mariadb-operator
type: application
version: 0.27.0
version: 0.22.0

View File

@@ -3,10 +3,10 @@
[//]: # (README.md generated by gotmpl. DO NOT EDIT.)
<p align="center">
<img src="https://mariadb-operator.github.io/mariadb-operator/assets/mariadb-operator_centered_whitebg.svg" alt="mariadb" width="100%"/>
<img src="https://mariadb-operator.github.io/mariadb-operator/assets/mariadb-operator.png" alt="mariadb" width="250"/>
</p>
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.27.0](https://img.shields.io/badge/Version-0.27.0-informational?style=flat-square) ![AppVersion: v0.0.27](https://img.shields.io/badge/AppVersion-v0.0.27-informational?style=flat-square)
![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.22.0](https://img.shields.io/badge/Version-0.22.0-informational?style=flat-square) ![AppVersion: v0.0.22](https://img.shields.io/badge/AppVersion-v0.0.22-informational?style=flat-square)
Run and operate MariaDB in a cloud native way
@@ -26,50 +26,20 @@ helm uninstall mariadb-operator
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity to add to controller Pod |
| certController.affinity | object | `{}` | Affinity to add to controller Pod |
| certController.caValidity | string | `"35064h"` | CA certificate validity. It must be greater than certValidity. |
| certController.certValidity | string | `"8766h"` | Certificate validity. |
| certController.enabled | bool | `true` | Specifies whether the cert-controller should be created. |
| certController.extrArgs | list | `[]` | Extra arguments to be passed to the cert-controller entrypoint |
| certController.extraVolumeMounts | list | `[]` | Extra volumes to mount to cert-controller container |
| certController.extraVolumes | list | `[]` | Extra volumes to pass to cert-controller Pod |
| certController.ha.enabled | bool | `false` | Enable high availability |
| certController.ha.replicas | int | `3` | Number of replicas |
| certController.image.pullPolicy | string | `"IfNotPresent"` | |
| certController.image.repository | string | `"ghcr.io/mariadb-operator/mariadb-operator"` | |
| certController.image.tag | string | `""` | Image tag to use. By default the chart appVersion is used |
| certController.imagePullSecrets | list | `[]` | |
| certController.lookaheadValidity | string | `"2160h"` | Duration used to verify whether a certificate is valid or not. |
| certController.nodeSelector | object | `{}` | Node selectors to add to controller Pod |
| certController.podAnnotations | object | `{}` | Annotations to add to cert-controller Pod |
| certController.podSecurityContext | object | `{}` | Security context to add to cert-controller Pod |
| certController.requeueDuration | string | `"5m"` | Requeue duration to ensure that certificate gets renewed. |
| certController.resources | object | `{}` | Resources to add to cert-controller container |
| certController.securityContext | object | `{}` | Security context to add to cert-controller container |
| certController.serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
| certController.serviceAccount.automount | bool | `true` | Automounts the service account token in all containers of the Pod |
| certController.serviceAccount.enabled | bool | `true` | Specifies whether a service account should be created |
| certController.serviceAccount.extraLabels | object | `{}` | Extra Labels to add to the service account |
| certController.serviceAccount.name | string | `""` | The name of the service account to use. If not set and enabled is true, a name is generated using the fullname template |
| certController.serviceMonitor.additionalLabels | object | `{}` | Labels to be added to the cert-controller ServiceMonitor |
| certController.serviceMonitor.enabled | bool | `true` | Enable cert-controller ServiceMonitor. Metrics must be enabled |
| certController.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics |
| certController.serviceMonitor.scrapeTimeout | string | `"25s"` | Timeout if metrics can't be retrieved in given time interval |
| certController.tolerations | list | `[]` | Tolerations to add to controller Pod |
| clusterName | string | `"cluster.local"` | Cluster DNS name |
| extrArgs | list | `[]` | Extra arguments to be passed to the controller entrypoint |
| extraEnv | list | `[]` | Extra environment variables to be passed to the controller |
| extraVolumeMounts | list | `[]` | Extra volumes to mount to the container. |
| extraVolumes | list | `[]` | Extra volumes to pass to pod. |
| fullnameOverride | string | `""` | |
| ha.enabled | bool | `false` | Enable high availability |
| ha.leaseId | string | `"mariadb.mmontes.io"` | Lease resource name to be used for leader election |
| ha.replicas | int | `3` | Number of replicas |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"ghcr.io/mariadb-operator/mariadb-operator"` | |
| image.tag | string | `""` | Image tag to use. By default the chart appVersion is used |
| imagePullSecrets | list | `[]` | |
| logLevel | string | `"INFO"` | Controller log level |
| metrics.enabled | bool | `false` | Enable operator internal metrics. Prometheus must be installed in the cluster |
| metrics.enabled | bool | `false` | Enable prometheus metrics. Prometheus must be installed in the cluster |
| metrics.serviceMonitor.additionalLabels | object | `{}` | Labels to be added to the controller ServiceMonitor |
| metrics.serviceMonitor.enabled | bool | `true` | Enable controller ServiceMonitor |
| metrics.serviceMonitor.interval | string | `"30s"` | Interval to scrape metrics |
@@ -89,19 +59,16 @@ helm uninstall mariadb-operator
| tolerations | list | `[]` | Tolerations to add to controller Pod |
| webhook.affinity | object | `{}` | Affinity to add to controller Pod |
| webhook.annotations | object | `{}` | Annotations for webhook configurations. |
| webhook.cert.caPath | string | `"/tmp/k8s-webhook-server/certificate-authority"` | Path where the CA certificate will be mounted. |
| webhook.cert.certManager.duration | string | `""` | Duration to be used in the Certificate resource, |
| webhook.cert.certManager.enabled | bool | `false` | Whether to use cert-manager to issue and rotate the certificate. If set to false, mariadb-operator's cert-controller will be used instead. |
| webhook.cert.certManager.issuerRef | object | `{}` | Issuer reference to be used in the Certificate resource. If not provided, a self-signed issuer will be used. |
| webhook.cert.certManager.renewBefore | string | `""` | Renew before duration to be used in the Certificate resource. |
| webhook.cert.path | string | `"/tmp/k8s-webhook-server/serving-certs"` | Path where the certificate will be mounted. |
| webhook.cert.secretAnnotations | object | `{}` | Annotations to be added to webhook TLS secret. |
| webhook.cert.secretLabels | object | `{}` | Labels to be added to webhook TLS secret. |
| webhook.certificate.certManager | bool | `false` | Use cert-manager to issue and rotate the certificate. If set to false, a default certificate will be used. |
| webhook.certificate.default | object | `{"annotations":{},"caExpirationDays":365,"certExpirationDays":365,"hook":""}` | Default certificate generated when the chart is installed or upgraded. |
| webhook.certificate.default.annotations | object | `{}` | Annotations for certificate Secret. |
| webhook.certificate.default.caExpirationDays | int | `365` | Certificate authority expiration in days. |
| webhook.certificate.default.certExpirationDays | int | `365` | Certificate expiration in days. |
| webhook.certificate.default.hook | string | `""` | Helm hook to be added to the default certificate. |
| webhook.certificate.path | string | `"/tmp/k8s-webhook-server/serving-certs"` | Path where the certificate will be mounted. |
| webhook.extrArgs | list | `[]` | Extra arguments to be passed to the webhook entrypoint |
| webhook.extraVolumeMounts | list | `[]` | Extra volumes to mount to webhook container |
| webhook.extraVolumes | list | `[]` | Extra volumes to pass to webhook Pod |
| webhook.ha.enabled | bool | `false` | Enable high availability |
| webhook.ha.replicas | int | `3` | Number of replicas |
| webhook.hostNetwork | bool | `false` | Expose the webhook server in the host network |
| webhook.image.pullPolicy | string | `"IfNotPresent"` | |
| webhook.image.repository | string | `"ghcr.io/mariadb-operator/mariadb-operator"` | |
@@ -110,7 +77,7 @@ helm uninstall mariadb-operator
| webhook.nodeSelector | object | `{}` | Node selectors to add to controller Pod |
| webhook.podAnnotations | object | `{}` | Annotations to add to webhook Pod |
| webhook.podSecurityContext | object | `{}` | Security context to add to webhook Pod |
| webhook.port | int | `9443` | Port to be used by the webhook server |
| webhook.port | int | `10250` | Port to be used by the webhook server |
| webhook.resources | object | `{}` | Resources to add to webhook container |
| webhook.securityContext | object | `{}` | Security context to add to webhook container |
| webhook.serviceAccount.annotations | object | `{}` | Annotations to add to the service account |

View File

@@ -4,7 +4,7 @@
[//]: # (README.md generated by gotmpl. DO NOT EDIT.)
<p align="center">
<img src="https://mariadb-operator.github.io/mariadb-operator/assets/mariadb-operator_centered_whitebg.svg" alt="mariadb" width="100%"/>
<img src="https://mariadb-operator.github.io/mariadb-operator/assets/mariadb-operator.png" alt="mariadb" width="250"/>
</p>
{{ template "chart.typeBadge" . }}{{ template "chart.versionBadge" . }}{{ template "chart.appVersionBadge" . }}

View File

@@ -71,23 +71,28 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Cert-controller common labels
Webhook certificate
*/}}
{{- define "mariadb-operator-cert-controller.labels" -}}
helm.sh/chart: {{ include "mariadb-operator.chart" . }}
{{ include "mariadb-operator-cert-controller.selectorLabels" . }}
{{ if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{ end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- define "mariadb-operator-webhook.certificate" -}}
{{- if .Values.webhook.certificate.certManager }}
{{- include "mariadb-operator.fullname" . }}-webhook-cert
{{- else }}
{{- include "mariadb-operator.fullname" . }}-webhook-default-cert
{{- end }}
{{- end }}
{{/*
Cert-controller selector labels
Webhook certificate subject name
*/}}
{{- define "mariadb-operator-cert-controller.selectorLabels" -}}
app.kubernetes.io/name: {{ include "mariadb-operator.name" . }}-cert-controller
app.kubernetes.io/instance: {{ .Release.Name }}
{{- define "mariadb-operator-webhook.subjectName" -}}
{{- include "mariadb-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc
{{- end }}
{{/*
Webhook certificate subject alternative name
*/}}
{{- define "mariadb-operator-webhook.altName" -}}
{{- include "mariadb-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc.{{ .Values.clusterName }}
{{- end }}
{{/*
@@ -111,14 +116,3 @@ Create the name of the webhook service account to use
{{- default "default" .Values.webhook.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create the name of the cert-controller service account to use
*/}}
{{- define "mariadb-operator-cert-controller.serviceAccountName" -}}
{{- if .Values.certController.serviceAccount.enabled }}
{{- default (printf "%s-cert-controller" (include "mariadb-operator.fullname" .)) .Values.certController.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.certController.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,103 +0,0 @@
{{- if and .Values.certController.enabled (not .Values.webhook.cert.certManager.enabled) -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "mariadb-operator.fullname" . }}-cert-controller
labels:
{{ include "mariadb-operator-cert-controller.labels" . | nindent 4 }}
spec:
{{ if .Values.certController.ha.enabled }}
replicas: {{ .Values.certController.ha.replicas}}
{{ end }}
selector:
matchLabels:
{{ include "mariadb-operator-cert-controller.selectorLabels" . | nindent 6 }}
template:
metadata:
{{ with .Values.certController.podAnnotations }}
annotations:
{{ toYaml . | nindent 8 }}
{{ end }}
labels:
{{ include "mariadb-operator-cert-controller.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.certController.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "mariadb-operator-cert-controller.serviceAccountName" . }}-cert-controller
automountServiceAccountToken: {{ .Values.certController.serviceAccount.automount }}
{{ with .Values.certController.nodeSelector }}
nodeSelector:
{{ toYaml . | nindent 8 }}
{{ end }}
{{ with .Values.certController.tolerations }}
tolerations:
{{ toYaml . | nindent 8 }}
{{ end }}
{{ with .Values.certController.affinity }}
affinity:
{{ toYaml . | nindent 8 }}
{{ end }}
{{ with .Values.certController.podSecurityContext }}
securityContext:
{{ toYaml . | nindent 8 }}
{{ end }}
containers:
- image: "{{ .Values.certController.image.repository }}:{{ .Values.certController.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.certController.image.pullPolicy }}
name: cert-controller
args:
- cert-controller
- --ca-secret-name={{ include "mariadb-operator.fullname" . }}-webhook-ca
- --ca-secret-namespace={{ .Release.Namespace }}
- --ca-validity={{ .Values.certController.caValidity }}
- --cert-secret-name={{ include "mariadb-operator.fullname" . }}-webhook-cert
- --cert-secret-namespace={{ .Release.Namespace }}
- --cert-validity={{ .Values.certController.certValidity }}
- --lookahead-validity={{ .Values.certController.lookaheadValidity }}
- --service-name={{ include "mariadb-operator.fullname" . }}-webhook
- --service-namespace={{ .Release.Namespace }}
- --requeue-duration={{ .Values.certController.requeueDuration }}
- --metrics-addr=:8080
- --health-addr=:8081
- --log-level={{ .Values.logLevel }}
{{- if .Values.certController.ha.enabled }}
- --leader-elect
{{- end }}
{{- range .Values.certController.extrArgs }}
- {{ . }}
{{- end }}
ports:
- containerPort: 8080
protocol: TCP
name: metrics
- containerPort: 8081
protocol: TCP
name: health
env:
- name: CLUSTER_NAME
value: {{ .Values.clusterName }}
{{- with .Values.certController.extraVolumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 20
periodSeconds: 5
{{ with .Values.certController.resources }}
resources:
{{ toYaml . | nindent 12 }}
{{ end }}
{{ with .Values.certController.securityContext}}
securityContext:
{{ toYaml . | nindent 12 }}
{{ end }}
{{- with .Values.certController.extraVolumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -1,88 +0,0 @@
{{- if and .Values.rbac.enabled .Values.certController.enabled (not .Values.webhook.cert.certManager.enabled) -}}
{{ $fullName := include "mariadb-operator.fullname" . }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ $fullName }}-cert-controller
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $fullName }}-cert-controller
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- get
- list
- update
- patch
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- list
- patch
- watch
- apiGroups:
- ""
resources:
- endpoints
- endpoints/restricted
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ $fullName }}-cert-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ $fullName }}-cert-controller
subjects:
- kind: ServiceAccount
name: {{ include "mariadb-operator-cert-controller.serviceAccountName" . }}-cert-controller
namespace: {{ .Release.Namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $fullName }}-cert-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $fullName }}-cert-controller
subjects:
- kind: ServiceAccount
name: {{ include "mariadb-operator-cert-controller.serviceAccountName" . }}-cert-controller
namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -1,15 +0,0 @@
{{- if and .Values.certController.enabled (not .Values.webhook.cert.certManager.enabled) -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "mariadb-operator-cert-controller.serviceAccountName" . }}-cert-controller
labels:
{{- include "mariadb-operator-cert-controller.labels" . | nindent 4 }}
{{- with .Values.certController.serviceAccount.extraLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.certController.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -1,36 +0,0 @@
{{ if and .Values.certController.enabled (not .Values.webhook.cert.certManager.enabled) .Values.metrics.enabled .Values.certController.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "mariadb-operator.fullname" . }}-cert-controller-metrics
labels:
{{ include "mariadb-operator-cert-controller.labels" . | nindent 4 }}
spec:
ports:
- port: 8080
protocol: TCP
name: metrics
selector:
{{ include "mariadb-operator-cert-controller.selectorLabels" . | nindent 4 }}
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "mariadb-operator.fullname" . }}-cert-controller
labels:
{{ include "mariadb-operator-cert-controller.labels" . | nindent 4 }}
{{ with .Values.certController.serviceMonitor.additionalLabels }}
{{ toYaml . | nindent 4 }}
{{ end }}
spec:
selector:
matchLabels:
{{ include "mariadb-operator-cert-controller.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace | quote }}
endpoints:
- port: metrics
interval: {{ .Values.certController.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.certController.serviceMonitor.scrapeTimeout }}
{{ end }}

View File

@@ -1,13 +0,0 @@
apiVersion: v1
data:
MARIADB_GALERA_AGENT_IMAGE: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27
MARIADB_GALERA_INIT_IMAGE: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27
MARIADB_GALERA_LIB_PATH: /usr/lib/galera/libgalera_smm.so
MARIADB_OPERATOR_IMAGE: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27
RELATED_IMAGE_EXPORTER: prom/mysqld-exporter:v0.15.1
RELATED_IMAGE_MARIADB: mariadb:11.2.2
RELATED_IMAGE_MAXSCALE: mariadb/maxscale:23.08
kind: ConfigMap
metadata:
creationTimestamp: null
name: mariadb-operator-env

View File

@@ -53,17 +53,17 @@ spec:
{{- if .Values.ha.enabled }}
- --leader-elect
{{- end }}
{{- range .Values.extraArgs }}
{{- if .Values.metrics.enabled }}
- --service-monitor-reconciler
{{- end }}
{{- range .Values.extrArgs }}
- {{ . }}
{{- end }}
ports:
- containerPort: 8080
protocol: TCP
name: metrics
envFrom:
- configMapRef:
name: mariadb-operator-env
env:
env:
- name: CLUSTER_NAME
value: {{ .Values.clusterName }}
- name: MARIADB_OPERATOR_NAME
@@ -76,9 +76,6 @@ spec:
fieldPath: metadata.namespace
- name: MARIADB_OPERATOR_SA_PATH
value: /var/run/secrets/kubernetes.io/serviceaccount/token
{{- with .Values.extraEnv }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.extraVolumeMounts }}
volumeMounts:
{{- toYaml .Values.extraVolumeMounts | nindent 12 }}
@@ -91,6 +88,21 @@ spec:
securityContext:
{{ toYaml . | nindent 12 }}
{{ end }}
readinessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 30
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 30
periodSeconds: 10
startupProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 20
periodSeconds: 10
{{- if .Values.extraVolumes }}
volumes:
{{- toYaml .Values.extraVolumes | nindent 8 }}

View File

@@ -56,15 +56,6 @@ rules:
- ""
resources:
- endpoints
verbs:
- create
- get
- list
- patch
- watch
- apiGroups:
- ""
resources:
- endpoints/restricted
verbs:
- create
@@ -99,12 +90,6 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pvcs
verbs:
- list
- apiGroups:
- ""
resources:
@@ -132,38 +117,16 @@ rules:
- list
- patch
- watch
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- list
- patch
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- create
- delete
- get
- list
- patch
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- batch
resources:
@@ -179,12 +142,11 @@ rules:
- jobs
verbs:
- create
- delete
- list
- patch
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- backups
verbs:
@@ -196,13 +158,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- backups/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- backups/status
verbs:
@@ -210,7 +172,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- connections
verbs:
@@ -222,37 +184,23 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- connections
- grants
- maxscale
- restores
- users
verbs:
- create
- list
- patch
- watch
- apiGroups:
- k8s.mariadb.com
resources:
- connections
- grants
- users
verbs:
- create
- list
- patch
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- connections/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- connections/status
verbs:
@@ -260,7 +208,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- databases
verbs:
@@ -272,13 +220,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- databases/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- databases/status
verbs:
@@ -286,7 +234,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- grants
verbs:
@@ -298,13 +246,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- grants/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- grants/status
verbs:
@@ -312,7 +260,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- mariadbs
verbs:
@@ -324,13 +272,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- mariadbs/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- mariadbs/status
verbs:
@@ -338,33 +286,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
resources:
- maxscales
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- k8s.mariadb.com
resources:
- maxscales/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
resources:
- maxscales/status
verbs:
- get
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- restores
verbs:
@@ -376,13 +298,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- restores/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- restores/status
verbs:
@@ -390,7 +312,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- sqljobs
verbs:
@@ -402,13 +324,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- sqljobs/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- sqljobs/status
verbs:
@@ -416,7 +338,7 @@ rules:
- patch
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- users
verbs:
@@ -428,13 +350,13 @@ rules:
- update
- watch
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- users/finalizers
verbs:
- update
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
resources:
- users/status
verbs:
@@ -509,4 +431,4 @@ subjects:
- kind: ServiceAccount
name: {{ include "mariadb-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end }}
{{- end }}
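All of the rules above follow the same rename of the API group; for orientation, a custom resource addressed by these rules would reference the older group like this (a heavily trimmed sketch: only apiVersion, kind, and the resource name are implied by the rules, the spec is omitted on purpose):

# Illustrative object matched by the mariadb.mmontes.io/v1alpha1 rules above.
apiVersion: mariadb.mmontes.io/v1alpha1
kind: Backup
metadata:
  name: backup-example   # illustrative name
# spec omitted: its fields depend on the operator version and are not shown in this diff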

View File

@@ -1,5 +1,4 @@
{{ if .Values.webhook.cert.certManager.enabled }}
{{ if not .Values.webhook.cert.certManager.issuerRef }}
{{ if .Values.webhook.certificate.certManager }}
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
@@ -8,7 +7,6 @@ metadata:
{{ include "mariadb-operator-webhook.labels" . | nindent 4 }}
spec:
selfSigned: {}
{{ end }}
---
apiVersion: cert-manager.io/v1
kind: Certificate
@@ -17,33 +15,11 @@ metadata:
labels:
{{ include "mariadb-operator-webhook.labels" . | nindent 4 }}
spec:
commonName: {{ include "mariadb-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc
dnsNames:
- {{ include "mariadb-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc.{{ .Values.clusterName }}
- {{ include "mariadb-operator.fullname" . }}-webhook.{{ .Release.Namespace }}.svc
- {{ include "mariadb-operator.fullname" . }}-webhook.{{ .Release.Namespace }}
- {{ include "mariadb-operator.fullname" . }}-webhook
- {{ include "mariadb-operator-webhook.subjectName" . }}
- {{ include "mariadb-operator-webhook.altName" . }}
issuerRef:
{{- if .Values.webhook.cert.certManager.issuerRef -}}
{{ toYaml .Values.webhook.cert.certManager.issuerRef | nindent 4 }}
{{- else }}
kind: Issuer
name: {{ include "mariadb-operator.fullname" . }}-selfsigned-issuer
{{- end }}
{{- with .Values.webhook.cert.certManager.duration }}
duration: {{ . | quote }}
{{- end }}
{{- with .Values.webhook.cert.certManager.renewBefore }}
renewBefore: {{ . | quote }}
{{- end }}
secretName: {{ include "mariadb-operator.fullname" . }}-webhook-cert
secretTemplate:
{{- with .Values.webhook.cert.secretLabels }}
labels:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.webhook.cert.secretAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{ end }}
{{ end }}
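This Issuer/Certificate pair is rendered only when cert-manager manages the webhook certificate; with the value layout used on this side of the diff, that is selected by a single flag (a sketch of just the relevant override):

# Hypothetical values override: have cert-manager issue the webhook certificate
# instead of the chart-generated default one.
webhook:
  certificate:
    certManager: true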

View File

@@ -1,4 +1,30 @@
{{ $fullName := include "mariadb-operator.fullname" . }}
{{ $subjectName := include "mariadb-operator-webhook.subjectName" . }}
{{ $altNames := list }}
{{ $altNames := append $altNames $subjectName }}
{{ $altNames := append $altNames (include "mariadb-operator-webhook.altName" . ) }}
{{ $ca := genCA $fullName (.Values.webhook.certificate.default.caExpirationDays | int) }}
{{ $cert := genSignedCert $subjectName nil $altNames (.Values.webhook.certificate.default.certExpirationDays | int) $ca }}
{{ if not .Values.webhook.certificate.certManager }}
apiVersion: v1
kind: Secret
type: kubernetes.io/tls
metadata:
name: {{ $fullName }}-webhook-default-cert
labels:
{{ include "mariadb-operator-webhook.labels" . | nindent 4 }}
annotations:
{{ with .Values.webhook.certificate.default.hook }}
helm.sh/hook: {{ . }}
{{ end }}
{{ with .Values.webhook.certificate.default.annotations }}
{{ toYaml . | nindent 4 }}
{{ end }}
data:
tls.crt: {{ $cert.Cert | b64enc }}
tls.key: {{ $cert.Key | b64enc }}
{{ end }}
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
@@ -6,11 +32,12 @@ metadata:
labels:
{{ include "mariadb-operator-webhook.labels" . | nindent 4 }}
annotations:
{{- if .Values.webhook.cert.certManager.enabled }}
{{ if .Values.webhook.certificate.certManager }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "mariadb-operator.fullname" . }}-webhook-cert
{{- else }}
k8s.mariadb.com/webhook: ""
{{- end }}
{{ end }}
{{ with .Values.webhook.certificate.default.hook }}
helm.sh/hook: {{ . }}
{{ end }}
{{ with .Values.webhook.annotations }}
{{ toYaml . | indent 4 }}
{{ end }}
@@ -21,12 +48,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /mutate-k8s-mariadb-com-v1alpha1-mariadb
path: /mutate-mariadb-mmontes-io-v1alpha1-mariadb
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: mmariadb.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -43,11 +73,12 @@ metadata:
labels:
{{ include "mariadb-operator-webhook.labels" . | nindent 4 }}
annotations:
{{- if .Values.webhook.cert.certManager.enabled }}
{{ if .Values.webhook.certificate.certManager }}
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "mariadb-operator.fullname" . }}-webhook-cert
{{- else }}
k8s.mariadb.com/webhook: ""
{{- end }}
{{ end }}
{{ with .Values.webhook.certificate.default.hook }}
helm.sh/hook: {{ . }}
{{ end }}
{{ with .Values.webhook.annotations }}
{{ toYaml . | indent 4 }}
{{ end }}
@@ -58,12 +89,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-backup
path: /validate-mariadb-mmontes-io-v1alpha1-backup
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vbackup.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -78,12 +112,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-connection
path: /validate-mariadb-mmontes-io-v1alpha1-connection
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vconnection.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -98,12 +135,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-database
path: /validate-mariadb-mmontes-io-v1alpha1-database
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vdatabase.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -118,12 +158,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-grant
path: /validate-mariadb-mmontes-io-v1alpha1-grant
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vgrant.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -138,12 +181,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-mariadb
path: /validate-mariadb-mmontes-io-v1alpha1-mariadb
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vmariadb.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -152,38 +198,21 @@ webhooks:
resources:
- mariadbs
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-maxscale
failurePolicy: Fail
name: vmaxscale.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- maxscales
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-restore
path: /validate-mariadb-mmontes-io-v1alpha1-restore
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vrestore.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -198,12 +227,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-sqljob
path: /validate-mariadb-mmontes-io-v1alpha1-sqljob
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vsqljob.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
@@ -218,12 +250,15 @@ webhooks:
service:
name: {{ $fullName }}-webhook
namespace: {{ .Release.Namespace }}
path: /validate-k8s-mariadb-com-v1alpha1-user
path: /validate-mariadb-mmontes-io-v1alpha1-user
{{ if not .Values.webhook.certificate.certManager }}
caBundle: {{ $ca.Cert | b64enc }}
{{ end }}
failurePolicy: Fail
name: vuser.kb.io
rules:
- apiGroups:
- k8s.mariadb.com
- mariadb.mmontes.io
apiVersions:
- v1alpha1
operations:
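When certManager is left false, the template above generates a CA and a signed certificate with Helm's genCA/genSignedCert helpers, stores them in a Secret, and injects the CA into each webhook's caBundle; the knobs for that default certificate are the ones below (a sketch mirroring the values documented later in values.yaml):

# Hypothetical values override for the chart-generated (non cert-manager) webhook certificate.
webhook:
  certificate:
    certManager: false
    default:
      caExpirationDays: 365     # certificate authority validity, in days
      certExpirationDays: 365   # leaf certificate validity, in days
      hook: ""                  # optional Helm hook added to the generated Secret
      annotations: {}           # extra annotations for the generated Secret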

View File

@@ -1,14 +1,10 @@
{{ $fullName := include "mariadb-operator.fullname" . }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ $fullName }}-webhook
name: {{ include "mariadb-operator.fullname" . }}-webhook
labels:
{{ include "mariadb-operator-webhook.labels" . | nindent 4 }}
spec:
{{ if .Values.webhook.ha.enabled }}
replicas: {{ .Values.webhook.ha.replicas}}
{{ end }}
selector:
matchLabels:
{{ include "mariadb-operator-webhook.selectorLabels" . | nindent 6 }}
@@ -50,18 +46,12 @@ spec:
name: webhook
args:
- webhook
{{- if .Values.webhook.cert.certManager.enabled }}
- --ca-cert-path={{ .Values.webhook.cert.path }}/ca.crt
{{- else }}
- --ca-cert-path={{ .Values.webhook.cert.caPath }}/tls.crt
{{- end }}
- --cert-dir={{ .Values.webhook.cert.path }}
- --dns-name={{ $fullName }}-webhook.{{ .Release.Namespace }}.svc
- --cert-dir={{ .Values.webhook.certificate.path }}
- --port={{ .Values.webhook.port }}
- --metrics-addr=:8080
- --health-addr=:8081
- --log-level={{ .Values.logLevel }}
{{- range .Values.webhook.extrArgs }}
{{- range .Values.extrArgs }}
- {{ . }}
{{- end }}
ports:
@@ -75,12 +65,7 @@ spec:
protocol: TCP
name: health
volumeMounts:
{{- if not .Values.webhook.cert.certManager.enabled }}
- mountPath: {{ .Values.webhook.cert.caPath }}
name: ca
readOnly: true
{{- end }}
- mountPath: {{ .Values.webhook.cert.path }}
- mountPath: {{ .Values.webhook.certificate.path }}
name: cert
readOnly: true
{{- if .Values.webhook.extraVolumeMounts }}
@@ -88,10 +73,22 @@ spec:
{{- end }}
readinessProbe:
httpGet:
path: /readyz
path: /healthz
port: 8081
initialDelaySeconds: 20
periodSeconds: 5
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
startupProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
{{ with .Values.webhook.resources }}
resources:
{{ toYaml . | nindent 12 }}
@@ -101,16 +98,10 @@ spec:
{{ toYaml . | nindent 12 }}
{{ end }}
volumes:
{{- if not .Values.webhook.cert.certManager.enabled }}
- name: ca
secret:
defaultMode: 420
secretName: {{ $fullName }}-webhook-ca
{{- end }}
- name: cert
secret:
defaultMode: 420
secretName: {{ $fullName }}-webhook-cert
secretName: {{ include "mariadb-operator-webhook.certificate" . }}
{{- if .Values.webhook.extraVolumes }}
{{- toYaml .Values.webhook.extraVolumes | nindent 8 }}
{{- end }}
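Rendered against the defaults that appear later in values.yaml, the webhook container entrypoint from this template comes out roughly as follows (a sketch; the log level value is illustrative and extrArgs is assumed empty):

# Approximate rendered args for the webhook container.
args:
  - webhook
  - --cert-dir=/tmp/k8s-webhook-server/serving-certs
  - --port=10250                # one of the two defaults visible in the values.yaml hunk below
  - --metrics-addr=:8080
  - --health-addr=:8081
  - --log-level=INFO            # illustrative; taken from .Values.logLevel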

View File

@@ -1,25 +0,0 @@
{{- if not .Values.webhook.cert.certManager.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "mariadb-operator.fullname" . }}-webhook-ca
labels:
{{- include "mariadb-operator-webhook.labels" . | nindent 4 }}
mariadb-operator.io/component: webhook
{{- with .Values.webhook.cert.secretAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "mariadb-operator.fullname" . }}-webhook-cert
labels:
{{- include "mariadb-operator-webhook.labels" . | nindent 4 }}
mariadb-operator.io/component: webhook
{{- with .Values.webhook.cert.secretAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -19,9 +19,11 @@ ha:
enabled: false
# -- Number of replicas
replicas: 3
# -- Lease resource name to be used for leader election
leaseId: mariadb.mmontes.io
metrics:
# -- Enable operator internal metrics. Prometheus must be installed in the cluster
# -- Enable prometheus metrics. Prometheus must be installed in the cluster
enabled: false
serviceMonitor:
# -- Enable controller ServiceMonitor
@@ -54,9 +56,6 @@ rbac:
# -- Extra arguments to be passed to the controller entrypoint
extrArgs: []
# -- Extra environment variables to be passed to the controller
extraEnv: []
# -- Extra volumes to pass to pod.
extraVolumes: []
@@ -88,37 +87,31 @@ tolerations: []
affinity: {}
webhook:
# -- Annotations for webhook configurations.
annotations: {}
image:
repository: ghcr.io/mariadb-operator/mariadb-operator
pullPolicy: IfNotPresent
# -- Image tag to use. By default the chart appVersion is used
tag: ""
imagePullSecrets: []
ha:
# -- Enable high availability
enabled: false
# -- Number of replicas
replicas: 3
cert:
certManager:
# -- Whether to use cert-manager to issue and rotate the certificate. If set to false, mariadb-operator's cert-controller will be used instead.
enabled: false
# -- Issuer reference to be used in the Certificate resource. If not provided, a self-signed issuer will be used.
issuerRef: {}
# -- Duration to be used in the Certificate resource,
duration: ""
# -- Renew before duration to be used in the Certificate resource.
renewBefore: ""
# -- Annotatioms to be added to webhook TLS secret.
secretAnnotations: {}
# -- Labels to be added to webhook TLS secret.
secretLabels: {}
# -- Path where the CA certificate will be mounted.
caPath: /tmp/k8s-webhook-server/certificate-authority
certificate:
# -- Use cert-manager to issue and rotate the certificate. If set to false, a default certificate will be used.
certManager: false
# -- Default certificate generated when the chart is installed or upgraded.
default:
# -- Certificate authority expiration in days.
caExpirationDays: 365
# -- Certificate expiration in days.
certExpirationDays: 365
# -- Annotations for certificate Secret.
annotations: {}
# -- Helm hook to be added to the default certificate.
hook: ""
# -- Path where the certificate will be mounted.
path: /tmp/k8s-webhook-server/serving-certs
# -- Port to be used by the webhook server
port: 9443
port: 10250
# -- Expose the webhook server in the host network
hostNetwork: false
serviceMonitor:
@@ -143,8 +136,6 @@ webhook:
# -- The name of the service account to use.
# If not set and enabled is true, a name is generated using the fullname template
name: ""
# -- Annotations for webhook configurations.
annotations: {}
# -- Extra arguments to be passed to the webhook entrypoint
extrArgs: []
# -- Extra volumes to pass to webhook Pod
@@ -168,71 +159,3 @@ webhook:
tolerations: []
# -- Affinity to add to controller Pod
affinity: {}
certController:
# -- Specifies whether the cert-controller should be created.
enabled: true
image:
repository: ghcr.io/mariadb-operator/mariadb-operator
pullPolicy: IfNotPresent
# -- Image tag to use. By default the chart appVersion is used
tag: ""
imagePullSecrets: []
ha:
# -- Enable high availability
enabled: false
# -- Number of replicas
replicas: 3
# -- CA certificate validity. It must be greater than certValidity.
caValidity: 35064h
# -- Certificate validity.
certValidity: 8766h
# -- Duration used to verify whether a certificate is valid or not.
lookaheadValidity: 2160h
# -- Requeue duration to ensure that certificate gets renewed.
requeueDuration: 5m
serviceMonitor:
# -- Enable cert-controller ServiceMonitor. Metrics must be enabled
enabled: true
# -- Labels to be added to the cert-controller ServiceMonitor
additionalLabels: {}
# release: kube-prometheus-stack
# -- Interval to scrape metrics
interval: 30s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 25s
serviceAccount:
# -- Specifies whether a service account should be created
enabled: true
# -- Automounts the service account token in all containers of the Pod
automount: true
# -- Annotations to add to the service account
annotations: {}
# -- Extra Labels to add to the service account
extraLabels: {}
# -- The name of the service account to use.
# If not set and enabled is true, a name is generated using the fullname template
name: ""
# -- Extra arguments to be passed to the cert-controller entrypoint
extrArgs: []
# -- Extra volumes to pass to cert-controller Pod
extraVolumes: []
# -- Extra volumes to mount to cert-controller container
extraVolumeMounts: []
# -- Annotations to add to cert-controller Pod
podAnnotations: {}
# -- Security context to add to cert-controller Pod
podSecurityContext: {}
# -- Security context to add to cert-controller container
securityContext: {}
# -- Resources to add to cert-controller container
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
# -- Node selectors to add to controller Pod
nodeSelector: {}
# -- Tolerations to add to controller Pod
tolerations: []
# -- Affinity to add to controller Pod
affinity: {}
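Beyond the certificate settings, the webhook block also exposes its listen port, which changes in this diff, and a hostNetwork toggle; an explicit override keeps the effective value unambiguous across chart versions (a sketch):

# Hypothetical override pinning the webhook server settings regardless of the chart default.
webhook:
  port: 10250          # the values diff shows both 9443 and 10250 as defaults; pin the one you rely on
  hostNetwork: false   # set true only if the API server must reach the webhook via the node network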

View File

@@ -1,18 +1,9 @@
#!/bin/sh
VERSION=2
set -o pipefail
set -e
run_migrations() {
if ! kubectl get configmap -n cozy-system cozystack-version; then
kubectl create configmap -n cozy-system cozystack-version --from-literal=version="$VERSION" --dry-run=client -o yaml | kubectl create -f-
fi
current_version=$(kubectl get configmap -n cozy-system cozystack-version -o jsonpath='{.data.version}') || true
until [ "$current_version" = "$VERSION" ]; do
echo "run migration: $current_version --> $VERSION"
scripts/migrations/$current_version
current_version=$(kubectl get configmap -n cozy-system cozystack-version -o jsonpath='{.data.version}')
done
return 0
}
flux_is_ok() {
@@ -27,9 +18,6 @@ install_basic_charts() {
cd "$(dirname "$0")/.."
# Run migrations
run_migrations
# Install namespaces
make -C packages/core/platform namespaces-apply
@@ -38,6 +26,9 @@ if ! flux_is_ok; then
install_basic_charts
fi
# Run migrations
run_migrations
# Reconcile Helm repositories
kubectl annotate helmrepositories.source.toolkit.fluxcd.io -A -l cozystack.io/repository reconcile.fluxcd.io/requestedAt=$(date +"%Y-%m-%dT%H:%M:%SZ") --overwrite
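The migration gate above hinges on a single ConfigMap that records the installed version; written out, the object the script creates and compares against looks like this (a sketch; the version value follows VERSION=2 at the top of the script):

# The version-tracking ConfigMap used by the installer script above.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack-version
  namespace: cozy-system
data:
  version: "2"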

View File

@@ -1,8 +0,0 @@
#!/bin/sh
if kubectl get -n cozy-mariadb-operator secret/mariadb-operator-webhook-cert; then
kubectl annotate -n cozy-mariadb-operator secret/mariadb-operator-webhook-cert meta.helm.sh/release-namespace=cozy-mariadb-operator meta.helm.sh/release-name=mariadb-operator
kubectl label -n cozy-mariadb-operator secret/mariadb-operator-webhook-cert app.kubernetes.io/managed-by=Helm
fi
kubectl create configmap -n cozy-system cozystack-version --from-literal=version=2 --dry-run=client -o yaml | kubectl apply -f-
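Annotating and labelling the Secret this way is the standard trick for letting Helm adopt a resource it did not create; after the migration, the Secret's metadata ends up roughly like this (a sketch of the metadata only, other fields unchanged):

# Metadata added by the migration so Helm can adopt the pre-existing webhook Secret.
apiVersion: v1
kind: Secret
metadata:
  name: mariadb-operator-webhook-cert
  namespace: cozy-mariadb-operator
  annotations:
    meta.helm.sh/release-name: mariadb-operator
    meta.helm.sh/release-namespace: cozy-mariadb-operator
  labels:
    app.kubernetes.io/managed-by: Helm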