[tenant-k8s] change coredns

Signed-off-by: kklinch0 <kklinch0@gmail.com>
This commit is contained in:
kklinch0
2025-08-28 12:06:43 +03:00
parent 6cd5e746c8
commit 6b5af37e1a
47 changed files with 2823 additions and 5 deletions

View File

@@ -16,7 +16,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.27.0
version: 0.28.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -139,6 +139,8 @@ See the reference for components utilized in this service:
| `addons.velero` | Velero | `object` | `{}` |
| `addons.velero.enabled` | Enable Velero for backup and recovery of a tenant Kubernetes cluster. | `bool` | `false` |
| `addons.velero.valuesOverride` | Custom values to override | `object` | `{}` |
| `addons.coredns` | Coredns | `object` | `{}` |
| `addons.coredns.valuesOverride` | Custom values to override | `object` | `{}` |
### Kubernetes Control Plane Configuration

View File

@@ -127,9 +127,6 @@ spec:
resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.controlPlane.scheduler.resourcesPreset .Values.controlPlane.scheduler.resources $) | nindent 6 }}
dataStoreName: "{{ $etcd }}"
addons:
coreDNS:
dnsServiceIPs:
- 10.95.0.10
konnectivity:
server:
port: 8132

View File

@@ -0,0 +1,47 @@
{{- /*
HelmRelease that installs CoreDNS into a tenant Kubernetes cluster.
Flux reconciles the vendored cozy-coredns chart through the tenant's
super-admin kubeconfig, so the release lands in the tenant cluster
rather than the management cluster.
*/}}
{{- define "cozystack.defaultCoreDNSValues" -}}
coredns:
  service:
    # NOTE(review): assumed to match the tenant control plane's DNS
    # service IP (previously hard-coded as dnsServiceIPs: 10.95.0.10
    # on the control-plane addon) — confirm the two stay in sync.
    clusterIP: "10.95.0.10"
{{- end }}
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: {{ .Release.Name }}-coredns
  labels:
    cozystack.io/repository: system
    cozystack.io/target-cluster-name: {{ .Release.Name }}
spec:
  interval: 5m
  releaseName: coredns
  chart:
    spec:
      chart: cozy-coredns
      reconcileStrategy: Revision
      sourceRef:
        kind: HelmRepository
        name: cozystack-system
        namespace: cozy-system
      version: '>= 0.0.0-0'
  # Deploy into the tenant cluster via its admin kubeconfig secret.
  kubeConfig:
    secretRef:
      name: {{ .Release.Name }}-admin-kubeconfig
      key: super-admin.svc
  targetNamespace: cozy-coredns
  storageNamespace: cozy-coredns
  install:
    createNamespace: true
    remediation:
      # -1 = retry forever until the install succeeds.
      retries: -1
  upgrade:
    remediation:
      retries: -1
  values:
    # User overrides (addons.coredns.valuesOverride) win over the
    # defaults above; deepCopy keeps mergeOverwrite from mutating
    # the .Values tree.
    {{- toYaml (deepCopy .Values.addons.coredns.valuesOverride | mergeOverwrite (fromYaml (include "cozystack.defaultCoreDNSValues" .))) | nindent 4 }}
  dependsOn:
  # Only depend on the parent release once it actually exists,
  # otherwise first-time installs would deadlock.
  {{- if lookup "helm.toolkit.fluxcd.io/v2" "HelmRelease" .Release.Namespace .Release.Name }}
  - name: {{ .Release.Name }}
    namespace: {{ .Release.Namespace }}
  {{- end }}
  # CNI must be up before CoreDNS pods can be scheduled and reached.
  - name: {{ .Release.Name }}-cilium
    namespace: {{ .Release.Namespace }}

View File

@@ -41,6 +41,7 @@ spec:
{{ .Release.Name }}-fluxcd
{{ .Release.Name }}-gpu-operator
{{ .Release.Name }}-velero
{{ .Release.Name }}-coredns
-p '{"spec": {"suspend": true}}'
--type=merge --field-manager=flux-client-side-apply || true
---
@@ -81,6 +82,7 @@ rules:
- {{ .Release.Name }}-fluxcd
- {{ .Release.Name }}-gpu-operator
- {{ .Release.Name }}-velero
- {{ .Release.Name }}-coredns
---
apiVersion: rbac.authorization.k8s.io/v1

View File

@@ -13,6 +13,9 @@
"cilium": {
"valuesOverride": {}
},
"coredns": {
"valuesOverride": {}
},
"fluxcd": {
"enabled": false,
"valuesOverride": {}
@@ -45,6 +48,7 @@
"required": [
"certManager",
"cilium",
"coredns",
"fluxcd",
"gatewayAPI",
"gpuOperator",
@@ -97,6 +101,24 @@
}
}
},
"coredns": {
"description": "Coredns",
"type": "object",
"default": {
"valuesOverride": {}
},
"required": [
"valuesOverride"
],
"properties": {
"valuesOverride": {
"description": "Custom values to override",
"type": "object",
"default": {},
"x-kubernetes-preserve-unknown-fields": true
}
}
},
"fluxcd": {
"description": "Flux CD",
"type": "object",

View File

@@ -122,6 +122,13 @@ addons:
enabled: false
valuesOverride: {}
## @field addons.coredns {coredns} Coredns
##
coredns:
## @field coredns.valuesOverride {object} Custom values to override
##
valuesOverride: {}
## @section Kubernetes Control Plane Configuration
##
## @param controlPlane {controlPlane} Control Plane Configuration

View File

@@ -69,7 +69,8 @@ kubernetes 0.26.0 9584e5f5
kubernetes 0.26.1 0e47e1e8
kubernetes 0.26.2 8ddbe32e
kubernetes 0.26.3 c02a3818
kubernetes 0.27.0 HEAD
kubernetes 0.27.0 6cd5e746
kubernetes 0.28.0 HEAD
mysql 0.1.0 263e47be
mysql 0.2.0 c24a103f
mysql 0.3.0 53f2365e

View File

@@ -0,0 +1,3 @@
apiVersion: v2
name: cozy-coredns
version: 0.0.0 # Placeholder, the actual version will be automatically set during the build process

View File

@@ -0,0 +1,10 @@
export NAME=coredns
export NAMESPACE=coredns
include ../../../scripts/package.mk
update:
rm -rf charts
helm repo add cozy-coredns https://coredns.github.io/helm
helm repo update cozy-coredns
helm pull cozy-coredns/coredns --untar --untardir charts

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
OWNERS
*.tests

View File

@@ -0,0 +1,24 @@
annotations:
artifacthub.io/changes: |
- kind: changed
description: Bump to CoreDNS 1.12.3
apiVersion: v2
appVersion: 1.12.3
description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS
Services
home: https://coredns.io
icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png
keywords:
- coredns
- dns
- kubedns
maintainers:
- name: mrueg
- name: haad
- name: hagaibarel
- name: shubham-cmyk
name: coredns
sources:
- https://github.com/coredns/coredns
type: application
version: 1.43.2

View File

@@ -0,0 +1,316 @@
# CoreDNS
[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services
# TL;DR;
```console
$ helm repo add coredns https://coredns.github.io/helm
$ helm --namespace=kube-system install coredns coredns/coredns
```
## Introduction
This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart will provide DNS Services and can be deployed in multiple configurations to support the various scenarios listed below:
- CoreDNS as a cluster dns service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as cluster-service in kube-system namespace. This mode is chosen by setting `isClusterService` to true.
- CoreDNS as an external dns service. In this mode CoreDNS is deployed as any kubernetes app in user specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false.
- CoreDNS as an external dns provider for kubernetes federation. This is a sub case of 'external dns service' which uses etcd plugin for CoreDNS backend. This deployment mode has a dependency on the `etcd-operator` chart, which needs to be pre-installed.
## Prerequisites
- Kubernetes 1.10 or later
## Installing the Chart
The chart can be installed as follows:
```console
$ helm repo add coredns https://coredns.github.io/helm
$ helm --namespace=kube-system install coredns coredns/coredns
```
The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment.
> **Tip**: List all releases using `helm list --all-namespaces`
## OCI installing
The chart can also be installed using the following:
```console
$ helm --namespace=kube-system install coredns oci://ghcr.io/coredns/charts/coredns --version 1.38.0
```
The command deploys the `1.38.0` version of CoreDNS on the Kubernetes cluster in the default configuration.
## Helm Unit Testing & Debugging Guide
This document explains how to write, run, and debug Helm unit tests for this chart using [helm-unittest](https://github.com/helm-unittest/helm-unittest).
---
### Prerequisites
Install the Helm unittest plugin:
```bash
helm plugin install https://github.com/helm-unittest/helm-unittest
```
### Running Unit Tests
Run all unit tests in the chart folder (e.g., `./coredns`):
```bash
helm unittest ./coredns
```
To output results in **JUnit XML** format:
```bash
mkdir -p test-results
helm unittest --strict \
--output-type JUnit \
--output-file test-results/helm-unittest-report.xml \
./coredns
```
---
### Debugging Helm Charts
Render the chart templates with real values to debug:
```bash
helm template ./coredns --debug
```
## YAML Intellisense in VS Code
Add this line at the top of your unit test YAML files for schema validation and autocompletion in VS Code:
```yaml
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
```
This improves YAML editing and error highlighting for test definitions.
## Uninstalling the Chart
To uninstall/delete the `coredns` deployment:
```console
$ helm uninstall coredns
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
| Parameter | Description | Default |
| :--------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------- |
| `image.repository` | The image repository to pull from | coredns/coredns |
| `image.tag` | The image tag to pull from (derived from Chart.yaml) | `` |
| `image.pullPolicy` | Image pull policy | IfNotPresent |
| `image.pullSecrets` | Specify container image pull secrets | `[]` |
| `replicaCount` | Number of replicas | 1 |
| `resources.limits.cpu` | Container maximum CPU | `100m` |
| `resources.limits.memory` | Container maximum memory | `128Mi` |
| `resources.requests.cpu` | Container requested CPU | `100m` |
| `resources.requests.memory` | Container requested memory | `128Mi` |
| `serviceType` | Kubernetes Service type | `ClusterIP` |
| `prometheus.service.enabled` | Set this to `true` to create Service for Prometheus metrics | `false` |
| `prometheus.service.annotations` | Annotations to add to the metrics Service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}` |
| `prometheus.service.selector` | Pod selector | `{}` |
| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} |
| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` |
| `prometheus.monitor.interval` | Scrape interval for polling the metrics endpoint. (E.g. "30s") | `""` |
| `prometheus.monitor.selector` | Service selector | `{}` |
| `service.clusterIP` | IP address to assign to service | `""` |
| `service.clusterIPs` | IP addresses to assign to service | `[]` |
| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` |
| `service.externalIPs` | External IP addresses | [] |
| `service.externalTrafficPolicy` | Enable client source IP preservation | [] |
| `service.ipFamilyPolicy` | Service dual-stack policy | `""` |
| `service.annotations` | Annotations to add to service | {} |
| `service.selector` | Pod selector | `{}` |
| `service.trafficDistribution` | Service traffic routing strategy | |
| `serviceAccount.create` | If true, create & use serviceAccount | false |
| `serviceAccount.name` | If not set & create is true, use template fullname | |
| `rbac.create` | If true, create & use RBAC resources | true |
| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` |
| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. | true |
| `priorityClassName` | Name of Priority Class to assign pods | `""` |
| `securityContext` | securityContext definition for pods | capabilities.add.NET_BIND_SERVICE |
| `servers` | Configuration for CoreDNS and plugins | See values.yml |
| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` |
| `livenessProbe.periodSeconds` | How often to perform the probe | `10` |
| `livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
| `readinessProbe.periodSeconds` | How often to perform the probe | `10` |
| `readinessProbe.timeoutSeconds` | When the probe times out | `5` |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
| `affinity` | Affinity settings for pod assignment | {} |
| `nodeSelector` | Node labels for pod assignment | {} |
| `tolerations` | Tolerations for pod assignment | [] |
| `zoneFiles` | Configure custom Zone files | [] |
| `extraContainers` | Optional array of sidecar containers | [] |
| `extraVolumes` | Optional array of volumes to create | [] |
| `extraVolumeMounts` | Optional array of volumes to mount inside the CoreDNS container | [] |
| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] |
| `env` | Optional array of environment variables for CoreDNS container | [] |
| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} |
| `customAnnotations` | Optional annotations for Deployment(s), Pod, Service, ServiceMonitor objects |
| `rollingUpdate.maxUnavailable` | Maximum number of unavailable replicas during rolling update | `1` |
| `rollingUpdate.maxSurge` | Maximum number of pods created above desired number of pods | `25%` |
| `podDisruptionBudget` | Optional PodDisruptionBudget | {} |
| `podAnnotations` | Optional Pod only Annotations | {} |
| `terminationGracePeriodSeconds` | Optional duration in seconds the pod needs to terminate gracefully. | 30 |
| `hpa.enabled` | Enable Hpa autoscaler instead of proportional one | `false` |
| `hpa.minReplicas` | Hpa minimum number of CoreDNS replicas | `1` |
| `hpa.maxReplicas` | Hpa maximum number of CoreDNS replicas | `2` |
| `hpa.metrics` | Metrics definitions used by Hpa to scale up and down | {} |
| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` |
| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` |
| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` |
| `autoscaler.min` | Min size of replicaCount | 0 |
| `autoscaler.max` | Max size of replicaCount | 0 (aka no max) |
| `autoscaler.includeUnschedulableNodes` | Should the replicas scale based on the total number or only schedulable nodes | `false` |
| `autoscaler.preventSinglePointFailure` | If true does not allow single points of failure to form | `true` |
| `autoscaler.customFlags` | A list of custom flags to pass into cluster-proportional-autoscaler | (no args) |
| `autoscaler.image.repository` | The image repository to pull autoscaler from | registry.k8s.io/cpa/cluster-proportional-autoscaler |
| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.8.5` |
| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent |
| `autoscaler.image.pullSecrets` | Specify container image pull secrets | `[]` |
| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. | `""` |
| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} |
| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} |
| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] |
| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` |
| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` |
| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` |
| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` |
| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} |
| `autoscaler.livenessProbe.enabled` | Enable/disable the Liveness probe | `true` |
| `autoscaler.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `10` |
| `autoscaler.livenessProbe.periodSeconds` | How often to perform the probe | `5` |
| `autoscaler.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
| `autoscaler.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` |
| `autoscaler.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` |
| `autoscaler.extraContainers` | Optional array of sidecar containers | [] |
| `deployment.enabled` | Optionally disable the main deployment and its respective resources. | `true` |
| `deployment.name` | Name of the deployment if `deployment.enabled` is true. Otherwise the name of an existing deployment for the autoscaler or HPA to target. | `""` |
| `deployment.annotations` | Annotations to add to the main deployment | `{}` |
| `deployment.selector` | Pod selector | `{}` |
| `clusterRole.nameOverride` | ClusterRole name override | |
See `values.yaml` for configuration notes. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install coredns \
coredns/coredns \
--set rbac.create=false
```
The above command disables automatic creation of RBAC rules.
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
```console
$ helm install coredns coredns/coredns -f values.yaml
```
> **Tip**: You can use the default [values.yaml](/charts/coredns/values.yaml)
## Caveats
The chart will automatically determine which protocols to listen on based on
the protocols you define in your zones. This means that you could potentially
use both "TCP" and "UDP" on a single port.
Some cloud environments like "GCE" or "Azure container service" cannot
create external loadbalancers with both "TCP" and "UDP" protocols. So
When deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud
environments, make sure you do not attempt to use both protocols at the same
time.
## Autoscaling
By setting `autoscaler.enabled = true` a
[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler)
will be deployed. This will default to a coredns replica for every 256 cores, or
16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica`
and `autoscaler.nodesPerReplica`. When cluster is using large nodes (with more
cores), `coresPerReplica` should dominate. If using small nodes,
`nodesPerReplica` should dominate.
This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for
the autoscaler deployment.
`replicaCount` is ignored if this is enabled.
By setting `hpa.enabled = true` a [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
is enabled for the CoreDNS deployment. This can scale the number of replicas based on metrics
like CpuUtilization, MemoryUtilization or custom ones.
## Adopting existing CoreDNS resources
If you do not want to delete the existing CoreDNS resources in your cluster, you can adopt the resources into a release as of Helm 3.2.0.
You will also need to annotate and label your existing resources to allow Helm to assume control of them. See: https://github.com/helm/helm/pull/7649
```
annotations:
meta.helm.sh/release-name: your-release-name
meta.helm.sh/release-namespace: your-release-namespace
labels:
app.kubernetes.io/managed-by: Helm
```
Once you have annotated and labeled all the resources this chart specifies, you may need to locally template the chart and compare against existing manifests to ensure there are no changes/diffs. If
you have been careful this should not diff and leave all the resources unmodified and now under management of helm.
Some values to investigate to help adopt your existing manifests to the Helm release are:
- k8sAppLabelOverride
- service.name
- customLabels
In some cases, you will need to orphan delete your existing deployment since selector labels are immutable.
```
kubectl delete deployment coredns --cascade=orphan
```
This will delete the deployment and leave the replicaset to ensure no downtime in the cluster. You will need to manually delete the replicaset AFTER Helm has released a new deployment.
Here is an example script to modify the annotations and labels of existing resources:
WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your helm release.
```
#!/usr/bin/env bash
set -euo pipefail
for kind in config service serviceAccount; do
echo "setting annotations and labels on $kind/coredns"
kubectl -n kube-system annotate --overwrite $kind coredns meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE
kubectl -n kube-system annotate --overwrite $kind coredns meta.helm.sh/release-namespace=kube-system
kubectl -n kube-system label --overwrite $kind coredns app.kubernetes.io/managed-by=Helm
done
```
NOTE: Sometimes, previous deployments of kube-dns that have been migrated to CoreDNS still use kube-dns for the service name as well.
```
echo "setting annotations and labels on service/kube-dns"
kubectl -n kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE
kubectl -n kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system
kubectl -n kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm
```

View File

@@ -0,0 +1,30 @@
{{- if .Values.isClusterService }}
CoreDNS is now running in the cluster as a cluster-service.
{{- else }}
CoreDNS is now running in the cluster.
It can be accessed using the below endpoint
{{- if contains "NodePort" .Values.serviceType }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo "$NODE_IP:$NODE_PORT"
{{- else if contains "LoadBalancer" .Values.serviceType }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo $SERVICE_IP
{{- else if contains "ClusterIP" .Values.serviceType }}
"{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local"
from within the cluster
{{- end }}
{{- end }}
It can be tested with the following:
1. Launch a Pod with DNS tools:
kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
2. Query the DNS server:
/ # host kubernetes

View File

@@ -0,0 +1,236 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "coredns.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "coredns.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "coredns.labels" -}}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.isClusterService }}
k8s-app: {{ template "coredns.k8sapplabel" . }}
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
{{- end }}
app.kubernetes.io/name: {{ template "coredns.name" . }}
{{- end -}}
{{/*
Common labels with autoscaler
*/}}
{{- define "coredns.labels.autoscaler" -}}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
{{- if .Values.isClusterService }}
k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
{{- end }}
app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler
{{- end -}}
{{/*
Allow k8s-app label to be overridden
*/}}
{{- define "coredns.k8sapplabel" -}}
{{- default .Chart.Name .Values.k8sAppLabelOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Generate the list of ports automatically from the server definitions
*/}}
{{- define "coredns.servicePorts" -}}
{{/* Set ports to be an empty dict */}}
{{- $ports := dict -}}
{{/* Iterate through each of the server blocks */}}
{{- range .Values.servers -}}
{{/* Capture port to avoid scoping awkwardness */}}
{{- $port := toString .port -}}
{{- $serviceport := default .port .servicePort -}}
{{/* If none of the server blocks has mentioned this port yet take note of it */}}
{{- if not (hasKey $ports $port) -}}
{{- $ports := set $ports $port (dict "istcp" false "isudp" false "serviceport" $serviceport) -}}
{{- end -}}
{{/* Retrieve the inner dict that holds the protocols for a given port */}}
{{- $innerdict := index $ports $port -}}
{{/*
Look at each of the zones and check which protocol they serve
At the moment the following are supported by CoreDNS:
UDP: dns://
TCP: tls://, grpc://, https://
*/}}
{{- range .zones -}}
{{- if has (default "" .scheme) (list "dns://" "") -}}
{{/* Optionally enable tcp for this service as well */}}
{{- if eq (default false .use_tcp) true }}
{{- $innerdict := set $innerdict "istcp" true -}}
{{- end }}
{{- $innerdict := set $innerdict "isudp" true -}}
{{- end -}}
{{- if has (default "" .scheme) (list "tls://" "grpc://" "https://") -}}
{{- $innerdict := set $innerdict "istcp" true -}}
{{- end -}}
{{- end -}}
{{/* If none of the zones specify scheme, default to dns:// udp */}}
{{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}}
{{- $innerdict := set $innerdict "isudp" true -}}
{{- end -}}
{{- if .nodePort -}}
{{- $innerdict := set $innerdict "nodePort" .nodePort -}}
{{- end -}}
{{/* Write the dict back into the outer dict */}}
{{- $ports := set $ports $port $innerdict -}}
{{- end -}}
{{/* Write out the ports according to the info collected above */}}
{{- range $port, $innerdict := $ports -}}
{{- $portList := list -}}
{{- if index $innerdict "isudp" -}}
{{- $portList = append $portList (dict "port" (get $innerdict "serviceport") "protocol" "UDP" "name" (printf "udp-%s" $port) "targetPort" ($port | int)) -}}
{{- end -}}
{{- if index $innerdict "istcp" -}}
{{- $portList = append $portList (dict "port" (get $innerdict "serviceport") "protocol" "TCP" "name" (printf "tcp-%s" $port) "targetPort" ($port | int)) -}}
{{- end -}}
{{- range $portDict := $portList -}}
{{- if index $innerdict "nodePort" -}}
{{- $portDict := set $portDict "nodePort" (get $innerdict "nodePort" | int) -}}
{{- end -}}
{{- printf "- %s\n" (toJson $portDict) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate the list of ports automatically from the server definitions
*/}}
{{- define "coredns.containerPorts" -}}
{{/* Set ports to be an empty dict */}}
{{- $ports := dict -}}
{{/* Iterate through each of the server blocks */}}
{{- range .Values.servers -}}
{{/* Capture port to avoid scoping awkwardness */}}
{{- $port := toString .port -}}
{{/* If none of the server blocks has mentioned this port yet take note of it */}}
{{- if not (hasKey $ports $port) -}}
{{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}}
{{- end -}}
{{/* Retrieve the inner dict that holds the protocols for a given port */}}
{{- $innerdict := index $ports $port -}}
{{/*
Look at each of the zones and check which protocol they serve
At the moment the following are supported by CoreDNS:
UDP: dns://
TCP: tls://, grpc://, https://
*/}}
{{- range .zones -}}
{{- if has (default "" .scheme) (list "dns://" "") -}}
{{/* Optionally enable tcp for this service as well */}}
{{- if eq (default false .use_tcp) true }}
{{- $innerdict := set $innerdict "istcp" true -}}
{{- end }}
{{- $innerdict := set $innerdict "isudp" true -}}
{{- end -}}
{{- if has (default "" .scheme) (list "tls://" "grpc://" "https://") -}}
{{- $innerdict := set $innerdict "istcp" true -}}
{{- end -}}
{{- end -}}
{{/* If none of the zones specify scheme, default to dns:// udp */}}
{{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}}
{{- $innerdict := set $innerdict "isudp" true -}}
{{- end -}}
{{- if .hostPort -}}
{{- $innerdict := set $innerdict "hostPort" .hostPort -}}
{{- end -}}
{{/* Write the dict back into the outer dict */}}
{{- $ports := set $ports $port $innerdict -}}
{{/* Fetch port from the configuration if the prometheus section exists */}}
{{- range .plugins -}}
{{- if eq .name "prometheus" -}}
{{- $prometheus_addr := toString .parameters -}}
{{- $prometheus_addr_list := regexSplit ":" $prometheus_addr -1 -}}
{{- $prometheus_port := index $prometheus_addr_list 1 -}}
{{- $ports := set $ports $prometheus_port (dict "istcp" true "isudp" false) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/* Write out the ports according to the info collected above */}}
{{- range $port, $innerdict := $ports -}}
{{- $portList := list -}}
{{- if index $innerdict "isudp" -}}
{{- $portList = append $portList (dict "containerPort" ($port | int) "protocol" "UDP" "name" (printf "udp-%s" $port)) -}}
{{- end -}}
{{- if index $innerdict "istcp" -}}
{{- $portList = append $portList (dict "containerPort" ($port | int) "protocol" "TCP" "name" (printf "tcp-%s" $port)) -}}
{{- end -}}
{{- range $portDict := $portList -}}
{{- if index $innerdict "hostPort" -}}
{{- $portDict := set $portDict "hostPort" (get $innerdict "hostPort" | int) -}}
{{- end -}}
{{- printf "- %s\n" (toJson $portDict) -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "coredns.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "coredns.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "coredns.clusterRoleName" -}}
{{- if and .Values.clusterRole .Values.clusterRole.nameOverride -}}
{{ .Values.clusterRole.nameOverride }}
{{- else -}}
{{ template "coredns.fullname" . }}
{{- end -}}
{{- end -}}

View File

@@ -0,0 +1,30 @@
{{/*
ClusterRole for the cluster-proportional-autoscaler sidecar deployment:
read access to nodes (cluster sizing input) plus scale subresources of the
workloads it resizes. Rendered only when both autoscaler and RBAC are enabled.
*/}}
{{- if and .Values.autoscaler.enabled .Values.rbac.create }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "coredns.fullname" . }}-autoscaler
  labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["list","watch"]
- apiGroups: [""]
  resources: ["replicationcontrollers/scale"]
  verbs: ["get", "update"]
- apiGroups: ["extensions", "apps"]
  resources: ["deployments/scale", "replicasets/scale"]
  verbs: ["get", "update"]
# Remove the configmaps rule once below issue is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create"]
{{- end }}

View File

@@ -0,0 +1,36 @@
{{/*
ClusterRole for the CoreDNS pods: read-only access to the resources the
"kubernetes" plugin watches (Services, Endpoints, Pods, Namespaces and
EndpointSlices). Optionally grants use of the chart's PodSecurityPolicy.
*/}}
{{- if and .Values.deployment.enabled .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ template "coredns.clusterRoleName" . }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - list
  - watch
{{- if .Values.rbac.pspEnable }}
- apiGroups:
  - policy
  - extensions
  resources:
  - podsecuritypolicies
  verbs:
  - use
  resourceNames:
  - {{ template "coredns.fullname" . }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,23 @@
{{/*
Binds the autoscaler ClusterRole to the autoscaler ServiceAccount in the
release namespace.
*/}}
{{- if and .Values.autoscaler.enabled .Values.rbac.create }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "coredns.fullname" . }}-autoscaler
  labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "coredns.fullname" . }}-autoscaler
subjects:
- kind: ServiceAccount
  name: {{ template "coredns.fullname" . }}-autoscaler
  namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,15 @@
{{/*
Binds the CoreDNS ClusterRole to the CoreDNS ServiceAccount in the release
namespace. Name resolution follows the clusterRoleName/serviceAccountName
helpers so overrides stay consistent across the role, binding and SA.
*/}}
{{- if and .Values.deployment.enabled .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ template "coredns.clusterRoleName" . }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "coredns.clusterRoleName" . }}
subjects:
- kind: ServiceAccount
  name: {{ template "coredns.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}

View File

@@ -0,0 +1,33 @@
{{/*
ConfigMap consumed by cluster-proportional-autoscaler. The "linear" key is a
JSON document with its scaling parameters; replica count is derived from
cores/nodes according to these values.
*/}}
{{- if .Values.autoscaler.enabled }}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ template "coredns.fullname" . }}-autoscaler
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
  {{- if .Values.customLabels }}
  {{- toYaml .Values.customLabels | nindent 4 }}
  {{- end }}
  {{- if or .Values.autoscaler.configmap.annotations .Values.customAnnotations }}
  annotations:
    {{- if .Values.customAnnotations }}
    {{- toYaml .Values.customAnnotations | nindent 4 }}
    {{- end }}
    {{- if .Values.autoscaler.configmap.annotations -}}
    {{ toYaml .Values.autoscaler.configmap.annotations | nindent 4 }}
    {{- end }}
  {{- end }}
data:
  # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
  # If using small nodes, "nodesPerReplica" should dominate.
  linear: |-
    {
      "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }},
      "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }},
      "preventSinglePointFailure": {{ .Values.autoscaler.preventSinglePointFailure }},
      "min": {{ .Values.autoscaler.min | int }},
      "max": {{ .Values.autoscaler.max | int }},
      "includeUnschedulableNodes": {{ .Values.autoscaler.includeUnschedulableNodes }}
    }
{{- end }}

View File

@@ -0,0 +1,37 @@
{{/*
CoreDNS ConfigMap. Renders the Corefile from .Values.servers (one server block
per entry, zones/port/plugins expanded) plus any .Values.extraConfig stanzas,
and embeds any .Values.zoneFiles verbatim as additional keys.
Skipped entirely when deployment.skipConfig is set (config managed externally).
NOTE: whitespace inside the Corefile block scalar is significant — edit with care.
*/}}
{{- if .Values.deployment.enabled }}
{{- if not .Values.deployment.skipConfig }}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "coredns.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
data:
  Corefile: |-
    {{- range $name, $conf := .Values.extraConfig }}
    {{ $name }}{{ if $conf.parameters }} {{ $conf.parameters }}{{ end }}
    {{- end }}
    {{ range .Values.servers }}
    {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}}
    {{- if .port }}:{{ .port }} {{ end -}}
    {
        {{- range .plugins }}
        {{ .name }}{{ if .parameters }} {{ .parameters }}{{ end }}{{ if .configBlock }} {
{{ .configBlock | indent 12 }}
        }{{ end }}
        {{- end }}
    }
    {{ end }}
  {{- range .Values.zoneFiles }}
  {{ .filename }}: {{ toYaml .contents | indent 4 }}
  {{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,98 @@
{{/*
Deployment running cluster-proportional-autoscaler, which resizes the CoreDNS
Deployment based on cluster size. Mutually exclusive with the HPA (rendered
only when autoscaler.enabled and not hpa.enabled).
*/}}
{{- if and (.Values.autoscaler.enabled) (not .Values.hpa.enabled) }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "coredns.fullname" . }}-autoscaler
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  selector:
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name | quote }}
      {{- if .Values.isClusterService }}
      k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler
      {{- end }}
      app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler
  template:
    metadata:
      labels:
        {{- if .Values.isClusterService }}
        {{- if not (hasKey .Values.customLabels "k8s-app")}}
        k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler
        {{- end }}
        {{- end }}
        app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler
        app.kubernetes.io/instance: {{ .Release.Name | quote }}
        {{- if .Values.customLabels }}
        {{ toYaml .Values.customLabels | nindent 8 }}
        {{- end }}
      annotations:
        {{- /* rollout on parameter changes: hash of the autoscaler ConfigMap */}}
        checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }}
        {{- if .Values.isClusterService }}
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
        {{- end }}
        {{- with .Values.autoscaler.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      serviceAccountName: {{ template "coredns.fullname" . }}-autoscaler
      {{- /* autoscaler-specific priorityClassName wins over the global one */}}
      {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }}
      {{- if $priorityClassName }}
      priorityClassName: {{ $priorityClassName | quote }}
      {{- end }}
      {{- if .Values.autoscaler.affinity }}
      affinity:
{{ toYaml .Values.autoscaler.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.autoscaler.tolerations }}
      tolerations:
{{ toYaml .Values.autoscaler.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.autoscaler.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.autoscaler.image.pullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.autoscaler.image.pullSecrets | indent 8 }}
      {{- end }}
      containers:
      - name: autoscaler
        image: "{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }}"
        imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }}
        resources:
{{ toYaml .Values.autoscaler.resources | indent 10 }}
        {{- if .Values.autoscaler.livenessProbe.enabled }}
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: {{ .Values.autoscaler.livenessProbe.initialDelaySeconds }}
          periodSeconds: {{ .Values.autoscaler.livenessProbe.periodSeconds }}
          timeoutSeconds: {{ .Values.autoscaler.livenessProbe.timeoutSeconds }}
          successThreshold: {{ .Values.autoscaler.livenessProbe.successThreshold }}
          failureThreshold: {{ .Values.autoscaler.livenessProbe.failureThreshold }}
        {{- end }}
        command:
          - /cluster-proportional-autoscaler
          - --namespace={{ .Release.Namespace }}
          - --configmap={{ template "coredns.fullname" . }}-autoscaler
          - --target=Deployment/{{ default (include "coredns.fullname" .) .Values.deployment.name }}
          - --logtostderr=true
          - --v=2
        {{- if .Values.autoscaler.customFlags }}
{{ toYaml .Values.autoscaler.customFlags | indent 10 }}
        {{- end }}
{{- if .Values.autoscaler.extraContainers }}
{{ toYaml .Values.autoscaler.extraContainers | indent 6 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,174 @@
{{/*
Main CoreDNS Deployment. replicas is omitted when either the proportional
autoscaler or the HPA owns scaling. The checksum/config pod annotation forces
a rollout whenever the rendered Corefile ConfigMap changes.
*/}}
{{- if .Values.deployment.enabled }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ default (include "coredns.fullname" .) .Values.deployment.name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
    app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | replace ":" "-" | replace "@" "_" | trunc 63 | trimSuffix "-" | quote }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- if or .Values.deployment.annotations .Values.customAnnotations }}
  annotations:
    {{- if .Values.customAnnotations }}
    {{- toYaml .Values.customAnnotations | nindent 4 }}
    {{- end }}
    {{- if .Values.deployment.annotations }}
    {{- toYaml .Values.deployment.annotations | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  {{- /* scaling is delegated when an autoscaler or HPA is active */}}
  {{- if and (not .Values.autoscaler.enabled) (not .Values.hpa.enabled) }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }}
      maxSurge: {{ .Values.rollingUpdate.maxSurge }}
  selector:
  {{- if .Values.deployment.selector }}
    {{- toYaml .Values.deployment.selector | nindent 4 }}
  {{- else }}
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name | quote }}
      {{- if .Values.isClusterService }}
      k8s-app: {{ template "coredns.k8sapplabel" . }}
      {{- end }}
      app.kubernetes.io/name: {{ template "coredns.name" . }}
  {{- end }}
  template:
    metadata:
      labels:
        {{- if .Values.isClusterService }}
        k8s-app: {{ template "coredns.k8sapplabel" . }}
        {{- end }}
        app.kubernetes.io/name: {{ template "coredns.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name | quote }}
        {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 8 }}
        {{- end }}
      annotations:
        {{- /* roll pods whenever the rendered Corefile changes */}}
        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
        {{- if .Values.isClusterService }}
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
        {{- end }}
        {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
        {{- end }}
    spec:
      {{- if .Values.podSecurityContext }}
      securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }}
      {{- end }}
      {{- if .Values.terminationGracePeriodSeconds }}
      terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
      {{- end }}
      serviceAccountName: {{ template "coredns.serviceAccountName" . }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      {{- if .Values.isClusterService }}
      {{- /* cluster DNS itself must not resolve via itself */}}
      dnsPolicy: Default
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.topologySpreadConstraints }}
      topologySpreadConstraints:
{{ tpl (toYaml .Values.topologySpreadConstraints) $ | indent 8 }}
      {{- end }}
      {{- if .Values.tolerations }}
      tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
      {{- end }}
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.image.pullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.image.pullSecrets | indent 8 }}
      {{- end }}
      {{- if .Values.initContainers }}
      initContainers:
{{ toYaml .Values.initContainers | indent 8 }}
      {{- end }}
      containers:
      - name: "coredns"
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
        {{- range .Values.extraSecrets }}
        - name: {{ .name }}
          mountPath: {{ .mountPath }}
          readOnly: true
        {{- end }}
        {{- if .Values.extraVolumeMounts }}
        {{- toYaml .Values.extraVolumeMounts | nindent 8}}
        {{- end }}
        {{- if .Values.env }}
        env:
          {{- toYaml .Values.env | nindent 10}}
        {{- end }}
        resources:
{{ toYaml .Values.resources | indent 10 }}
        ports:
{{ include "coredns.containerPorts" . | indent 8 }}
        {{- if .Values.livenessProbe.enabled }}
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
          periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
          timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
          successThreshold: {{ .Values.livenessProbe.successThreshold }}
          failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
        {{- end }}
        {{- if .Values.readinessProbe.enabled }}
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
          timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
          successThreshold: {{ .Values.readinessProbe.successThreshold }}
          failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
        {{- end }}
        {{- if .Values.securityContext }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 10 }}
        {{- end }}
{{- if .Values.extraContainers }}
{{ toYaml .Values.extraContainers | indent 6 }}
{{- end }}
      volumes:
        - name: config-volume
          configMap:
            name: {{ template "coredns.fullname" . }}
            items:
            - key: Corefile
              path: Corefile
            {{ range .Values.zoneFiles }}
            - key: {{ .filename }}
              path: {{ .filename }}
            {{ end }}
        {{- range .Values.extraSecrets }}
        - name: {{ .name }}
          secret:
            secretName: {{ .name }}
            defaultMode: {{ default 400 .defaultMode }}
        {{- end }}
        {{- if .Values.extraVolumes }}
{{ toYaml .Values.extraVolumes | indent 8 }}
        {{- end }}
{{- end }}

View File

@@ -0,0 +1,33 @@
{{/*
HorizontalPodAutoscaler for the CoreDNS Deployment. Mutually exclusive with
the proportional autoscaler. Falls back to autoscaling/v2beta2 on clusters
that do not serve autoscaling/v2.
*/}}
{{- if and (.Values.hpa.enabled) (not .Values.autoscaler.enabled) }}
---
{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }}
apiVersion: autoscaling/v2
{{- else }}
apiVersion: autoscaling/v2beta2
{{- end }}
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "coredns.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ default (include "coredns.fullname" .) .Values.deployment.name }}
  minReplicas: {{ .Values.hpa.minReplicas }}
  maxReplicas: {{ .Values.hpa.maxReplicas }}
  metrics:
{{ toYaml .Values.hpa.metrics | indent 4 }}
  {{- if .Values.hpa.behavior }}
  behavior:
{{ toYaml .Values.hpa.behavior | indent 4 }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,26 @@
{{/*
PodDisruptionBudget for CoreDNS. The whole .Values.podDisruptionBudget map is
spliced into spec (minAvailable/maxUnavailable/selector...); a default pod
selector is generated only when the user did not supply one.
*/}}
{{- if and .Values.deployment.enabled .Values.podDisruptionBudget -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ template "coredns.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if not .Values.podDisruptionBudget.selector }}
  selector:
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name | quote }}
      {{- if .Values.isClusterService }}
      k8s-app: {{ template "coredns.k8sapplabel" . }}
      {{- end }}
      app.kubernetes.io/name: {{ template "coredns.name" . }}
  {{- end }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end }}

View File

@@ -0,0 +1,39 @@
{{/*
Dedicated Service exposing the CoreDNS Prometheus metrics endpoint (:9153),
kept separate from the DNS Service so scraping does not go through the DNS VIP.
*/}}
{{- if and .Values.deployment.enabled .Values.prometheus.service.enabled }}
apiVersion: v1
kind: Service
metadata:
  name: {{ template "coredns.fullname" . }}-metrics
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
    app.kubernetes.io/component: metrics
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- if or .Values.prometheus.service.annotations .Values.service.annotations .Values.customAnnotations }}
  annotations:
    {{- if .Values.prometheus.service.annotations }}
    {{- toYaml .Values.prometheus.service.annotations | nindent 4 }}
    {{- end }}
    {{- if .Values.service.annotations }}
    {{- toYaml .Values.service.annotations | nindent 4 }}
    {{- end }}
    {{- if .Values.customAnnotations }}
    {{- toYaml .Values.customAnnotations | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  selector:
    {{- if .Values.prometheus.service.selector }}
    {{- toYaml .Values.prometheus.service.selector | nindent 4 }}
    {{- else }}
    app.kubernetes.io/instance: {{ .Release.Name | quote }}
    {{- if .Values.isClusterService }}
    k8s-app: {{ template "coredns.k8sapplabel" . }}
    {{- end }}
    app.kubernetes.io/name: {{ template "coredns.name" . }}
    {{- end }}
  ports:
  - name: metrics
    port: 9153
    targetPort: 9153
{{- end }}

View File

@@ -0,0 +1,61 @@
{{/*
DNS Service fronting the CoreDNS Deployment. Ports come from the
"coredns.servicePorts" helper; most spec fields are optional pass-throughs
from .Values.service.
FIX: the trafficDistribution stanza previously sat AFTER the closing
`end` of `if .Values.deployment.enabled`, i.e. outside the Service document —
with deployment.enabled=false it emitted an orphaned `trafficDistribution:`
line (invalid manifest). It now lives inside spec, matching the upstream chart.
*/}}
{{- if .Values.deployment.enabled }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ default (include "coredns.fullname" .) .Values.service.name }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- if or .Values.service.annotations .Values.customAnnotations }}
  annotations:
    {{- if .Values.service.annotations }}
    {{- toYaml .Values.service.annotations | nindent 4 }}
    {{- end }}
    {{- if .Values.customAnnotations }}
    {{- toYaml .Values.customAnnotations | nindent 4 }}
    {{- end }}
  {{- end }}
spec:
  selector:
    {{- if .Values.service.selector }}
    {{- toYaml .Values.service.selector | nindent 4 }}
    {{- else }}
    app.kubernetes.io/instance: {{ .Release.Name | quote }}
    {{- if .Values.isClusterService }}
    k8s-app: {{ template "coredns.k8sapplabel" . }}
    {{- end }}
    app.kubernetes.io/name: {{ template "coredns.name" . }}
    {{- end }}
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{- end }}
  {{- if .Values.service.clusterIPs }}
  clusterIPs:
  {{ toYaml .Values.service.clusterIPs | nindent 4 }}
  {{- end }}
  {{- if .Values.service.externalIPs }}
  externalIPs:
    {{- toYaml .Values.service.externalIPs | nindent 4 }}
  {{- end }}
  {{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
  {{- end }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerClass }}
  loadBalancerClass: {{ .Values.service.loadBalancerClass }}
  {{- end }}
  ports:
  {{ include "coredns.servicePorts" . | indent 2 -}}
  type: {{ default "ClusterIP" .Values.serviceType }}
  {{- if .Values.service.ipFamilyPolicy }}
  ipFamilyPolicy: {{ .Values.service.ipFamilyPolicy }}
  {{- end }}
  {{- if .Values.service.trafficDistribution }}
  trafficDistribution: {{ .Values.service.trafficDistribution }}
  {{- end }}
{{- end }}

View File

@@ -0,0 +1,20 @@
{{/*
ServiceAccount used by the cluster-proportional-autoscaler Deployment;
referenced by the autoscaler ClusterRoleBinding.
*/}}
{{- if and .Values.autoscaler.enabled .Values.rbac.create }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "coredns.fullname" . }}-autoscaler
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- if .Values.autoscaler.image.pullSecrets }}
imagePullSecrets:
{{ toYaml .Values.autoscaler.image.pullSecrets | indent 2 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,24 @@
{{/*
ServiceAccount for the CoreDNS pods. Created only when
serviceAccount.create is true; name resolved via coredns.serviceAccountName.
*/}}
{{- if and .Values.deployment.enabled .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "coredns.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
  {{- if .Values.customLabels }}
{{ toYaml .Values.customLabels | indent 4 }}
  {{- end }}
  {{- if or .Values.serviceAccount.annotations .Values.customAnnotations }}
  annotations:
    {{- if .Values.customAnnotations }}
    {{- toYaml .Values.customAnnotations | nindent 4 }}
    {{- end }}
    {{- if .Values.serviceAccount.annotations }}
    {{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
    {{- end }}
  {{- end }}
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{ toYaml .Values.image.pullSecrets | indent 2 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,40 @@
{{/*
Prometheus Operator ServiceMonitor scraping the metrics Service. May be placed
in a different namespace (prometheus.monitor.namespace); in that case a
namespaceSelector points back at the release namespace.
*/}}
{{- if and .Values.deployment.enabled .Values.prometheus.monitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: {{ template "coredns.fullname" . }}
  {{- if .Values.prometheus.monitor.namespace }}
  namespace: {{ .Values.prometheus.monitor.namespace }}
  {{- end }}
  labels: {{- include "coredns.labels" . | nindent 4 }}
  {{- if .Values.prometheus.monitor.additionalLabels }}
{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }}
  {{- end }}
  {{- with .Values.customAnnotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if ne .Values.prometheus.monitor.namespace .Release.Namespace }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  {{- end }}
  selector:
    {{- if .Values.prometheus.monitor.selector }}
    {{- toYaml .Values.prometheus.monitor.selector | nindent 4 }}
    {{- else }}
    matchLabels:
      app.kubernetes.io/instance: {{ .Release.Name | quote }}
      {{- if .Values.isClusterService }}
      k8s-app: {{ template "coredns.k8sapplabel" . }}
      {{- end }}
      app.kubernetes.io/name: {{ template "coredns.name" . }}
      app.kubernetes.io/component: metrics
    {{- end }}
  endpoints:
    - port: metrics
      {{- if .Values.prometheus.monitor.interval }}
      interval: {{ .Values.prometheus.monitor.interval }}
      {{- end }}
{{- end }}

View File

@@ -0,0 +1,62 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: test ClusterRole for autoscaler
templates:
- templates/clusterrole-autoscaler.yaml
tests:
- it: should render ClusterRole when autoscaler and rbac are enabled
set:
autoscaler:
enabled: true
rbac:
create: true
customLabels:
team: devops
customAnnotations:
note: autoscaler
asserts:
- hasDocuments:
count: 1
- isKind:
of: ClusterRole
- matchRegex:
path: metadata.name
pattern: ^.*-autoscaler$
- equal:
path: rules[0].resources
value: ["nodes"]
- equal:
path: metadata.labels.team
value: devops
- equal:
path: metadata.annotations.note
value: autoscaler
- it: should not render ClusterRole if autoscaler is disabled
set:
autoscaler:
enabled: false
rbac:
create: true
asserts:
- hasDocuments:
count: 0
- it: should not render ClusterRole if rbac is disabled
set:
autoscaler:
enabled: true
rbac:
create: false
asserts:
- hasDocuments:
count: 0

View File

@@ -0,0 +1,53 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: ClusterRole RBAC Test
templates:
- templates/clusterrole.yaml
set:
deployment:
enabled: true
rbac:
create: true
pspEnable: true
tests:
- it: should render ClusterRole with correct name and rules
asserts:
- isKind:
of: ClusterRole
- equal:
path: metadata.name
value: RELEASE-NAME-coredns
- contains:
path: rules[0].resources
content: endpoints
- contains:
path: rules[0].resources
content: services
- contains:
path: rules[0].resources
content: pods
- contains:
path: rules[0].resources
content: namespaces
- equal:
path: rules[1].apiGroups[0]
value: discovery.k8s.io
- contains:
path: rules[1].resources
content: endpointslices
- equal:
path: rules[2].apiGroups[0]
value: policy
- equal:
path: rules[2].apiGroups[1]
value: extensions
- contains:
path: rules[2].resources
content: podsecuritypolicies
- contains:
path: rules[2].verbs
content: use

View File

@@ -0,0 +1,60 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: ClusterRoleBinding Autoscaler Test
templates:
- templates/clusterrolebinding-autoscaler.yaml
set:
autoscaler:
enabled: true
rbac:
create: true
customLabels:
my-custom-label: "enabled"
customAnnotations:
custom-annotation: "test-value"
release:
namespace: test-namespace
tests:
- it: should render ClusterRoleBinding with correct metadata and subject
asserts:
- isKind:
of: ClusterRoleBinding
- equal:
path: metadata.name
value: RELEASE-NAME-coredns-autoscaler
- equal:
path: metadata.labels.my-custom-label
value: enabled
- equal:
path: metadata.annotations.custom-annotation
value: test-value
- equal:
path: roleRef.apiGroup
value: rbac.authorization.k8s.io
- equal:
path: roleRef.kind
value: ClusterRole
- equal:
path: roleRef.name
value: RELEASE-NAME-coredns-autoscaler
- equal:
path: subjects[0].kind
value: ServiceAccount
- equal:
path: subjects[0].name
value: RELEASE-NAME-coredns-autoscaler
- equal:
path: subjects[0].namespace
value: test-namespace

View File

@@ -0,0 +1,52 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: ClusterRoleBinding Test
templates:
- templates/clusterrolebinding.yaml
set:
deployment:
enabled: true
rbac:
create: true
nameOverride: coredns-test
serviceAccount:
name: coredns-test
release:
namespace: test-namespace
tests:
- it: should render ClusterRoleBinding with correct metadata and subjects
asserts:
- isKind:
of: ClusterRoleBinding
- equal:
path: metadata.name
value: RELEASE-NAME-coredns-test
- equal:
path: roleRef.apiGroup
value: rbac.authorization.k8s.io
- equal:
path: roleRef.kind
value: ClusterRole
- equal:
path: roleRef.name
value: RELEASE-NAME-coredns-test
- equal:
path: subjects[0].kind
value: ServiceAccount
- equal:
path: subjects[0].name
value: coredns-test
- equal:
path: subjects[0].namespace
value: test-namespace

View File

@@ -0,0 +1,75 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: ConfigMap Autoscaler Test
templates:
- templates/configmap-autoscaler.yaml
set:
autoscaler:
enabled: true
coresPerReplica: 256
nodesPerReplica: 32
preventSinglePointFailure: true
min: 1
max: 10
includeUnschedulableNodes: false
configmap:
annotations:
autoscaler-note: "used by cluster-autoscaler"
customLabels:
my-custom-label: "enabled"
customAnnotations:
custom-annotation: "test-value"
release:
namespace: test-namespace
tests:
- it: should render ConfigMap with correct metadata and autoscaler values
asserts:
- isKind:
of: ConfigMap
- equal:
path: metadata.name
value: RELEASE-NAME-coredns-autoscaler
- equal:
path: metadata.namespace
value: test-namespace
- equal:
path: metadata.labels.my-custom-label
value: enabled
- equal:
path: metadata.annotations.custom-annotation
value: test-value
- equal:
path: metadata.annotations.autoscaler-note
value: used by cluster-autoscaler
- matchRegex:
path: data.linear
pattern: '"coresPerReplica": 256'
- matchRegex:
path: data.linear
pattern: '"nodesPerReplica": 32'
- matchRegex:
path: data.linear
pattern: '"preventSinglePointFailure": true'
- matchRegex:
path: data.linear
pattern: '"min": 1'
- matchRegex:
path: data.linear
pattern: '"max": 10'
- matchRegex:
path: data.linear
pattern: '"includeUnschedulableNodes": false'

View File

@@ -0,0 +1,41 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: "Test coredns configmap generation"
templates:
- templates/configmap.yaml
tests:
- it: "should create a configmap with custom Corefile and zone files"
set:
deployment:
enabled: true
skipConfig: false
customAnnotations:
custom-annotation: "test-value"
servers:
- zones:
- zone: "example.com"
scheme: "."
port: 5353
plugins:
- name: forward
parameters: ". 8.8.8.8"
zoneFiles:
- filename: db.example.com
contents: |
example.com. IN A 192.0.2.1
asserts:
- isKind:
of: ConfigMap
- matchRegex:
path: data.Corefile
pattern: "example\\.com:5353 \\{"
- matchRegex:
path: data.Corefile
pattern: "forward \\. 8\\.8\\.8\\.8"
- equal:
path: metadata.annotations.custom-annotation
value: "test-value"
- matchRegex:
path: 'data["db.example.com"]'
pattern: "example\\.com\\.\\s+IN\\s+A\\s+192\\.0\\.2\\.1"

View File

@@ -0,0 +1,77 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: test autoscaler deployment
templates:
- templates/configmap-autoscaler.yaml
- templates/deployment-autoscaler.yaml
tests:
- it: should render autoscaler deployment with correct image
set:
autoscaler:
enabled: true
image:
repository: autoscaler
tag: v1.0.0
pullPolicy: IfNotPresent
hpa:
enabled: false
template: templates/deployment-autoscaler.yaml
asserts:
- isKind:
of: Deployment
- equal:
path: metadata.name
value: RELEASE-NAME-coredns-autoscaler
- equal:
path: spec.template.spec.containers[0].image
value: autoscaler:v1.0.0
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: IfNotPresent
- it: should set correct priorityClassName and tolerations
set:
autoscaler:
enabled: true
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
image:
repository: autoscaler
tag: v1.0.0
hpa:
enabled: false
template: templates/deployment-autoscaler.yaml
asserts:
- equal:
path: spec.template.spec.priorityClassName
value: system-cluster-critical
- equal:
path: spec.template.spec.tolerations[0].key
value: CriticalAddonsOnly
- equal:
path: spec.template.spec.tolerations[0].operator
value: Exists
- it: should not render deployment if autoscaler disabled
set:
autoscaler:
enabled: false
hpa:
enabled: false
template: templates/deployment-autoscaler.yaml
asserts:
- hasDocuments:
count: 0
- it: should not render deployment if hpa enabled
set:
autoscaler:
enabled: true
hpa:
enabled: true
template: templates/deployment-autoscaler.yaml
asserts:
- hasDocuments:
count: 0

View File

@@ -0,0 +1,62 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: CoreDNS Deployment Tests
templates:
- templates/configmap.yaml
- templates/deployment.yaml
tests:
- it: should render a deployment with the correct image
set:
deployment:
enabled: true
image:
repository: coredns/coredns
tag: 1.10.1
documentIndex: 0
template: templates/deployment.yaml
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: coredns/coredns:1.10.1
- it: should include tolerations when isClusterService is true
set:
deployment:
enabled: true
isClusterService: true
tolerations:
- key: CriticalAddonsOnly
operator: Exists
documentIndex: 0
template: templates/deployment.yaml
asserts:
- exists:
path: spec.template.spec.tolerations
- it: should contain checksum/config annotation
set:
deployment:
enabled: true
documentIndex: 0
template: templates/deployment.yaml
asserts:
- matchRegex:
path: spec.template.metadata.annotations.checksum/config
pattern: ^[a-f0-9]{64}$
- it: should set pod security context and priority class
set:
deployment:
enabled: true
podSecurityContext:
runAsUser: 1000
priorityClassName: system-cluster-critical
documentIndex: 0
template: templates/deployment.yaml
asserts:
- equal:
path: spec.template.spec.securityContext.runAsUser
value: 1000
- equal:
path: spec.template.spec.priorityClassName
value: system-cluster-critical

View File

@@ -0,0 +1,110 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: HPA Test
templates:
- templates/hpa.yaml
set:
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 80
behavior:
scaleUp:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 100
periodSeconds: 15
deployment:
name: coredns
customLabels:
custom-label: "true"
customAnnotations:
custom-annotation: "enabled"
autoscaler:
enabled: false
release:
namespace: test-namespace
tests:
- it: should render HPA with correct metadata and spec
asserts:
- isKind:
of: HorizontalPodAutoscaler
- equal:
path: metadata.name
value: RELEASE-NAME-coredns
- equal:
path: metadata.namespace
value: test-namespace
- equal:
path: metadata.labels.custom-label
value: "true"
- equal:
path: metadata.annotations.custom-annotation
value: "enabled"
- equal:
path: spec.minReplicas
value: 1
- equal:
path: spec.maxReplicas
value: 5
- equal:
path: spec.scaleTargetRef.apiVersion
value: apps/v1
- equal:
path: spec.scaleTargetRef.kind
value: Deployment
- equal:
path: spec.scaleTargetRef.name
value: coredns
- equal:
path: spec.metrics[0].type
value: Resource
- equal:
path: spec.metrics[0].resource.name
value: cpu
- equal:
path: spec.metrics[0].resource.target.type
value: Utilization
- equal:
path: spec.metrics[0].resource.target.averageUtilization
value: 80
- equal:
path: spec.behavior.scaleUp.stabilizationWindowSeconds
value: 300
- equal:
path: spec.behavior.scaleUp.policies[0].type
value: Percent
- equal:
path: spec.behavior.scaleUp.policies[0].value
value: 100
- equal:
path: spec.behavior.scaleUp.policies[0].periodSeconds
value: 15

View File

@@ -0,0 +1,61 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: PodDisruptionBudget Test
templates:
- templates/poddisruptionbudget.yaml
set:
deployment:
enabled: true
isClusterService: true
podDisruptionBudget:
maxUnavailable: 1
customLabels:
my-custom-label: "enabled"
customAnnotations:
custom-annotation: "true"
release:
namespace: test-namespace
tests:
- it: should render PodDisruptionBudget with correct metadata and spec
asserts:
- isKind:
of: PodDisruptionBudget
- equal:
path: metadata.name
value: RELEASE-NAME-coredns
- equal:
path: metadata.namespace
value: test-namespace
- equal:
path: metadata.labels.my-custom-label
value: enabled
- equal:
path: metadata.annotations.custom-annotation
value: "true"
- equal:
path: spec.maxUnavailable
value: 1
- equal:
path: spec.selector.matchLabels["app.kubernetes.io/instance"]
value: RELEASE-NAME
- equal:
path: spec.selector.matchLabels["app.kubernetes.io/name"]
value: coredns
- equal:
path: spec.selector.matchLabels["k8s-app"]
value: coredns

View File

@@ -0,0 +1,81 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: Prometheus Metrics Service Test
templates:
- templates/service-metrics.yaml
set:
deployment:
enabled: true
prometheus:
service:
enabled: true
annotations:
prometheus.io/scrape: "true"
selector:
app: custom-coredns
service:
annotations:
monitoring: "enabled"
customLabels:
my-custom-label: "true"
customAnnotations:
custom-annotation: "true"
isClusterService: true
release:
namespace: test-namespace
tests:
- it: should render Prometheus metrics Service with correct metadata and port
asserts:
- isKind:
of: Service
- equal:
path: metadata.name
value: RELEASE-NAME-coredns-metrics
- equal:
path: metadata.namespace
value: test-namespace
- equal:
path: metadata.labels["app.kubernetes.io/component"]
value: metrics
- equal:
path: metadata.labels["my-custom-label"]
value: "true"
- equal:
path: metadata.annotations["prometheus.io/scrape"]
value: "true"
- equal:
path: metadata.annotations["monitoring"]
value: "enabled"
- equal:
path: metadata.annotations["custom-annotation"]
value: "true"
- equal:
path: spec.selector.app
value: custom-coredns
- equal:
path: spec.ports[0].name
value: metrics
- equal:
path: spec.ports[0].port
value: 9153
- equal:
path: spec.ports[0].targetPort
value: 9153

View File

@@ -0,0 +1,108 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: Service Rendering Test
templates:
- templates/service.yaml
set:
deployment:
enabled: true
service:
name: "custom-service-name"
annotations:
service-annotation: "value"
selector:
component: "dns"
clusterIP: "10.96.0.10"
clusterIPs:
- "10.96.0.10"
- "fd00::10"
externalIPs:
- "1.2.3.4"
externalTrafficPolicy: "Local"
loadBalancerIP: "4.3.2.1"
loadBalancerClass: "my-lb-class"
ipFamilyPolicy: "PreferDualStack"
trafficDistribution: "Topology"
customLabels:
my-custom-label: "enabled"
customAnnotations:
custom-annotation: "test"
isClusterService: true
release:
namespace: test-ns
tests:
- it: should render a valid Service with custom values
asserts:
- isKind:
of: Service
- equal:
path: metadata.name
value: custom-service-name
- equal:
path: metadata.namespace
value: test-ns
- equal:
path: metadata.labels.my-custom-label
value: enabled
- equal:
path: metadata.annotations.custom-annotation
value: test
- equal:
path: metadata.annotations.service-annotation
value: value
- equal:
path: spec.selector.component
value: dns
- equal:
path: spec.clusterIP
value: 10.96.0.10
- equal:
path: spec.clusterIPs[0]
value: 10.96.0.10
- equal:
path: spec.clusterIPs[1]
value: fd00::10
- equal:
path: spec.externalIPs[0]
value: 1.2.3.4
- equal:
path: spec.externalTrafficPolicy
value: Local
- equal:
path: spec.loadBalancerIP
value: 4.3.2.1
- equal:
path: spec.loadBalancerClass
value: my-lb-class
- equal:
path: spec.type
value: ClusterIP
- equal:
path: spec.ipFamilyPolicy
value: PreferDualStack
- equal:
path: spec.trafficDistribution
value: Topology

View File

@@ -0,0 +1,48 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: Autoscaler ServiceAccount Test
templates:
- templates/serviceaccount-autoscaler.yaml
set:
autoscaler:
enabled: true
image:
pullSecrets:
- name: my-registry-secret
rbac:
create: true
customLabels:
my-custom-label: "true"
customAnnotations:
custom-annotation: "enabled"
release:
name: release-name
namespace: test-namespace
tests:
- it: should render ServiceAccount with correct name, namespace, labels, annotations, and pullSecrets
asserts:
- isKind:
of: ServiceAccount
- equal:
path: metadata.name
value: release-name-coredns-autoscaler
- equal:
path: metadata.namespace
value: test-namespace
- equal:
path: metadata.labels.my-custom-label
value: "true"
- equal:
path: metadata.annotations.custom-annotation
value: "enabled"
- equal:
path: imagePullSecrets[0].name
value: my-registry-secret

View File

@@ -0,0 +1,47 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: ServiceAccount Test
templates:
  - templates/serviceaccount.yaml
set:
  deployment:
    enabled: true
  serviceAccount:
    create: true
    annotations:
      serviceaccount-annotation: "true"
  customAnnotations:
    custom-annotation: "yes"
  image:
    pullSecrets:
      - name: my-pull-secret
release:
  namespace: test-namespace
  name: test-release
tests:
  - it: should render ServiceAccount with correct name, annotations, and pullSecrets
    asserts:
      - isKind:
          of: ServiceAccount
      - equal:
          path: metadata.name
          value: test-release-coredns
      - equal:
          path: metadata.namespace
          value: test-namespace
      - equal:
          path: metadata.annotations.custom-annotation
          # Quoted deliberately: a bare `yes` resolves to boolean true under
          # YAML 1.1 rules, which would never equal the string "yes" set above.
          value: "yes"
      - equal:
          path: metadata.annotations.serviceaccount-annotation
          value: "true"
      - equal:
          path: imagePullSecrets[0].name
          value: my-pull-secret

View File

@@ -0,0 +1,64 @@
# yaml-language-server: $schema=https://raw.githubusercontent.com/helm-unittest/helm-unittest/main/schema/helm-testsuite.json
suite: ServiceMonitor Rendering Test
templates:
- templates/servicemonitor.yaml
set:
deployment:
enabled: true
prometheus:
monitor:
enabled: true
namespace: monitoring
interval: 15s
additionalLabels:
team: infra
selector:
matchLabels:
my-custom-label: "true"
customAnnotations:
monitoring: "enabled"
isClusterService: true
release:
name: test-release
namespace: default
tests:
- it: should render ServiceMonitor with correct metadata and selector
asserts:
- isKind:
of: ServiceMonitor
- equal:
path: metadata.name
value: test-release-coredns
- equal:
path: metadata.namespace
value: monitoring
- equal:
path: metadata.labels.team
value: infra
- equal:
path: metadata.annotations.monitoring
value: "enabled"
- equal:
path: spec.namespaceSelector.matchNames[0]
value: default
- equal:
path: spec.selector.matchLabels.my-custom-label
value: "true"
- equal:
path: spec.endpoints[0].port
value: metrics
- equal:
path: spec.endpoints[0].interval
value: 15s

View File

@@ -0,0 +1,408 @@
# Default values for coredns.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: coredns/coredns
# Overrides the image tag whose default is the chart appVersion.
tag: ""
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
pullSecrets: []
# pullSecrets:
# - name: myRegistryKeySecretName
replicaCount: 1
resources:
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
rollingUpdate:
maxUnavailable: 1
maxSurge: 25%
terminationGracePeriodSeconds: 30
podAnnotations: {}
# cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
serviceType: "ClusterIP"
prometheus:
service:
enabled: false
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9153"
selector: {}
monitor:
enabled: false
additionalLabels: {}
namespace: ""
interval: ""
selector: {}
service:
# clusterIP: ""
# clusterIPs: []
# loadBalancerIP: ""
# loadBalancerClass: ""
# externalIPs: []
# externalTrafficPolicy: ""
# ipFamilyPolicy: ""
# trafficDistribution: PreferClose
# The name of the Service
# If not set, a name is generated using the fullname template
name: ""
annotations: {}
# Pod selector
selector: {}
serviceAccount:
create: false
# The name of the ServiceAccount to use
# If not set and create is true, a name is generated using the fullname template
name: ""
annotations: {}
rbac:
# If true, create & use RBAC resources
create: true
clusterRole:
# By default a name is generated using the fullname template.
# Override here if desired:
nameOverride: ""
# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app.
isClusterService: true
# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set.
priorityClassName: ""
# Configure the pod level securityContext.
podSecurityContext: {}
# Configure SecurityContext for Pod.
# Ensure that required linux capability to bind port number below 1024 is assigned (`CAP_NET_BIND_SERVICE`).
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- ALL
readOnlyRootFilesystem: true
# Default zone is what Kubernetes recommends:
# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options
servers:
- zones:
- zone: .
use_tcp: true
port: 53
# -- expose the service on a different port
# servicePort: 5353
# If serviceType is nodePort you can specify nodePort here
# nodePort: 30053
# hostPort: 53
plugins:
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
configBlock: |-
lameduck 10s
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: cluster.local in-addr.arpa ip6.arpa
configBlock: |-
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
- name: forward
parameters: . /etc/resolv.conf
- name: cache
parameters: 30
- name: loop
- name: reload
- name: loadbalance
# Complete example with all the options:
# - zones: # the `zones` block can be left out entirely, defaults to "."
# - zone: hello.world. # optional, defaults to "."
# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS)
# - zone: foo.bar.
# scheme: dns://
# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol
# # Note that this will not work if you are also exposing tls or grpc on the same server
# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS)
# plugins: # the plugins to use for this server block
# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it!
# parameters: foo bar # list of parameters after the plugin
# configBlock: |- # if the plugin supports extra block style config, supply it here
# hello world
# foo bar
# Extra configuration that is applied outside of the default zone block.
# Example to include additional config files, which may come from extraVolumes:
# extraConfig:
# import:
# parameters: /opt/coredns/*.conf
extraConfig: {}
# To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config
livenessProbe:
enabled: true
initialDelaySeconds: 60
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 5
successThreshold: 1
# To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config
readinessProbe:
enabled: true
initialDelaySeconds: 30
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 1
successThreshold: 1
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#topologyspreadconstraint-v1-core
# and supports Helm templating.
# For example:
# topologySpreadConstraints:
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: '{{ template "coredns.name" . }}'
# app.kubernetes.io/instance: '{{ .Release.Name }}'
# topologyKey: topology.kubernetes.io/zone
# maxSkew: 1
# whenUnsatisfiable: ScheduleAnyway
# - labelSelector:
# matchLabels:
# app.kubernetes.io/name: '{{ template "coredns.name" . }}'
# app.kubernetes.io/instance: '{{ .Release.Name }}'
# topologyKey: kubernetes.io/hostname
# maxSkew: 1
# whenUnsatisfiable: ScheduleAnyway
topologySpreadConstraints: []
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
podDisruptionBudget: {}
# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/
zoneFiles: []
# - filename: example.db
# domain: example.com
# contents: |
# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600
# example.com. IN NS b.iana-servers.net.
# example.com. IN NS a.iana-servers.net.
# example.com. IN A 192.168.99.102
# *.example.com. IN A 192.168.99.102
# optional array of sidecar containers
extraContainers: []
# - name: some-container-name
# image: some-image:latest
# imagePullPolicy: Always
# optional array of extra volumes to create
extraVolumes: []
# - name: some-volume-name
# emptyDir: {}
# optional array of mount points for extraVolumes
extraVolumeMounts: []
# - name: some-volume-name
# mountPath: /etc/wherever
# optional array of secrets to mount inside coredns container
# possible usecase: need for secure connection with etcd backend
extraSecrets: []
# - name: etcd-client-certs
# mountPath: /etc/coredns/tls/etcd
# defaultMode: 420
# - name: some-fancy-secret
# mountPath: /etc/wherever
# defaultMode: 440
# optional array of environment variables for coredns container
# possible usecase: provides username and password for etcd user authentications
env: []
# - name: WHATEVER_ENV
# value: whatever
# - name: SOME_SECRET_ENV
# valueFrom:
# secretKeyRef:
# name: some-secret-name
# key: secret-key
# To support legacy deployments using CoreDNS with the "k8s-app: kube-dns" label selectors.
# See https://github.com/coredns/helm/blob/master/charts/coredns/README.md#adopting-existing-coredns-resources
# k8sAppLabelOverride: "kube-dns"
# Custom labels to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled.
customLabels: {}
# Custom annotations to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled.
customAnnotations: {}
## Alternative configuration for HPA deployment if wanted
## Create HorizontalPodAutoscaler object.
##
# hpa:
# enabled: false
# minReplicas: 1
# maxReplicas: 10
#   metrics:
# - type: Resource
# resource:
# name: memory
# target:
# type: Utilization
# averageUtilization: 60
# - type: Resource
# resource:
# name: cpu
# target:
# type: Utilization
# averageUtilization: 60
hpa:
enabled: false
minReplicas: 1
maxReplicas: 2
metrics: []
## Configure a cluster-proportional-autoscaler for coredns
# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler
autoscaler:
# Enabled the cluster-proportional-autoscaler
enabled: false
# Number of cores in the cluster per coredns replica
coresPerReplica: 256
# Number of nodes in the cluster per coredns replica
nodesPerReplica: 16
# Min size of replicaCount
min: 0
# Max size of replicaCount (default of 0 is no max)
max: 0
# Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler
includeUnschedulableNodes: false
# If true does not allow single points of failure to form
preventSinglePointFailure: true
# Annotations for the coredns proportional autoscaler pods
podAnnotations: {}
  ## Optionally specify some extra flags to pass to cluster-proportional-autoscaler.
## Useful for e.g. the nodelabels flag.
# customFlags:
# - --nodelabels=topology.kubernetes.io/zone=us-east-1a
image:
repository: registry.k8s.io/cpa/cluster-proportional-autoscaler
tag: "v1.9.0"
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
pullSecrets: []
# pullSecrets:
# - name: myRegistryKeySecretName
# Optional priority class to be used for the autoscaler pods. priorityClassName used if not set.
priorityClassName: ""
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
tolerations: []
# resources for autoscaler pod
resources:
requests:
cpu: "20m"
memory: "10Mi"
limits:
cpu: "20m"
memory: "10Mi"
# Options for autoscaler configmap
configmap:
## Annotations for the coredns-autoscaler configmap
# i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed
annotations: {}
# Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler
livenessProbe:
enabled: true
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 3
successThreshold: 1
# optional array of sidecar containers
extraContainers: []
# - name: some-container-name
# image: some-image:latest
# imagePullPolicy: Always
deployment:
skipConfig: false
enabled: true
name: ""
## Annotations for the coredns deployment
annotations: {}
## Pod selector
selector: {}
# Configures initcontainers for the coredns deployment.
initContainers: []

View File