mirror of
https://github.com/Telecominfraproject/wlan-cloud-helm.git
synced 2026-03-20 22:39:08 +00:00
Compare commits
8 Commits
feature/th
...
feature/up
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
12c8715136 | ||
|
|
9689a60173 | ||
|
|
d7ac826a84 | ||
|
|
05eeda31eb | ||
|
|
44295c7a55 | ||
|
|
c248877825 | ||
|
|
f0098d1749 | ||
|
|
2992d325bc |
6
.github/workflows/helm-validation.yml
vendored
6
.github/workflows/helm-validation.yml
vendored
@@ -45,8 +45,7 @@ jobs:
|
||||
helm template -f values-test.yaml . | /tmp/k8s-validators/kubeval --ignore-missing-schemas
|
||||
|
||||
echo "Kube-score test"
|
||||
# will be fixed and enabled again in https://telecominfraproject.atlassian.net/browse/WIFI-1258
|
||||
helm template -f values-test.yaml . | /tmp/k8s-validators/kube-score score - || true
|
||||
helm template -f values-test.yaml . | /tmp/k8s-validators/kube-score score -
|
||||
- name: Test glusterfs
|
||||
working-directory: glusterfs/kube-templates
|
||||
run: |
|
||||
@@ -54,5 +53,4 @@ jobs:
|
||||
/tmp/k8s-validators/kubeval *.yaml
|
||||
|
||||
echo "Kube-score test"
|
||||
# will be fixed and enabled again in https://telecominfraproject.atlassian.net/browse/WIFI-1258
|
||||
/tmp/k8s-validators/kube-score score *.yaml || true
|
||||
/tmp/k8s-validators/kube-score score *.yaml
|
||||
103
.github/workflows/testing.yml
vendored
103
.github/workflows/testing.yml
vendored
@@ -1,103 +0,0 @@
|
||||
name: CloudSDK deployment and testing
|
||||
|
||||
env:
|
||||
PR_NUMBER: ${{ github.event.number }}
|
||||
HELM_RELEASE_PREFIX: tip-wlan
|
||||
AWS_EKS_NAME: tip-wlan-main
|
||||
AWS_DEFAULT_OUTPUT: json
|
||||
AWS_DEFAULT_REGION: us-east-2
|
||||
AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches: [ master ]
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout required repos
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: wlan-pki-cert-scripts
|
||||
repository: Telecominfraproject/wlan-pki-cert-scripts
|
||||
- name: Checkout Cloud SDK repo
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: wlan-cloud-helm
|
||||
repository: Telecominfraproject/wlan-cloud-helm
|
||||
- name: Checkout helm values repo
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
path: Toolsmith
|
||||
repository: Telecominfraproject/Toolsmith
|
||||
token: ${{ secrets.PAT_TOKEN }}
|
||||
|
||||
- name: Generate Helm values file
|
||||
run: |
|
||||
./Toolsmith/helm-values/aws-cicd-testing-pr-deployment.yaml.sh ${{ env.PR_NUMBER }} > pr-deployment.yaml
|
||||
|
||||
- name: Generate certs
|
||||
working-directory: wlan-pki-cert-scripts
|
||||
run: |
|
||||
./generate_all.sh
|
||||
./copy-certs-to-helm.sh ../wlan-cloud-helm
|
||||
|
||||
- name: Get kubeconfig for EKS ${{ env.AWS_EKS_NAME }}
|
||||
run: |
|
||||
aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
|
||||
|
||||
- name: Deploy Cloud SDK
|
||||
run: |
|
||||
helm dependency update wlan-cloud-helm/${{ env.HELM_RELEASE_PREFIX }}
|
||||
# using a timeout of 20 minutes as the EKS nodes may need to be scaled which takes some time
|
||||
helm upgrade --install ${{ env.HELM_RELEASE_PREFIX }}-pr-${{ env.PR_NUMBER }} wlan-cloud-helm/tip-wlan -f pr-deployment.yaml --create-namespace --namespace ${{ env.HELM_RELEASE_PREFIX }}-pr-${{ env.PR_NUMBER }} --wait --timeout 20m
|
||||
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ deploy ]
|
||||
steps:
|
||||
- name: Execute tests
|
||||
run: |
|
||||
echo Running tests...
|
||||
# this is needed to make until work
|
||||
set +e
|
||||
|
||||
urls="https://wlan-ui-pr-$PR_NUMBER.cicd.lab.wlan.tip.build https://wlan-graphql-pr-$PR_NUMBER.cicd.lab.wlan.tip.build/graphql"
|
||||
for url in $urls; do
|
||||
max_retry=300
|
||||
counter=0
|
||||
until curl --silent $url > /dev/null
|
||||
do
|
||||
sleep 1
|
||||
[[ counter -eq $max_retry ]] && echo "$url not reachable after $counter tries...giving up" && exit 1
|
||||
echo "#$counter: $url not reachable. trying again..."
|
||||
((counter++))
|
||||
done
|
||||
echo Successfully reached URL $url
|
||||
done
|
||||
|
||||
echo Tests were successful
|
||||
|
||||
cleanup:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [ deploy, test ]
|
||||
if: ${{ always() }}
|
||||
steps:
|
||||
- name: Get kubeconfig for EKS ${{ env.AWS_EKS_NAME }}
|
||||
run: |
|
||||
aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
|
||||
|
||||
- name: Delete Cloud SDK Helm release
|
||||
run: |
|
||||
helm delete ${{ env.HELM_RELEASE_PREFIX }}-pr-${{ env.PR_NUMBER }} --namespace ${{ env.HELM_RELEASE_PREFIX }}-pr-${{ env.PR_NUMBER }} || true
|
||||
|
||||
- name: Delete namespace
|
||||
run: |
|
||||
kubectl delete namespace ${{ env.HELM_RELEASE_PREFIX }}-pr-${{ env.PR_NUMBER }} --wait=true --ignore-not-found true
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -4,14 +4,13 @@
|
||||
*.p12
|
||||
*.csr
|
||||
*.cnf
|
||||
*.key
|
||||
*.DS_Store
|
||||
*.lock
|
||||
|
||||
# local development
|
||||
*.lock
|
||||
*.local_dev
|
||||
tip-wlan/resources/certs
|
||||
tip-wlan/resources/scripts
|
||||
|
||||
*.zip
|
||||
*.tgz
|
||||
stern*
|
||||
helmfile
|
||||
*.tgz
|
||||
|
||||
17
CHANGELOG.md
17
CHANGELOG.md
@@ -1,17 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
|
||||
|
||||
## [0.4.0](https://github.com/Telecominfraproject/wlan-cloud-helm/compare/f7c67645736e3dac498e2caec8c267f04d08b7bc...v0.4) - 2021-01-28
|
||||
|
||||
### Added
|
||||
|
||||
- Initial changelog entry. This is the first versioned release. Next releases will include a detailed overview of all the major changes introduced since the last version.
|
||||
|
||||
76
README.md
76
README.md
@@ -1,68 +1,16 @@
|
||||
# wlan-cloud-helm
|
||||
This repository contains helm charts for various deployment types of the tip wlan cloud services.
|
||||
|
||||
# IMPORTANT - CloudSDK Helm charts v0.4 to v1.x migration procedure
|
||||
|
||||
We've introduced breaking changes to how CloudSDK database charts are managed.
|
||||
If you want to preserve your data when moving from v0.4 to v1.x of the CloudSDK Helm charts, follow the steps outlined below.
|
||||
If you can re-install your CloudSDK and don't care to loose your data, you can skip the steps and just install the upstream charts version with no changes to the default installation procedure.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. Checkout latest wlan-cloud-helm repository
|
||||
2. Have your certificates for existing installation
|
||||
3. Helm 3.2+
|
||||
|
||||
## Procedure
|
||||
|
||||
All of the commands should be run under tip-wlan-helm directory.
|
||||
|
||||
1. Delete your current Helm release. The following commands will remove the pods, however, the PVC (your databases data) **won't be deleted**:
|
||||
```
|
||||
helm list -n default (to look up the name of the release)
|
||||
helm uninstall -n default tip-wlan (tip-wlan is usually the name of the release)
|
||||
```
|
||||
2. Replace `REPLACEME` with your storage class name in the `tip-wlan/resources/environments/migration.yaml` file. You can check the available storageclasses with the `kubectl get storageclass` command.
|
||||
3. Update your values file that you used for deploying the original release with the values from `migration.yaml` to preserve existing cassandra\postgres data (or skip that step and use the second upgrade command mentioned in #7)
|
||||
4. If you want to preserve the PKI certificates from the original Helm installation, copy them to a new location using the command below (or checkout the latest wlan-pki-cert-script repo and use `copy-certs-to-helm.sh %path_to_new_helm_code%` to generate new self-signed keys):
|
||||
```
|
||||
find . -regextype posix-extended -regex '.+(jks|pem|key|pkcs12|p12)$' -exec cp "{}" tip-wlan/resources/certs/ \;
|
||||
```
|
||||
5. Remove the old charts from the helm directory, so that the upgrade command can successfully pull new chart depedencies:
|
||||
```
|
||||
rm -rf tip-wlan/charts/cassandra tip-wlan/charts/kafka tip-wlan/charts/postgresql
|
||||
```
|
||||
6. Pull 3rd party subcharts:
|
||||
```
|
||||
helm dependency update tip-wlan
|
||||
```
|
||||
7. Perform Helm upgrade:
|
||||
```
|
||||
helm upgrade --install tip-wlan tip-wlan/ --namespace tip --create-namespace -f tip-wlan/resources/environments/your_values_with_fixes.yaml
|
||||
```
|
||||
|
||||
Alternatively, you can run the upgrade command as follows (the order of the -f arguments is important!):
|
||||
|
||||
```
|
||||
helm upgrade --install tip-wlan tip-wlan/ --namespace tip --create-namespace -f tip-wlan/resources/environments/original_values.yaml -f tip-wlan/resources/environments/migration.yaml
|
||||
```
|
||||
|
||||
As a precaution you can also run `helm template` with the same arguments as the upgrade command and examine the output before actually installing the chart
|
||||
|
||||
# Deploying the wlan-cloud deployment
|
||||
Run the following command under tip-wlan-helm directory:
|
||||
```
|
||||
helm dependency update tip-wlan
|
||||
helm upgrade --install <RELEASE_NAME> tip-wlan/ --namespace tip --create-namespace -f tip-wlan/resources/environments/dev.yaml
|
||||
```
|
||||
|
||||
More details can be found here: https://telecominfraproject.atlassian.net/wiki/spaces/WIFI/pages/262176803/Pre-requisites+before+deploying+Tip-Wlan+solution
|
||||
- Run the following command under tip-wlan-helm directory:
|
||||
- helm install <RELEASE_NAME> tip-wlan/ -n default -f tip-wlan/resources/environments/dev.yaml
|
||||
|
||||
More details can be found here: https://telecominfraproject.atlassian.net/wiki/spaces/WIFI/pages/262176803/Pre-requisites+before+deploying+Tip-Wlan+solution
|
||||
|
||||
# Deleting the wlan-cloud deployment:
|
||||
Run the following command:
|
||||
```
|
||||
helm del tip-wlan -n tip (replace the namespace with your namespace)
|
||||
```
|
||||
- Run the following command:
|
||||
- helm del tip-wlan -n default
|
||||
|
||||
(Note: this would not delete the tip namespace and any PVC/PV/Endpoints under this namespace. These are needed so we can reuse the same PVC mount when the pods are restarted.)
|
||||
|
||||
To get rid of them (PVC/PV/Endpoints), you can use the following script (expects that you are in the `tip` namespace or add `-n tip` to the below set of commands):
|
||||
@@ -144,17 +92,13 @@ done
|
||||
|
||||
Run minikube:
|
||||
|
||||
```
|
||||
minikube start --memory=10g --cpus=4 --driver=virtualbox --extra-config=kubelet.serialize-image-pulls=false --extra-config=kubelet.image-pull-progress-deadline=3m0s --docker-opt=max-concurrent-downloads=10
|
||||
```
|
||||
```minikube start --memory=10g --cpus=4 --driver=virtualbox --extra-config=kubelet.serialize-image-pulls=false --extra-config=kubelet.image-pull-progress-deadline=3m0s --docker-opt=max-concurrent-downloads=10```
|
||||
|
||||
Please note that you may choose another driver (parallels, vmwarefusion, hyperkit, vmware, docker, podman) which might be more suitable for your setup. Omitting this option enables auto discovery of available drivers.
|
||||
|
||||
Deploy CloudSDK chart:
|
||||
|
||||
```
|
||||
helm upgrade --install tip-wlan tip-wlan -f tip-wlan/resources/environments/dev-local.yaml -n default
|
||||
```
|
||||
```helm upgrade --install tip-wlan tip-wlan -f tip-wlan/resources/environments/dev-local.yaml -n default```
|
||||
|
||||
Wait a few minutes, when all pods are in `Running` state, obtain web ui link with `minikube service tip-wlan-wlan-cloud-static-portal -n tip --url`, open in the browser. Importing or trusting certificate might be needed.
|
||||
|
||||
@@ -229,4 +173,4 @@ xDG3eKlu+dllUtKx/PN6yflbT5xcGgcdmrwzRaWS
|
||||
|
||||
```
|
||||
|
||||
2. Double click on it, enter the system admin password, if prompted.
|
||||
2. Double click on it, enter the system admin password, if prompted.
|
||||
@@ -14,7 +14,7 @@ type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 0.4.0
|
||||
version: 0.1.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
@@ -65,11 +65,13 @@ dependencies:
|
||||
condition: nginx-ingress-controller.enabled
|
||||
- name: common
|
||||
version: 0.1.0
|
||||
|
||||
- name: zookeeper
|
||||
version: 0.1.0
|
||||
condition: zookeeper.enabled
|
||||
- name: kafka
|
||||
version: 12.2.0
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
version: 0.1.0
|
||||
condition: kafka.enabled
|
||||
|
||||
- name: postgresql
|
||||
version: 10.1.0
|
||||
repository: https://charts.bitnami.com/bitnami
|
||||
|
||||
@@ -1,5 +1,12 @@
|
||||
{{/*
|
||||
Resolve the environment variables to apply to a chart. The default namespace suffix
|
||||
is the name of the chart. This can be overridden if necessary (eg. for subcharts)
|
||||
using the following value:
|
||||
|
||||
- .Values.nsPrefix : override namespace prefix
|
||||
*/}}
|
||||
{{- define "common.namespace" -}}
|
||||
{{- .Release.Namespace -}}
|
||||
{{- default .Values.global.nsPrefix -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "common.env" -}}
|
||||
|
||||
10
tip-wlan/charts/common/templates/_namespace.tpl
Normal file
10
tip-wlan/charts/common/templates/_namespace.tpl
Normal file
@@ -0,0 +1,10 @@
|
||||
{{/*
|
||||
Resolve the namespace to apply to a chart. The default namespace suffix
|
||||
is the name of the chart. This can be overridden if necessary (eg. for subcharts)
|
||||
using the following value:
|
||||
|
||||
- .Values.nsPrefix : override namespace prefix
|
||||
*/}}
|
||||
{{- define "common.namespace" -}}
|
||||
{{- default .Values.global.nsPrefix -}}
|
||||
{{- end -}}
|
||||
@@ -1,43 +1,24 @@
|
||||
{{/*
|
||||
This template will be used to iterate through the access point debug ports and generate
|
||||
access point debug ports mapping
|
||||
This template will be used to iterate through the debug-ports and generate
|
||||
debug-ports mapping
|
||||
*/}}
|
||||
|
||||
{{- define "apDebugPortsStart" -}}
|
||||
{{- $portPrefix := $.Values.global.nodePortPrefixExt | default $.Values.nodePortPrefixExt | int -}}
|
||||
{{- $start := $.Values.accessPointDebugPortRange.start | int -}}
|
||||
{{- $end := (add $.Values.accessPointDebugPortRange.start $.Values.accessPointDebugPortRange.length) | int -}}
|
||||
{{- printf "%d%d" $portPrefix $start -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{- define "apDebugPortsEnd" -}}
|
||||
{{- $portPrefix := $.Values.global.nodePortPrefixExt | default $.Values.nodePortPrefixExt | int -}}
|
||||
{{- $start := $.Values.accessPointDebugPortRange.start | int -}}
|
||||
{{- $end := (add $.Values.accessPointDebugPortRange.start $.Values.accessPointDebugPortRange.length) | int -}}
|
||||
{{- printf "%d%d" $portPrefix $end -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
|
||||
{{- define "container.dev.apDebugPorts" -}}
|
||||
{{- $accessPointDebugPorts := untilStep (include "apDebugPortsStart" . | atoi) (include "apDebugPortsEnd" . | atoi) 1 -}}
|
||||
{{- range $index, $port := $accessPointDebugPorts }}
|
||||
- name: apdebugport-{{ $index }}
|
||||
containerPort: {{ $port }}
|
||||
{{- define "container.dev.debugport" -}}
|
||||
{{- range $index, $portid := .Values.debugPorts }}
|
||||
- name: debugport-{{ $index }}
|
||||
containerPort: {{ $portid }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "service.dev.apDebugPorts" -}}
|
||||
{{- $accessPointDebugPorts := untilStep (include "apDebugPortsStart" . | atoi) (include "apDebugPortsEnd" . | atoi) 1 -}}
|
||||
{{- range $index, $port := $accessPointDebugPorts }}
|
||||
- port: {{ $port }}
|
||||
targetPort: {{ $port }}
|
||||
{{- define "service.dev.debugport" -}}
|
||||
{{- range $index, $portid := .Values.debugPorts }}
|
||||
- port: {{ $portid }}
|
||||
targetPort: {{ $portid }}
|
||||
protocol: TCP
|
||||
name: apdebugport-{{ $index }}
|
||||
name: debugport-{{ $index }}
|
||||
{{- if eq $.Values.service.type "NodePort" }}
|
||||
nodePort: {{ $port }}
|
||||
nodePort: {{ $portid }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
@@ -16,7 +16,7 @@ else use user-provided URL
|
||||
{{- $zookeeperService := printf "%s-%s" .Release.Name .Values.zookeeper.url }}
|
||||
{{- default $zookeeperService }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Resolve the Kafka service-name to apply to a chart.
|
||||
@@ -67,6 +67,7 @@ else use user-provided URL
|
||||
{{- printf "%s-%s:%.f" .Release.Name .Values.opensyncgw.url .Values.opensyncgw.port | trunc 63 -}}
|
||||
{{- end -}}
|
||||
|
||||
|
||||
{{/*
|
||||
Resolve the pvc name that's would mounted to 2 charts - Portal and Opensync-gw
|
||||
*/}}
|
||||
@@ -79,4 +80,4 @@ else use user-provided URL
|
||||
*/}}
|
||||
{{- define "filestore.dir.name" -}}
|
||||
{{- printf "%s" .Values.filestore.internal | trunc 63 -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
8
tip-wlan/charts/kafka/Chart.yaml
Executable file
8
tip-wlan/charts/kafka/Chart.yaml
Executable file
@@ -0,0 +1,8 @@
|
||||
apiVersion: v2
|
||||
description: Apache Kafka is publish-subscribe messaging
|
||||
name: kafka
|
||||
version: 0.1.0
|
||||
appVersion: 1.0.0
|
||||
dependencies:
|
||||
- name: zookeeper
|
||||
version: 0.1.0
|
||||
2
tip-wlan/charts/kafka/resources/config/certs/README.md
Normal file
2
tip-wlan/charts/kafka/resources/config/certs/README.md
Normal file
@@ -0,0 +1,2 @@
|
||||
Contains certs needed for this service to start.
|
||||
Please refer to page: https://telecominfraproject.atlassian.net/wiki/spaces/WIFI/pages/262176803/Pre-requisites+before+deploying+Tip-Wlan+solution
|
||||
67
tip-wlan/charts/kafka/templates/NOTES.txt
Normal file
67
tip-wlan/charts/kafka/templates/NOTES.txt
Normal file
@@ -0,0 +1,67 @@
|
||||
### Connecting to Kafka from inside Kubernetes
|
||||
|
||||
You can connect to Kafka by running a simple pod in the K8s cluster like this with a configuration like this:
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: testclient
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
containers:
|
||||
- name: kafka
|
||||
image: {{ .Values.image }}:{{ .Values.imageTag }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- "exec tail -f /dev/null"
|
||||
|
||||
Once you have the testclient pod above running, you can list all kafka
|
||||
topics with:
|
||||
|
||||
kubectl -n {{ .Release.Namespace }} exec testclient -- kafka-topics --zookeeper {{ .Release.Name }}-zookeeper:2181 --list
|
||||
|
||||
To create a new topic:
|
||||
|
||||
kubectl -n {{ .Release.Namespace }} exec testclient -- kafka-topics --zookeeper {{ .Release.Name }}-zookeeper:2181 --topic test1 --create --partitions 1 --replication-factor 1
|
||||
|
||||
To listen for messages on a topic:
|
||||
|
||||
kubectl -n {{ .Release.Namespace }} exec -ti testclient -- kafka-console-consumer --bootstrap-server {{ include "common.fullname" . }}:9092 --topic test1 --from-beginning
|
||||
|
||||
To stop the listener session above press: Ctrl+C
|
||||
|
||||
To start an interactive message producer session:
|
||||
kubectl -n {{ .Release.Namespace }} exec -ti testclient -- kafka-console-producer --broker-list {{ include "common.fullname" . }}-headless:9092 --topic test1
|
||||
|
||||
To create a message in the above session, simply type the message and press "enter"
|
||||
To end the producer session try: Ctrl+C
|
||||
|
||||
If you specify "zookeeper.connect" in configurationOverrides, please replace "{{ .Release.Name }}-zookeeper:2181" with the value of "zookeeper.connect", or you will get error.
|
||||
|
||||
{{ if .Values.external.enabled }}
|
||||
### Connecting to Kafka from outside Kubernetes
|
||||
|
||||
You have enabled the external access feature of this chart.
|
||||
|
||||
**WARNING:** By default this feature allows Kafka clients outside Kubernetes to
|
||||
connect to Kafka via NodePort(s) in `PLAINTEXT`.
|
||||
|
||||
Please see this chart's README.md for more details and guidance.
|
||||
|
||||
If you wish to connect to Kafka from outside please configure your external Kafka
|
||||
clients to point at the following brokers. Please allow a few minutes for all
|
||||
associated resources to become healthy.
|
||||
{{ $fullName := include "common.fullname" . }}
|
||||
{{- $replicas := .Values.replicas | int }}
|
||||
{{- $servicePort := .Values.external.servicePort | int}}
|
||||
{{- $root := . }}
|
||||
{{- range $i, $e := until $replicas }}
|
||||
{{- $externalListenerPort := add $root.Values.external.firstListenerPort $i }}
|
||||
{{- if $root.Values.external.distinct }}
|
||||
{{ printf "%s-%d.%s:%d" $root.Release.Name $i $root.Values.external.domain $servicePort | indent 2 }}
|
||||
{{- else }}
|
||||
{{ printf "%s.%s:%d" $root.Release.Name $root.Values.external.domain $externalListenerPort | indent 2 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
89
tip-wlan/charts/kafka/templates/_helpers.tpl
Normal file
89
tip-wlan/charts/kafka/templates/_helpers.tpl
Normal file
@@ -0,0 +1,89 @@
|
||||
{{/*
|
||||
Form the Zookeeper URL. If zookeeper is installed as part of this chart, use k8s service discovery,
|
||||
else use user-provided URL
|
||||
*/}}
|
||||
{{- define "zookeeper.url" }}
|
||||
{{- $port := .Values.zookeeper.port | toString }}
|
||||
{{- if .Values.zookeeper.enabled -}}
|
||||
{{- printf "%s:%s" (include "kafka.zookeeper.fullname" .) $port }}
|
||||
{{- else -}}
|
||||
{{- $zookeeperConnect := printf "%s-%s:%s" .Release.Name .Values.zookeeper.url $port }}
|
||||
{{- $zookeeperConnectOverride := index .Values "configurationOverrides" "zookeeper.connect" }}
|
||||
{{- default $zookeeperConnect $zookeeperConnectOverride }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Derive offsets.topic.replication.factor in following priority order: configurationOverrides, replicas
|
||||
*/}}
|
||||
{{- define "kafka.replication.factor" }}
|
||||
{{- $replicationFactorOverride := index .Values "configurationOverrides" "offsets.topic.replication.factor" }}
|
||||
{{- default .Values.replicas $replicationFactorOverride }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "kafka.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create unified labels for kafka components
|
||||
*/}}
|
||||
|
||||
{{- define "kafka.common.matchLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "common.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.common.metaLabels" -}}
|
||||
helm.sh/chart: {{ include "kafka.chart" . }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.broker.matchLabels" -}}
|
||||
app.kubernetes.io/component: kafka-broker
|
||||
{{ include "kafka.common.matchLabels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.broker.labels" -}}
|
||||
{{ include "kafka.common.metaLabels" . }}
|
||||
{{ include "kafka.broker.matchLabels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.config.matchLabels" -}}
|
||||
app.kubernetes.io/component: kafka-config
|
||||
{{ include "kafka.common.matchLabels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.config.labels" -}}
|
||||
{{ include "kafka.common.metaLabels" . }}
|
||||
{{ include "kafka.config.matchLabels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.monitor.matchLabels" -}}
|
||||
app.kubernetes.io/component: kafka-monitor
|
||||
{{ include "kafka.common.matchLabels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "kafka.monitor.labels" -}}
|
||||
{{ include "kafka.common.metaLabels" . }}
|
||||
{{ include "kafka.monitor.matchLabels" . }}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "serviceMonitor.namespace" -}}
|
||||
{{- if .Values.prometheus.operator.serviceMonitor.releaseNamespace -}}
|
||||
{{ .Release.Namespace }}
|
||||
{{- else -}}
|
||||
{{ .Values.prometheus.operator.serviceMonitor.namespace }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "prometheusRule.namespace" -}}
|
||||
{{- if .Values.prometheus.operator.prometheusRule.releaseNamespace -}}
|
||||
{{ .Release.Namespace }}
|
||||
{{- else -}}
|
||||
{{ .Values.prometheus.operator.prometheusRule.namespace }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
59
tip-wlan/charts/kafka/templates/configmap-config.yaml
Normal file
59
tip-wlan/charts/kafka/templates/configmap-config.yaml
Normal file
@@ -0,0 +1,59 @@
|
||||
{{- if .Values.topics -}}
|
||||
{{- $zk := include "zookeeper.url" . -}}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kafka.config.labels" . | nindent 4 }}
|
||||
name: {{ include "common.fullname" . }}-config
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
data:
|
||||
runtimeConfig.sh: |
|
||||
#!/bin/bash
|
||||
set -e
|
||||
cd /usr/bin
|
||||
until kafka-configs --zookeeper {{ $zk }} --entity-type topics --describe || (( count++ >= 6 ))
|
||||
do
|
||||
echo "Waiting for Zookeeper..."
|
||||
sleep 20
|
||||
done
|
||||
until nc -z {{ template "common.fullname" . }} 9092 || (( retries++ >= 6 ))
|
||||
do
|
||||
echo "Waiting for Kafka..."
|
||||
sleep 20
|
||||
done
|
||||
echo "Applying runtime configuration using {{ .Values.image }}:{{ .Values.imageTag }}"
|
||||
{{- range $n, $topic := .Values.topics }}
|
||||
{{- if and $topic.partitions $topic.replicationFactor $topic.reassignPartitions }}
|
||||
cat << EOF > {{ $topic.name }}-increase-replication-factor.json
|
||||
{"version":1, "partitions":[
|
||||
{{- $partitions := (int $topic.partitions) }}
|
||||
{{- $replicas := (int $topic.replicationFactor) }}
|
||||
{{- range $i := until $partitions }}
|
||||
{"topic":"{{ $topic.name }}","partition":{{ $i }},"replicas":[{{- range $j := until $replicas }}{{ $j }}{{- if ne $j (sub $replicas 1) }},{{- end }}{{- end }}]}{{- if ne $i (sub $partitions 1) }},{{- end }}
|
||||
{{- end }}
|
||||
]}
|
||||
EOF
|
||||
kafka-reassign-partitions --zookeeper {{ $zk }} --reassignment-json-file {{ $topic.name }}-increase-replication-factor.json --execute
|
||||
kafka-reassign-partitions --zookeeper {{ $zk }} --reassignment-json-file {{ $topic.name }}-increase-replication-factor.json --verify
|
||||
{{- else if and $topic.partitions $topic.replicationFactor }}
|
||||
kafka-topics --zookeeper {{ $zk }} --create --if-not-exists --force --topic {{ $topic.name }} --partitions {{ $topic.partitions }} --replication-factor {{ $topic.replicationFactor }}
|
||||
{{- else if $topic.partitions }}
|
||||
kafka-topics --zookeeper {{ $zk }} --alter --force --topic {{ $topic.name }} --partitions {{ $topic.partitions }} || true
|
||||
{{- end }}
|
||||
{{- if $topic.defaultConfig }}
|
||||
kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --alter --force --delete-config {{ nospace $topic.defaultConfig }} || true
|
||||
{{- end }}
|
||||
{{- if $topic.config }}
|
||||
kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --alter --force --add-config {{ nospace $topic.config }}
|
||||
{{- end }}
|
||||
kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --describe
|
||||
{{- if $topic.acls }}
|
||||
{{- range $a, $acl := $topic.acls }}
|
||||
{{ if and $acl.user $acl.operations }}
|
||||
kafka-acls --authorizer-properties zookeeper.connect={{ $zk }} --force --add --allow-principal User:{{ $acl.user }}{{- range $operation := $acl.operations }} --operation {{ $operation }} {{- end }} --topic {{ $topic.name }} {{ $topic.extraParams }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
17
tip-wlan/charts/kafka/templates/configmap.yaml
Normal file
17
tip-wlan/charts/kafka/templates/configmap.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-clientconfig
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
data:
|
||||
admin-client.properties: |
|
||||
ssl.endpoint.identification.algorithm=
|
||||
security.protocol=SSL
|
||||
ssl.key.password={{ .Values.creds.sslKeyPassword | b64enc }}
|
||||
ssl.keystore.location=/etc/kafka/secrets/kafka-server.pkcs12
|
||||
ssl.keystore.password={{ .Values.creds.sslKeystorePassword | b64enc }}
|
||||
ssl.keystore.type=PKCS12
|
||||
ssl.truststore.location=/etc/kafka/secrets/truststore.jks
|
||||
ssl.truststore.password={{ .Values.creds.sslTruststorePassword | b64enc }}
|
||||
ssl.truststore.type=JKS
|
||||
bootstrap.servers=tip-wlan-kafka-headless:9093
|
||||
30
tip-wlan/charts/kafka/templates/job-config.yaml
Normal file
30
tip-wlan/charts/kafka/templates/job-config.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
{{- if .Values.topics -}}
|
||||
{{- $scriptHash := include (print $.Template.BasePath "/configmap-config.yaml") . | sha256sum | trunc 8 -}}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: "{{ include "common.fullname" . }}-config-{{ $scriptHash }}"
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "kafka.config.labels" . | nindent 4 }}
|
||||
spec:
|
||||
backoffLimit: {{ .Values.configJob.backoffLimit }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kafka.config.matchLabels" . | nindent 8 }}
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: {{ include "common.fullname" . }}-config
|
||||
defaultMode: 0744
|
||||
containers:
|
||||
- name: {{ include "common.fullname" . }}-config
|
||||
image: "{{ .Values.image }}:{{ .Values.imageTag }}"
|
||||
command: ["/usr/local/script/runtimeConfig.sh"]
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: "/usr/local/script"
|
||||
{{- end -}}
|
||||
18
tip-wlan/charts/kafka/templates/secret.yaml
Normal file
18
tip-wlan/charts/kafka/templates/secret.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-certs
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
app: {{ template "common.name" . }}
|
||||
chart: {{ template "common.chart" . }}
|
||||
release: {{ .Release.Name | quote }}
|
||||
type: Opaque
|
||||
data:
|
||||
truststore_creds: {{ .Values.creds.sslTruststorePassword | b64enc }}
|
||||
keystore_creds: {{ .Values.creds.sslKeystorePassword | b64enc }}
|
||||
key_creds: {{ .Values.creds.sslKeyPassword | b64enc }}
|
||||
truststore.jks: {{ .Files.Get "resources/config/certs/truststore.jks" | b64enc }}
|
||||
kafka-server.pkcs12: {{ .Files.Get "resources/config/certs/kafka-server.pkcs12" | b64enc }}
|
||||
README: {{ .Files.Get "resources/config/certs/README.md" | b64enc }}
|
||||
14
tip-wlan/charts/kafka/templates/service-brokers.yaml
Normal file
14
tip-wlan/charts/kafka/templates/service-brokers.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "kafka.broker.labels" . | nindent 4 }}
|
||||
spec:
|
||||
ports:
|
||||
- name: broker
|
||||
port: {{ .Values.headless.sslPort }}
|
||||
targetPort: kafka
|
||||
selector:
|
||||
{{- include "kafka.broker.matchLabels" . | nindent 4 }}
|
||||
27
tip-wlan/charts/kafka/templates/service-headless.yaml
Normal file
27
tip-wlan/charts/kafka/templates/service-headless.yaml
Normal file
@@ -0,0 +1,27 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-headless
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "kafka.broker.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
|
||||
{{- if .Values.headless.annotations }}
|
||||
{{ .Values.headless.annotations | toYaml | trimSuffix "\n" | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
ports:
|
||||
# - name: broker
|
||||
# port: {{ .Values.headless.port }}
|
||||
# {{- if .Values.headless.targetPort }}
|
||||
# targetPort: {{ .Values.headless.targetPort }}
|
||||
# {{- end }}
|
||||
- name: broker
|
||||
port: {{ .Values.headless.sslPort }}
|
||||
{{- if .Values.headless.targetSslPort }}
|
||||
targetPort: {{ .Values.headless.targetSslPort }}
|
||||
{{- end }}
|
||||
clusterIP: None
|
||||
selector:
|
||||
{{- include "kafka.broker.matchLabels" . | nindent 4 }}
|
||||
249
tip-wlan/charts/kafka/templates/statefulset.yaml
Normal file
249
tip-wlan/charts/kafka/templates/statefulset.yaml
Normal file
@@ -0,0 +1,249 @@
|
||||
{{- $advertisedListenersOverride := first (pluck "advertised.listeners" .Values.configurationOverrides) }}
|
||||
{{- $zk := include "zookeeper.service" . -}}
|
||||
{{- $ns := include "common.namespace" . -}}
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}
|
||||
namespace: {{ $ns }}
|
||||
labels:
|
||||
{{- include "kafka.broker.labels" . | nindent 4 }}
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "kafka.broker.matchLabels" . | nindent 6 }}
|
||||
serviceName: {{ include "common.fullname" . }}-headless
|
||||
podManagementPolicy: {{ .Values.podManagementPolicy }}
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.updateStrategy | indent 4 }}
|
||||
replicas: {{ default 3 .Values.replicas }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if .Values.podAnnotations }}
|
||||
{{ toYaml .Values.podAnnotations | indent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "kafka.broker.labels" . | nindent 8 }}
|
||||
{{- if .Values.podLabels }}
|
||||
## Custom pod labels
|
||||
{{ toYaml .Values.podLabels | indent 8 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.schedulerName }}
|
||||
schedulerName: "{{ .Values.schedulerName }}"
|
||||
{{- end }}
|
||||
{{- if .Values.serviceAccountName }}
|
||||
serviceAccountName: {{ .Values.serviceAccountName }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: "{{ .Values.priorityClassName }}"
|
||||
{{- end }}
|
||||
{{- if .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.tolerations | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
initContainers:
|
||||
- name: {{ include "common.name" . }}-readiness
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: "{{ .Values.imagePullPolicy }}"
|
||||
command: ['sh', '-c', "until nslookup {{ $zk }}.{{ $ns }}.svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
|
||||
containers:
|
||||
- name: {{ include "common.name" . }}-broker
|
||||
image: "{{ .Values.image }}:{{ .Values.imageTag }}"
|
||||
imagePullPolicy: "{{ .Values.imagePullPolicy }}"
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- -ec
|
||||
- /usr/bin/jps | /bin/grep -q SupportedKafka
|
||||
{{- if not .Values.livenessProbe }}
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
{{- else }}
|
||||
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds | default 30}}
|
||||
{{- if .Values.livenessProbe.periodSeconds }}
|
||||
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
|
||||
{{- end }}
|
||||
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds | default 5}}
|
||||
{{- if .Values.livenessProbe.successThreshold }}
|
||||
successThreshold: {{ .Values.livenessProbe.successThreshold }}
|
||||
{{- end }}
|
||||
{{- if .Values.livenessProbe.failureThreshold }}
|
||||
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: kafka
|
||||
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
|
||||
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
|
||||
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
|
||||
successThreshold: {{ .Values.readinessProbe.successThreshold }}
|
||||
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
|
||||
ports:
|
||||
- containerPort: {{ .Values.headless.sslPort }}
|
||||
name: kafka
|
||||
{{- if .Values.external.enabled }}
|
||||
{{- $replicas := .Values.replicas | int }}
|
||||
{{- $root := . }}
|
||||
{{- range $i, $e := until $replicas }}
|
||||
- containerPort: {{ add $root.Values.external.firstListenerPort $i }}
|
||||
name: external-{{ $i }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.additionalPorts }}
|
||||
{{ toYaml .Values.additionalPorts | indent 8 }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 10 }}
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: KAFKA_HEAP_OPTS
|
||||
value: {{ .Values.kafkaHeapOptions }}
|
||||
- name: KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR
|
||||
value: {{ include "kafka.replication.factor" . | quote }}
|
||||
{{- if not (hasKey .Values.configurationOverrides "zookeeper.connect") }}
|
||||
- name: KAFKA_ZOOKEEPER_CONNECT
|
||||
value: {{ include "zookeeper.url" . | quote }}
|
||||
{{- end }}
|
||||
{{- if not (hasKey .Values.configurationOverrides "log.dirs") }}
|
||||
- name: KAFKA_LOG_DIRS
|
||||
value: {{ printf "%s/%s" .Values.persistence.mountPath .Values.logSubPath | quote }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.configurationOverrides }}
|
||||
- name: {{ printf "KAFKA_%s" $key | replace "." "_" | upper | quote }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- range $secret := .Values.secrets }}
|
||||
{{- if not $secret.mountPath }}
|
||||
{{- range $key := $secret.keys }}
|
||||
- name: {{ (print ($secret.name | replace "-" "_") "_" $key) | upper }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ $secret.name }}
|
||||
key: {{ $key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- range $key, $value := .Values.envOverrides }}
|
||||
- name: {{ printf "%s" $key | replace "." "_" | upper | quote }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
# This is required because the Downward API does not yet support identification of
|
||||
# pod numbering in statefulsets. Thus, we are required to specify a command which
|
||||
# allows us to extract the pod ID for usage as the Kafka Broker ID.
|
||||
# See: https://github.com/kubernetes/kubernetes/issues/31218
|
||||
command:
|
||||
- sh
|
||||
- -exc
|
||||
- |
|
||||
unset KAFKA_PORT && \
|
||||
export KAFKA_BROKER_ID=${POD_NAME##*-} && \
|
||||
{{- if eq .Values.external.type "LoadBalancer" }}
|
||||
export LOAD_BALANCER_IP=$(echo '{{ .Values.external.loadBalancerIP }}' | tr -d '[]' | cut -d ' ' -f "$(($KAFKA_BROKER_ID + 1))") && \
|
||||
{{- end }}
|
||||
{{- if eq .Values.external.type "NodePort" }}
|
||||
export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${POD_IP}:9092{{ if kindIs "string" $advertisedListenersOverride }}{{ printf ",%s" $advertisedListenersOverride }}{{ end }} && \
|
||||
{{- else }}
|
||||
export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${POD_NAME}.{{ include "common.fullname" . }}-headless.${POD_NAMESPACE}.svc.cluster.local:9092{{ if kindIs "string" $advertisedListenersOverride }}{{ printf ",%s" $advertisedListenersOverride }}{{ end }} && \
|
||||
{{- end }}
|
||||
exec /etc/confluent/docker/run
|
||||
volumeMounts:
|
||||
- mountPath: /etc/kafka/secrets/truststore.jks
|
||||
name: kafka-certificates
|
||||
subPath: truststore.jks
|
||||
- mountPath: /etc/kafka/secrets/kafka-server.pkcs12
|
||||
name: kafka-certificates
|
||||
subPath: kafka-server.pkcs12
|
||||
- mountPath: /etc/kafka/secrets/key_creds
|
||||
name: kafka-certificates
|
||||
subPath: key_creds
|
||||
- mountPath: /etc/kafka/secrets/keystore_creds
|
||||
name: kafka-certificates
|
||||
subPath: keystore_creds
|
||||
- mountPath: /etc/kafka/secrets/truststore_creds
|
||||
name: kafka-certificates
|
||||
subPath: truststore_creds
|
||||
- mountPath: /etc/kafka/admin-client.properties
|
||||
name: kafka-client-config
|
||||
subPath: admin-client.properties
|
||||
- name: datadir
|
||||
mountPath: {{ .Values.persistence.mountPath | quote }}
|
||||
{{- range $secret := .Values.secrets }}
|
||||
{{- if $secret.mountPath }}
|
||||
{{- if $secret.keys }}
|
||||
{{- range $key := $secret.keys }}
|
||||
- name: {{ include "common.fullname" $ }}-{{ $secret.name }}
|
||||
mountPath: {{ $secret.mountPath }}/{{ $key }}
|
||||
subPath: {{ $key }}
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
- name: {{ include "common.fullname" $ }}-{{ $secret.name }}
|
||||
mountPath: {{ $secret.mountPath }}
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: kafka-certificates
|
||||
secret:
|
||||
secretName: {{ include "common.fullname" . }}-certs
|
||||
- name: kafka-client-config
|
||||
configMap:
|
||||
name: {{ include "common.fullname" . }}-clientconfig
|
||||
{{- if not .Values.persistence.enabled }}
|
||||
- name: datadir
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.securityContext }}
|
||||
securityContext:
|
||||
{{ toYaml .Values.securityContext | indent 8 }}
|
||||
{{- end }}
|
||||
{{- range .Values.secrets }}
|
||||
{{- if .mountPath }}
|
||||
- name: {{ include "common.fullname" $ }}-{{ .name }}
|
||||
secret:
|
||||
secretName: {{ .name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
|
||||
{{- if .Values.persistence.enabled }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: datadir
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.size }}
|
||||
{{- if .Values.persistence.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.persistence.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,60 @@
|
||||
{{- if .Values.testsEnabled -}}
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-test-topic-create-produce-consume
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
annotations:
|
||||
"helm.sh/hook": test-success
|
||||
spec:
|
||||
containers:
|
||||
- name: {{ include "common.name" . }}-test-consume
|
||||
image: {{ .Values.image }}:{{ .Values.imageTag }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
# List topics:
|
||||
echo "##### Listing existing topics #####"
|
||||
kafka-topics --zookeeper {{ include "zookeeper.url" . }} --list
|
||||
# Create the topic
|
||||
echo "##### Create topic helm-test-topic-create-consume-produce #####"
|
||||
kafka-topics --zookeeper {{ include "zookeeper.url" . }} --topic helm-test-topic-create-consume-produce --create --partitions 1 --replication-factor 1 --if-not-exists
|
||||
echo "##### Produce the test message #####"
|
||||
# Create a message
|
||||
MESSAGE="`date -u`"
|
||||
# Produce a test message to the topic
|
||||
echo "$MESSAGE" | kafka-console-producer --broker-list {{ include "common.fullname" . }}-headless:9093 --producer.config /etc/kafka/admin-client.properties --topic helm-test-topic-create-consume-produce
|
||||
echo "##### Consume the test message from the topic #####"
|
||||
# Consume a test message from the topic
|
||||
kafka-console-consumer --bootstrap-server {{ include "common.fullname" . }}-headless:9093 --consumer.config /etc/kafka/admin-client.properties --topic helm-test-topic-create-consume-produce --from-beginning --timeout-ms 2000 --max-messages 1 | grep "$MESSAGE"
|
||||
echo "##### Listing current topics including our new topic #####"
|
||||
kafka-topics --zookeeper {{ include "zookeeper.url" . }} --list
|
||||
# Delete the messages from topic
|
||||
echo "##### Delete messages from our topic #####"
|
||||
kafka-configs --zookeeper {{ include "zookeeper.url" . }} --alter --entity-type topics --entity-name helm-test-topic-create-consume-produce --add-config retention.ms=1000
|
||||
# Mark topic for deletion
|
||||
echo "##### Mark our topic for Deletion #####"
|
||||
kafka-topics --zookeeper {{ include "zookeeper.url" . }} --delete --topic helm-test-topic-create-consume-produce
|
||||
# List topics:
|
||||
echo "##### Listing topics after deleting our newly created topic #####"
|
||||
kafka-topics --zookeeper {{ include "zookeeper.url" . }} --list
|
||||
volumeMounts:
|
||||
- mountPath: /etc/kafka/admin-client.properties
|
||||
name: kafka-client-config
|
||||
subPath: admin-client.properties
|
||||
- mountPath: /etc/kafka/secrets/truststore.jks
|
||||
name: kafka-certificates
|
||||
subPath: truststore.jks
|
||||
- mountPath: /etc/kafka/secrets/kafka-server.pkcs12
|
||||
name: kafka-certificates
|
||||
subPath: kafka-server.pkcs12
|
||||
restartPolicy: Never
|
||||
volumes:
|
||||
- name: kafka-client-config
|
||||
configMap:
|
||||
name: {{ include "common.fullname" . }}-clientconfig
|
||||
- name: kafka-certificates
|
||||
secret:
|
||||
secretName: {{ include "common.fullname" . }}-certs
|
||||
{{- end }}
|
||||
360
tip-wlan/charts/kafka/values.yaml
Normal file
360
tip-wlan/charts/kafka/values.yaml
Normal file
@@ -0,0 +1,360 @@
|
||||
# ------------------------------------------------------------------------------
|
||||
# Kafka:
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
## The StatefulSet installs 1 pod by default
|
||||
replicas: 1
|
||||
|
||||
## The kafka image repository
|
||||
image: "confluentinc/cp-kafka"
|
||||
# image: "wurstmeister/kafka"
|
||||
|
||||
## The kafka image tag
|
||||
imageTag: "5.0.1" # Confluent image for Kafka 2.0.0
|
||||
# imageTag: "latest"
|
||||
|
||||
## Specify a imagePullPolicy
|
||||
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
|
||||
## Configure resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 200m
|
||||
# memory: 1536Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 1024Mi
|
||||
kafkaHeapOptions: "-Xmx1G -Xms1G"
|
||||
|
||||
## Optional Container Security context
|
||||
securityContext: {}
|
||||
|
||||
## The StatefulSet Update Strategy which Kafka will use when changes are applied.
|
||||
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
|
||||
updateStrategy:
|
||||
type: "OnDelete"
|
||||
|
||||
## Start and stop pods in Parallel or OrderedReady (one-by-one.) Note - Can not change after first release.
|
||||
## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
|
||||
podManagementPolicy: OrderedReady
|
||||
|
||||
## Useful if using any custom authorizer
|
||||
## Pass in some secrets to use (if required)
|
||||
# secrets:
|
||||
# - name: myKafkaSecret
|
||||
# keys:
|
||||
# - username
|
||||
# - password
|
||||
# # mountPath: /opt/kafka/secret
|
||||
# - name: myZkSecret
|
||||
# keys:
|
||||
# - user
|
||||
# - pass
|
||||
# mountPath: /opt/zookeeper/secret
|
||||
|
||||
|
||||
## The subpath within the Kafka container's PV where logs will be stored.
|
||||
## This is combined with `persistence.mountPath`, to create, by default: /opt/kafka/data/logs
|
||||
logSubPath: "logs"
|
||||
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName:
|
||||
|
||||
## Use an alternate serviceAccount
|
||||
## Useful when using images in custom repositories
|
||||
# serviceAccountName:
|
||||
|
||||
## Set a pod priorityClassName
|
||||
# priorityClassName: high-priority
|
||||
|
||||
## Pod scheduling preferences (by default keep pods within a release on separate nodes).
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
## By default we don't set affinity
|
||||
affinity: {}
|
||||
## Alternatively, this typical example defines:
|
||||
## antiAffinity (to keep Kafka pods on separate pods)
|
||||
## and affinity (to encourage Kafka pods to be collocated with Zookeeper pods)
|
||||
# affinity:
|
||||
# podAntiAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# - labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app
|
||||
# operator: In
|
||||
# values:
|
||||
# - kafka
|
||||
# topologyKey: "kubernetes.io/hostname"
|
||||
# podAffinity:
|
||||
# preferredDuringSchedulingIgnoredDuringExecution:
|
||||
# - weight: 50
|
||||
# podAffinityTerm:
|
||||
# labelSelector:
|
||||
# matchExpressions:
|
||||
# - key: app
|
||||
# operator: In
|
||||
# values:
|
||||
# - zookeeper
|
||||
# topologyKey: "kubernetes.io/hostname"
|
||||
|
||||
## Node labels for pod assignment
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
|
||||
nodeSelector: {}
|
||||
|
||||
## Readiness probe config.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
|
||||
##
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
|
||||
## Period to wait for broker graceful shutdown (sigterm) before pod is killed (sigkill)
|
||||
## ref: https://kubernetes-v1-4.github.io/docs/user-guide/production-pods/#lifecycle-hooks-and-termination-notice
|
||||
## ref: https://kafka.apache.org/10/documentation.html#brokerconfigs controlled.shutdown.*
|
||||
terminationGracePeriodSeconds: 60
|
||||
|
||||
# Tolerations for nodes that have taints on them.
|
||||
# Useful if you want to dedicate nodes to just run kafka
|
||||
# https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
|
||||
tolerations: []
|
||||
# tolerations:
|
||||
# - key: "key"
|
||||
# operator: "Equal"
|
||||
# value: "value"
|
||||
# effect: "NoSchedule"
|
||||
|
||||
## Headless service.
|
||||
##
|
||||
headless:
|
||||
# annotations:
|
||||
# targetPort:
|
||||
port: 9092
|
||||
sslPort: 9093
|
||||
|
||||
## External access.
|
||||
##
|
||||
external:
|
||||
enabled: false
|
||||
# type can be either NodePort or LoadBalancer
|
||||
type: NodePort
|
||||
# annotations:
|
||||
# service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
|
||||
dns:
|
||||
useInternal: false
|
||||
useExternal: true
|
||||
# If using external service type LoadBalancer and external dns, set distinct to true below.
|
||||
# This creates an A record for each statefulset pod/broker. You should then map the
|
||||
# A record of the broker to the EXTERNAL IP given by the LoadBalancer in your DNS server.
|
||||
distinct: false
|
||||
servicePort: 19092
|
||||
firstListenerPort: 31090
|
||||
domain: cluster.local
|
||||
loadBalancerIP: []
|
||||
loadBalancerSourceRanges: []
|
||||
init:
|
||||
image: "lwolf/kubectl_deployer"
|
||||
imageTag: "0.4"
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
|
||||
# Annotation to be added to Kafka pods
|
||||
podAnnotations: {}
|
||||
|
||||
# Labels to be added to Kafka pods
|
||||
podLabels: {}
|
||||
# service: broker
|
||||
# team: developers
|
||||
|
||||
podDisruptionBudget: {}
|
||||
# maxUnavailable: 1 # Limits how many Kafka pods may be unavailable due to voluntary disruptions.
|
||||
|
||||
## Configuration Overrides. Specify any Kafka settings you would like set on the StatefulSet
|
||||
## here in map format, as defined in the official docs.
|
||||
## ref: https://kafka.apache.org/documentation/#brokerconfigs
|
||||
##
|
||||
configurationOverrides:
|
||||
"confluent.support.metrics.enable": false # Disables confluent metric submission
|
||||
# "auto.leader.rebalance.enable": true
|
||||
# "auto.create.topics.enable": true
|
||||
# "controlled.shutdown.enable": true
|
||||
# "controlled.shutdown.max.retries": 100
|
||||
# "ssl.secret.dir": "/opt/tip-wlan/certs"
|
||||
# "ssl.keystore.filename": "kafka-server.pkcs12"
|
||||
# "ssl.key.credentials": "mypassword"
|
||||
# "ssl.truststore.filename": "kafka_server_keystore.jks"
|
||||
# "ssl.truststore.credentials": "mypassword"
|
||||
advertised.listeners: SSL://tip-wlan-kafka-headless:9093
|
||||
ssl.client.auth: required
|
||||
ssl.endpoint.identification.algorithm: ""
|
||||
security.inter.broker.protocol: SSL
|
||||
ssl.key.credentials: "key_creds"
|
||||
ssl.keystore.filename: "kafka-server.pkcs12"
|
||||
ssl.keystore.credentials: "keystore_creds"
|
||||
ssl.keystore.type: "PKCS12"
|
||||
ssl.truststore.filename: "truststore.jks"
|
||||
ssl.truststore.credentials: "truststore_creds"
|
||||
ssl.truststore.type: "JKS"
|
||||
|
||||
## Options required for external access via NodePort
|
||||
## ref:
|
||||
## - http://kafka.apache.org/documentation/#security_configbroker
|
||||
## - https://cwiki.apache.org/confluence/display/KAFKA/KIP-103%3A+Separation+of+Internal+and+External+traffic
|
||||
##
|
||||
## Setting "advertised.listeners" here appends to "PLAINTEXT://${POD_IP}:9092,", ensure you update the domain
|
||||
## If external service type is Nodeport:
|
||||
# "advertised.listeners": |-
|
||||
# EXTERNAL://kafka.cluster.local:$((31090 + ${KAFKA_BROKER_ID}))
|
||||
## If external service type is LoadBalancer and distinct is true:
|
||||
# "advertised.listeners": |-
|
||||
# EXTERNAL://kafka-$((${KAFKA_BROKER_ID})).cluster.local:19092
|
||||
## If external service type is LoadBalancer and distinct is false:
|
||||
# "advertised.listeners": |-
|
||||
# EXTERNAL://${LOAD_BALANCER_IP}:31090
|
||||
## Uncomment to define the EXTERNAL Listener protocol
|
||||
# "listener.security.protocol.map": |-
|
||||
# PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
|
||||
|
||||
## set extra ENVs
|
||||
# key: "value"
|
||||
envOverrides: {}
|
||||
|
||||
|
||||
## A collection of additional ports to expose on brokers (formatted as normal containerPort yaml)
|
||||
# Useful when the image exposes metrics (like prometheus, etc.) through a javaagent instead of a sidecar
|
||||
additionalPorts: {}
|
||||
|
||||
## Persistence configuration. Specify if and how to persist data to a persistent volume.
|
||||
##
|
||||
persistence:
|
||||
enabled: false
|
||||
|
||||
## The size of the PersistentVolume to allocate to each Kafka Pod in the StatefulSet. For
|
||||
## production servers this number should likely be much larger.
|
||||
##
|
||||
size: 1Gi
|
||||
accessMode: ReadWriteOnce
|
||||
## The location within the Kafka container where the PV will mount its storage and Kafka will
|
||||
## store its logs.
|
||||
##
|
||||
mountPath: "/opt/kafka/data"
|
||||
|
||||
## Kafka data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
storageClass: "-"
|
||||
|
||||
## Kafka Config job configuration
|
||||
##
|
||||
configJob:
|
||||
## Specify the number of retries before considering kafka-config job as failed.
|
||||
## https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
|
||||
backoffLimit: 6
|
||||
|
||||
## Topic creation and configuration.
|
||||
## The job will be run on a deployment only when the config has been changed.
|
||||
## - If 'partitions' and 'replicationFactor' are specified we create the topic (with --if-not-exists.)
|
||||
## - If 'partitions', 'replicationFactor' and 'reassignPartitions' are specified we reassign the partitions to
|
||||
## increase the replication factor of an existing topic.
|
||||
## - If 'partitions' is specified we 'alter' the number of partitions. This will
|
||||
## silently and safely fail if the new setting isn’t strictly larger than the old (i.e. a NOOP.) Do be aware of the
|
||||
## implications for keyed topics (ref: https://docs.confluent.io/current/kafka/post-deployment.html#admin-operations)
|
||||
## - If 'defaultConfig' is specified it's deleted from the topic configuration. If it isn't present,
|
||||
## it will silently and safely fail.
|
||||
## - If 'config' is specified it's added to the topic configuration.
|
||||
##
|
||||
## Note: To increase the 'replicationFactor' of a topic, 'reassignPartitions' must be set to true (see above).
|
||||
##
|
||||
topics:
|
||||
# - name: myExistingTopicConfig
|
||||
# config: "cleanup.policy=compact,delete.retention.ms=604800000"
|
||||
# - name: myExistingTopicReassignPartitions
|
||||
# partitions: 8
|
||||
# replicationFactor: 5
|
||||
# reassignPartitions: true
|
||||
- name: wlan_service_metrics
|
||||
partitions: 1
|
||||
replicationFactor: 1
|
||||
- name: system_events
|
||||
partitions: 1
|
||||
replicationFactor: 1
|
||||
- name: customer_events
|
||||
partitions: 1
|
||||
replicationFactor: 1
|
||||
# - name: myNewTopicWithConfig
|
||||
# partitions: 8
|
||||
# replicationFactor: 3
|
||||
# defaultConfig: "segment.bytes,segment.ms"
|
||||
# config: "cleanup.policy=compact,delete.retention.ms=604800000"
|
||||
# - name: myAclTopicPartitions
|
||||
# partitions: 8
|
||||
# acls:
|
||||
# - user: read
|
||||
# operations: [ Read ]
|
||||
# - user: read_and_write
|
||||
# operations:
|
||||
# - Read
|
||||
# - Write
|
||||
# - user: all
|
||||
# operations: [ All ]
|
||||
|
||||
## Enable/disable the chart's tests. Useful if using this chart as a dependency of
|
||||
## another chart and you don't want these tests running when trying to develop and
|
||||
## test your own chart.
|
||||
testsEnabled: true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Zookeeper:
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
zookeeper:
|
||||
## If true, install the Zookeeper chart alongside Kafka
|
||||
## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper
|
||||
enabled: false
|
||||
|
||||
## Configure Zookeeper resource requests and limits
|
||||
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
|
||||
resources: ~
|
||||
|
||||
## Environmental variables to set in Zookeeper
|
||||
env:
|
||||
## The JVM heap size to allocate to Zookeeper
|
||||
ZK_HEAP_SIZE: "1G"
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
## The amount of PV storage allocated to each Zookeeper pod in the statefulset
|
||||
# size: "2Gi"
|
||||
|
||||
## Specify a Zookeeper imagePullPolicy
|
||||
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
|
||||
image:
|
||||
PullPolicy: "IfNotPresent"
|
||||
|
||||
## If the Zookeeper Chart is disabled a URL and port are required to connect
|
||||
url: "zookeeper-headless"
|
||||
port: 2181
|
||||
|
||||
## Pod scheduling preferences (by default keep pods within a release on separate nodes).
|
||||
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
|
||||
## By default we don't set affinity:
|
||||
affinity: {} # Criteria by which pod label-values influence scheduling for zookeeper pods.
|
||||
# podAntiAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# - topologyKey: "kubernetes.io/hostname"
|
||||
# labelSelector:
|
||||
# matchLabels:
|
||||
# release: zookeeper
|
||||
|
||||
creds:
|
||||
sslTruststorePassword: DUMMY_PASSWORD
|
||||
sslKeystorePassword: DUMMY_PASSWORD
|
||||
sslKeyPassword: DUMMY_PASSWORD
|
||||
@@ -40,7 +40,7 @@ controller:
|
||||
tag: "1.7.0"
|
||||
|
||||
## The pull policy for the Ingress controller image.
|
||||
pullPolicy: Always
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
config:
|
||||
## The name of the ConfigMap used by the Ingress controller.
|
||||
@@ -92,13 +92,13 @@ controller:
|
||||
terminationGracePeriodSeconds: 30
|
||||
|
||||
## The resources of the Ingress controller pods.
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 140Mi
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 64Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 64Mi
|
||||
|
||||
## The tolerations of the Ingress controller pods.
|
||||
tolerations: []
|
||||
|
||||
@@ -27,17 +27,47 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
initContainers:
|
||||
- name: wait-for-services
|
||||
image: opsfleet/depends-on:latest
|
||||
args:
|
||||
- "-service={{ .Release.Name }}-opensync-mqtt-broker"
|
||||
- "-service={{ .Release.Name }}-wlan-prov-service"
|
||||
- "-service={{ .Release.Name }}-wlan-ssc-service"
|
||||
- -check_interval=5
|
||||
- name: {{ include "common.name" . }}-mqtt-readiness
|
||||
image: eclipse-mosquitto:latest
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
mosquitto_pub -h {{ $mqtt }} -p 1883 --cafile /certs/cacert.pem --cert /certs/clientcert.pem --key /certs/clientkey.pem --insecure -t "/ap/test" -q 0 -m "CheckingMQTTAliveness"
|
||||
status=$(echo $?)
|
||||
echo mosquitto_pub response of the request = $status
|
||||
counter=0
|
||||
while [ $counter -lt 10 ] && [ $status -ne 0 ]
|
||||
do
|
||||
echo {{ $mqtt }} service isnt ready. Tried $counter times
|
||||
sleep 2
|
||||
counter=`expr $counter + 1`
|
||||
mosquitto_pub -h {{ $mqtt }} -p 1883 --cafile /certs/cacert.pem --cert /certs/clientcert.pem --key /certs/clientkey.pem --insecure -t "/ap/test" -q 0 -m "CheckingMQTTAliveness"
|
||||
status=$(echo $?)
|
||||
echo mosquitto_pub response of the request = $status
|
||||
done
|
||||
if [ $status -eq 0 ]
|
||||
then
|
||||
echo {{ $mqtt }} service is ready!
|
||||
else
|
||||
echo {{ $mqtt }} service failed to respond after 20 secs
|
||||
exit 1
|
||||
fi
|
||||
volumeMounts:
|
||||
- mountPath: /certs/cacert.pem
|
||||
name: certificates
|
||||
subPath: cacert.pem
|
||||
- mountPath: /certs/clientcert.pem
|
||||
name: certificates
|
||||
subPath: clientcert.pem
|
||||
- mountPath: /certs/clientkey.pem
|
||||
name: certificates
|
||||
subPath: clientkey.pem
|
||||
{{- if .Values.global.integratedDeployment }}
|
||||
- name: {{ include "common.name" . }}-readiness-int-cloud
|
||||
image: alpine
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
@@ -46,7 +76,34 @@ spec:
|
||||
url=https://{{ $icc }}/ping
|
||||
counter=0
|
||||
status=$(curl --insecure --head --location --connect-timeout 5 --write-out %{http_code} --silent --output /dev/null ${url});
|
||||
while [ $counter -lt 10 ] && [ $status -ne 200 ]
|
||||
while [ $counter -lt 10 ] && [ $status -ne 200 ]
|
||||
do
|
||||
echo ${url} service isnt ready. Tried $counter times
|
||||
sleep 5
|
||||
counter=`expr $counter + 1`
|
||||
status=$(curl --insecure --head --location --connect-timeout 5 --write-out %{http_code} --silent --output /dev/null ${url});
|
||||
echo Http Response code of ping request = $status
|
||||
done
|
||||
if [ $status -eq 200 ]
|
||||
then
|
||||
echo ${url} service is ready!
|
||||
else
|
||||
echo ${url} service failed to respond after 50 secs
|
||||
exit 1
|
||||
fi
|
||||
{{- else }}
|
||||
- name: {{ include "common.name" . }}-readiness-prov
|
||||
image: alpine
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
apk add curl
|
||||
url=https://{{ $prov }}/ping
|
||||
counter=0
|
||||
status=$(curl --insecure --head --location --connect-timeout 5 --write-out %{http_code} --silent --output /dev/null ${url});
|
||||
while [ $counter -lt 10 ] && [ $status -ne 200 ]
|
||||
do
|
||||
echo ${url} service isnt ready. Tried $counter times
|
||||
sleep 5
|
||||
@@ -61,22 +118,48 @@ spec:
|
||||
echo ${url} service failed to respond after 50 secs
|
||||
exit 1
|
||||
fi
|
||||
- name: {{ include "common.name" . }}-readiness-ssc
|
||||
image: alpine
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
apk add curl
|
||||
url=https://{{ $ssc }}/ping
|
||||
counter=0
|
||||
status=$(curl --insecure --head --location --connect-timeout 5 --write-out %{http_code} --silent --output /dev/null ${url});
|
||||
while [ $counter -lt 10 ] && [ $status -ne 200 ]
|
||||
do
|
||||
echo ${url} service isnt ready. Tried $counter times
|
||||
sleep 5
|
||||
counter=`expr $counter + 1`
|
||||
status=$(curl --insecure --head --location --connect-timeout 5 --write-out %{http_code} --silent --output /dev/null ${url});
|
||||
echo Http Response code of ping request = $status
|
||||
done
|
||||
if [ $status -eq 200 ]
|
||||
then
|
||||
echo ${url} service is ready!
|
||||
else
|
||||
echo ${url} service failed to respond after 50 secs
|
||||
exit 1
|
||||
fi
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: {{ .Values.service.port2 }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.probes.livenessProbe.timeoutSeconds }}
|
||||
failureThreshold: {{ .Values.probes.livenessProbe.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }}
|
||||
@@ -127,22 +210,6 @@ spec:
|
||||
value: {{ .Values.ethernetType.wanType }}
|
||||
- name: DEFAULT_WAN_NAME
|
||||
value: {{ .Values.ethernetType.wanName }}
|
||||
- name: tip_wlan_ovsdb_listener_threadPoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_ovsdb_listener_threadPoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_CorePoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_CorePoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_MaxPoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_MaxPoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_QueueCapacity
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_QueueCapacity }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsTotal
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsTotal }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsPerRoute
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsPerRoute }}"
|
||||
- name: tip_wlan_maxHttpThreads
|
||||
value: "{{ .Values.scalability.tip_wlan_maxHttpThreads }}"
|
||||
- name: JVM_MEM_OPTIONS
|
||||
value: "{{ .Values.scalability.JVM_MEM_OPTIONS }}"
|
||||
volumeMounts:
|
||||
- mountPath: /opt/tip-wlan/certs/client_keystore.jks
|
||||
name: certificates
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
rules:
|
||||
- apiGroups: ["batch", "apps", ""]
|
||||
resources: ["pods", "services", "jobs"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "common.serviceAccountName" . }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -5,10 +5,6 @@ metadata:
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "common.labels" . | nindent 4 }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
@@ -16,38 +12,28 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port3 }}
|
||||
targetPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name3 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort3 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port4 }}
|
||||
targetPort: {{ .Values.service.port4 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name4 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.debug.enabled }}
|
||||
- port: {{ .Values.service.port5 }}
|
||||
targetPort: {{ .Values.service.port5 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name5 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort5 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "common.selectorLabels" . | nindent 4 }}
|
||||
|
||||
@@ -57,7 +57,7 @@ probes:
|
||||
testsEnabled: false
|
||||
|
||||
# Enable/Disable Remote debugging
|
||||
debug:
|
||||
debug:
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
@@ -77,8 +77,6 @@ service:
|
||||
port5: 5005
|
||||
name5: debug
|
||||
nodePort5: 26
|
||||
annotations: {}
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
@@ -87,9 +85,9 @@ persistence:
|
||||
|
||||
# the filestore internal: location of the folder where UI files will be stored
|
||||
# on the PV
|
||||
# the filestore url: externally reachable URL i.e.; reachable from AP, where it
|
||||
# the filestore url: externally reachable URL i.e.; reachable from AP, where it
|
||||
# can download the files from. Override this value (url) to the configured
|
||||
# HTTP server in your system
|
||||
# HTTP server in your system
|
||||
filestore:
|
||||
internal: "/tmp/filestore"
|
||||
url: DUMMY_FILESTORE_HTTPS_URL
|
||||
@@ -117,7 +115,7 @@ portal:
|
||||
|
||||
|
||||
# These are the address and ports for the externalhost
|
||||
# This is important for ovsdb and mqtt since
|
||||
# This is important for ovsdb and mqtt since
|
||||
# that's what AP sees. Please make sure to override
|
||||
# them in dev override file for your respective environments.
|
||||
# the default values below would be used if not overriden
|
||||
@@ -137,22 +135,6 @@ ethernetType:
|
||||
wanType: "bridge"
|
||||
wanName: "wan"
|
||||
|
||||
scalability:
|
||||
#how many concurrent connections single instance of OpenSyncGateway can accept
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
@@ -166,13 +148,17 @@ ingress:
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 750Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 500Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
|
||||
@@ -25,11 +25,11 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
|
||||
@@ -6,18 +6,7 @@ use_identity_as_username true
|
||||
allow_anonymous false
|
||||
allow_duplicate_messages true
|
||||
autosave_interval 900
|
||||
log_dest file /mosquitto/log/mosquitto.log
|
||||
log_timestamp true
|
||||
log_timestamp_format %Y-%m-%dT%H:%M:%S
|
||||
log_type error
|
||||
log_type warning
|
||||
log_type notice
|
||||
log_type information
|
||||
# log_type debug
|
||||
# log_type websockets
|
||||
# log_type subscribe
|
||||
# log_type all
|
||||
connection_messages true
|
||||
log_dest stdout
|
||||
max_queued_bytes 0
|
||||
max_queued_messages 0
|
||||
message_size_limit 0
|
||||
|
||||
@@ -5,10 +5,6 @@ metadata:
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "common.labels" . | nindent 4 }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
@@ -16,15 +12,11 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "common.selectorLabels" . | nindent 4 }}
|
||||
|
||||
@@ -45,8 +45,7 @@ spec:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
initContainers:
|
||||
- name: {{ include "common.name" . }}-init-dir-ownership-change
|
||||
image: {{ .Values.alpine.image }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
image: alpine:3.6
|
||||
# Change ownership to `mosquitto` user for a mounted volume
|
||||
command:
|
||||
- sh
|
||||
@@ -64,7 +63,7 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: {{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
|
||||
@@ -6,10 +6,7 @@ replicaCount: 1
|
||||
|
||||
image:
|
||||
name: eclipse-mosquitto
|
||||
tag: 2.0.3
|
||||
|
||||
alpine:
|
||||
image: alpine:3.6
|
||||
tag: latest
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
@@ -64,8 +61,6 @@ service:
|
||||
port2: 9001
|
||||
name2: debug
|
||||
nodePort2: 32
|
||||
annotations: {}
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
@@ -80,13 +75,17 @@ ingress:
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 128Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
|
||||
@@ -25,9 +25,9 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
|
||||
@@ -24,9 +24,7 @@ spec:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- if .secretName }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
@@ -34,12 +32,6 @@ spec:
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- if $.Values.ingress.lb_https_redirect }}
|
||||
- path: /*
|
||||
backend:
|
||||
serviceName: ssl-redirect
|
||||
servicePort: use-annotation
|
||||
{{- end }}
|
||||
{{- range .paths }}
|
||||
- path: {{ . }}
|
||||
backend:
|
||||
|
||||
@@ -12,8 +12,6 @@ spec:
|
||||
targetPort: {{ .Values.service.port }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort }}
|
||||
{{- end }}
|
||||
selector:
|
||||
{{- include "common.selectorLabels" . | nindent 4 }}
|
||||
|
||||
@@ -51,11 +51,9 @@ service:
|
||||
port: 4000
|
||||
name: graphui
|
||||
nodePort: 23
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
lb_https_redirect: false ## if set to true, enables http->https redirect on cloud load balancer
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
@@ -69,13 +67,17 @@ ingress:
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 128Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
|
||||
@@ -25,11 +25,11 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
- name: API
|
||||
value: {{ .Values.env.graphql }}
|
||||
|
||||
@@ -24,9 +24,7 @@ spec:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- if .secretName }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
@@ -34,12 +32,6 @@ spec:
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- if $.Values.ingress.lb_https_redirect }}
|
||||
- path: /*
|
||||
backend:
|
||||
serviceName: ssl-redirect
|
||||
servicePort: use-annotation
|
||||
{{- end }}
|
||||
{{- range .paths }}
|
||||
- path: {{ . }}
|
||||
backend:
|
||||
|
||||
@@ -5,10 +5,6 @@ metadata:
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "common.labels" . | nindent 4 }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
|
||||
@@ -50,7 +50,6 @@ service:
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
lb_https_redirect: false ## if set to true, enables http->https redirect on cloud load balancer
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
@@ -64,13 +63,17 @@ ingress:
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 128Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
|
||||
@@ -27,7 +27,7 @@ spec:
|
||||
initContainers:
|
||||
- name: {{ include "common.name" . }}-readiness
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command: ['sh', '-c', "until nslookup {{ $pg }}.{{ $ns }}.svc.cluster.local; do echo waiting for POSTGRES; sleep 2; done"]
|
||||
- name: {{ include "common.name" . }}-create-db-schema
|
||||
env:
|
||||
@@ -49,7 +49,7 @@ spec:
|
||||
name: {{ include "common.fullname" . }}-creds
|
||||
key: schema-repo-password
|
||||
image: postgres:latest
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
@@ -59,7 +59,7 @@ spec:
|
||||
echo "***** Fetching cloud-sdk-schema-postgresql.sql from JFrog *****"
|
||||
curl -u$SCHEMA_REPO_USER:$SCHEMA_REPO_PASSWORD -O "https://tip-tip-wlan-cloud-docker-repo.jfrog.io/artifactory/tip-wlan-cloud-schemas/0.0.1-SNAPSHOT/sql/cloud-sdk-schema-postgresql.sql"
|
||||
echo "***** Now executing cloud-sdk-schema-postgresql.sql and creating/updating schema on Postgres instance *****"
|
||||
PGPASSWORD=$POSTGRES_PASSWORD psql -h {{- include "postgresql.service" . -}} -U postgres -f cloud-sdk-schema-postgresql.sql
|
||||
PGPASSWORD=$POSTGRES_PASSWORD psql -h tip-wlan-postgresql-headless -U postgres -f cloud-sdk-schema-postgresql.sql
|
||||
exit
|
||||
ports:
|
||||
- containerPort: 5432
|
||||
@@ -67,7 +67,7 @@ spec:
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
cpu: 250m
|
||||
memory: 256Mi
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
@@ -82,11 +82,11 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
- name: BACKEND_SERVER
|
||||
value: {{ .Release.Name }}-{{ .Chart.Name }}
|
||||
|
||||
56
tip-wlan/charts/wlan-port-forwarding-gateway-service/resources/config/run.sh
Executable file
56
tip-wlan/charts/wlan-port-forwarding-gateway-service/resources/config/run.sh
Executable file
@@ -0,0 +1,56 @@
|
||||
#!/bin/bash
|
||||
|
||||
# local_port_range that Java process can use
|
||||
# These are then assigned to the container ports (in the deployment.yaml) which can either:
|
||||
# later be opened by the port-forwarding-gateway service as NodePorts (preferred)
|
||||
# or use kubectl port-forwarding to forward the container ports. Example:
|
||||
# kubectl port-forward pods/<port-forwarding-gw-pod> <local-machine-port>:<debugPort on the Pod>
|
||||
sysctl -w net.ipv4.ip_local_port_range="30410 30435"
|
||||
|
||||
PROFILES=" -Dspring.profiles.include=use_ssl_with_client_cert_and_digest_auth,client_certificate_and_digest_auth,RestTemplateConfiguration_X509_client_cert_auth"
|
||||
|
||||
LOGGING_PROPS=" -Dlogging.config=file:/app/port-forwarding-gateway/logback.xml"
|
||||
|
||||
# SSC_URL: something like https://${SSC_SERVER_HOST}:9031
|
||||
SSC_URL=${SSC_RELEASE_URL}
|
||||
# PROV_URL: something like https://${PROV_SERVER_HOST}:9091
|
||||
PROV_URL=${PROV_RELEASE_URL}
|
||||
# PF_GATEWAY_URL: something like https://${PF_GATEWAY_SERVER_HOST}:7070
|
||||
PF_GATEWAY_URL=${PF_GATEWAY_RELEASE_URL}
|
||||
PF_GATEWAY_ENCRYPTION_KEY=${PF_GATEWAY_RELEASE_ENCRYPTION_KEY:='MyToKeN0MyToKeN1'}
|
||||
PF_GATEWAY_EXT_HOST=${PF_GATEWAY_RELEASE_EXT_HOST:=''}
|
||||
PF_GATEWAY_EXT_PORT=${PF_GATEWAY_RELEASE_EXT_PORT:='0'}
|
||||
|
||||
|
||||
# SSC URLs
|
||||
HOST_PROPS=" "
|
||||
HOST_PROPS+=" -Dtip.wlan.cloudEventDispatcherBaseUrl=$SSC_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.statusServiceBaseUrl=$SSC_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.routingServiceBaseUrl=$SSC_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.alarmServiceBaseUrl=$SSC_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.systemEventServiceBaseUrl=$SSC_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.clientServiceBaseUrl=$SSC_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.serviceMetricServiceBaseUrl=$SSC_URL"
|
||||
|
||||
# PROV URLs
|
||||
HOST_PROPS+=" -Dtip.wlan.customerServiceBaseUrl=$PROV_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.portalUserServiceBaseUrl=$PROV_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.firmwareServiceBaseUrl=$PROV_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.locationServiceBaseUrl=$PROV_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.manufacturerServiceBaseUrl=$PROV_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.equipmentServiceBaseUrl=$PROV_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.profileServiceBaseUrl=$PROV_URL"
|
||||
|
||||
# Port-Forwarder Gateway Specific
|
||||
HOST_PROPS+=" -Dtip.wlan.portForwarderGatewayBaseUrl=$PF_GATEWAY_URL"
|
||||
HOST_PROPS+=" -Dtip.wlan.websocketSessionTokenEncryptionKey=$PF_GATEWAY_ENCRYPTION_KEY"
|
||||
HOST_PROPS+=" -Dtip.wlan.externallyVisibleHostName=$PF_GATEWAY_EXT_HOST"
|
||||
HOST_PROPS+=" -Dtip.wlan.externallyVisiblePort=$PF_GATEWAY_EXT_PORT"
|
||||
|
||||
|
||||
REMOTE_DEBUG_PORT=5010
|
||||
REMOTE_DEBUG=" -agentlib:jdwp=transport=dt_socket,server=y,address=*:$REMOTE_DEBUG_PORT,suspend=n"
|
||||
|
||||
export ALL_PROPS="$PROFILES $LOGGING_PROPS $HOST_PROPS $REMOTE_DEBUG"
|
||||
|
||||
java $ALL_PROPS -jar app.jar
|
||||
@@ -24,7 +24,7 @@ spec:
|
||||
initContainers:
|
||||
- name: {{ include "common.name" . }}-readiness-opensync-gw
|
||||
image: alpine
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
@@ -53,11 +53,11 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
{{- include "common.env" . | nindent 12 }}
|
||||
- name: PF_GATEWAY_RELEASE_URL
|
||||
@@ -71,10 +71,6 @@ spec:
|
||||
value: {{ .Values.externallyVisible.host }}
|
||||
- name: PF_GATEWAY_RELEASE_EXT_PORT
|
||||
value: "{{ .Values.externallyVisible.port }}"
|
||||
- name: EXT_PORT_RANGE_START
|
||||
value: "{{ include "apDebugPortsStart" . }}"
|
||||
- name: EXT_PORT_RANGE_END
|
||||
value: "{{ sub (include "apDebugPortsEnd" . | atoi) 1 }}"
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -110,7 +106,9 @@ spec:
|
||||
- mountPath: /app/port-forwarding-gateway/logback.xml
|
||||
name: configuration
|
||||
subPath: logback.xml
|
||||
|
||||
- mountPath: /app/run.sh
|
||||
name: configuration
|
||||
subPath: run.sh
|
||||
ports:
|
||||
- name: {{ .Values.service.name1 }}
|
||||
containerPort: {{ .Values.service.port1 }}
|
||||
@@ -123,7 +121,7 @@ spec:
|
||||
containerPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- include "container.dev.apDebugPorts" . | nindent 10 }}
|
||||
{{- include "container.dev.debugport" . | nindent 10 }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
|
||||
@@ -12,14 +12,14 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefixExt | default .Values.nodePortPrefixExt }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefixExt | default .Values.nodePortPrefixExt }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
{{- if .Values.debug.enabled }}
|
||||
@@ -27,10 +27,10 @@ spec:
|
||||
targetPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name3 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefixExt | default .Values.nodePortPrefixExt }}{{ .Values.service.nodePort3 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- include "service.dev.apDebugPorts" . | nindent 2 }}
|
||||
{{- include "service.dev.debugport" . | nindent 2 }}
|
||||
selector:
|
||||
{{- include "common.selectorLabels" . | nindent 4 }}
|
||||
|
||||
@@ -78,11 +78,8 @@ service:
|
||||
port3: 5010
|
||||
name3: debug-appl
|
||||
nodePort3: '03'
|
||||
nodePortStatic: true ## if true, nodePort ports are calculated by Helm based on the given start index and length; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
# The below range will be combined with the nodePortPrefixExt to create a list of ports.
|
||||
# e.g. nodePortPrefixExt = 304, accessPointDebugPortRange.start = 10, accessPointDebugPortRange.length = 2, resulting ports = 30410, 30411
|
||||
# These ports are the ports that Java would choose as Local ports whenever it opens up
|
||||
# The below ports are the ports that Java would choose as Local ports whenever it opens up
|
||||
# a developer session for debug.
|
||||
# These ports are therefore assigned as container ports (in the deployment.yaml), so we
|
||||
# can reach them from inside the cluster.
|
||||
@@ -91,9 +88,33 @@ service:
|
||||
# NOTE: Another way to reach these container ports without opening NodePorts was to use
|
||||
# kubectl port forwarding. However, we dont want the developer to install kubectl.
|
||||
|
||||
accessPointDebugPortRange:
|
||||
start: 10
|
||||
length: 26
|
||||
debugPorts:
|
||||
- 30410
|
||||
- 30411
|
||||
- 30412
|
||||
- 30413
|
||||
- 30414
|
||||
- 30415
|
||||
- 30416
|
||||
- 30417
|
||||
- 30418
|
||||
- 30419
|
||||
- 30420
|
||||
- 30421
|
||||
- 30422
|
||||
- 30423
|
||||
- 30424
|
||||
- 30425
|
||||
- 30426
|
||||
- 30427
|
||||
- 30428
|
||||
- 30429
|
||||
- 30430
|
||||
- 30431
|
||||
- 30432
|
||||
- 30433
|
||||
- 30434
|
||||
- 30435
|
||||
|
||||
creds:
|
||||
enabled: true
|
||||
@@ -118,13 +139,17 @@ ingress:
|
||||
hosts:
|
||||
- example.com
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 350Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 280Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
|
||||
@@ -32,12 +32,6 @@ spec:
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- if $.Values.ingress.lb_https_redirect }}
|
||||
- path: /*
|
||||
backend:
|
||||
serviceName: ssl-redirect
|
||||
servicePort: use-annotation
|
||||
{{- end }}
|
||||
{{- range .paths }}
|
||||
- path: {{ . }}
|
||||
backend:
|
||||
|
||||
@@ -5,10 +5,6 @@ metadata:
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
{{- include "common.labels" . | nindent 4 }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
@@ -16,14 +12,14 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
{{- if .Values.debug.enabled }}
|
||||
@@ -31,7 +27,7 @@ spec:
|
||||
targetPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name3 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort3 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -13,7 +13,7 @@ spec:
|
||||
matchLabels:
|
||||
{{- include "common.selectorLabels" . | nindent 6 }}
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.updateStrategy | indent 4 }}
|
||||
{{ toYaml .Values.updateStrategy | indent 4 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@@ -30,7 +30,7 @@ spec:
|
||||
{{- range $key, $value := .Values.podAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
|
||||
{{- if .Values.schedulerName }}
|
||||
@@ -38,7 +38,7 @@ spec:
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: "{{ .Values.priorityClassName }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
imagePullSecrets:
|
||||
- name: "{{ include "common.namespace" . }}-docker-registry-key"
|
||||
serviceAccountName: {{ include "common.serviceAccountName" . }}
|
||||
@@ -49,36 +49,22 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
{{- include "common.env" . | nindent 12 }}
|
||||
- name: FILE_STORE_DIRECTORY_INTERNAL
|
||||
value: {{ $file_store_path }}
|
||||
- name: tip_wlan_AsyncExecutor_CorePoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_CorePoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_MaxPoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_MaxPoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_QueueCapacity
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_QueueCapacity }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsTotal
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsTotal }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsPerRoute
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsPerRoute }}"
|
||||
- name: tip_wlan_maxHttpThreads
|
||||
value: "{{ .Values.scalability.tip_wlan_maxHttpThreads }}"
|
||||
- name: JVM_MEM_OPTIONS
|
||||
value: "{{ .Values.scalability.JVM_MEM_OPTIONS }}"
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /ping
|
||||
port: {{ .Values.service.port1 }}
|
||||
scheme: {{ .Values.probes.livenessProbe.scheme }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.probes.livenessProbe.timeoutSeconds }}
|
||||
failureThreshold: {{ .Values.probes.livenessProbe.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }}
|
||||
@@ -164,4 +150,4 @@ spec:
|
||||
storageClassName: "{{ .Values.persistence.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -61,7 +61,7 @@ securityContext: {}
|
||||
testsEnabled: false
|
||||
|
||||
# Enable/Disable Remote debugging
|
||||
debug:
|
||||
debug:
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
@@ -74,46 +74,34 @@ service:
|
||||
nodePort2: 52
|
||||
port3: 5006
|
||||
name3: debug
|
||||
nodePort3: 15
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
nodePort3: 15
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
lb_https_redirect: false ## if set to true, enables http->https redirect on cloud load balancer
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: wlan-portal-service.zone3.lab.connectus.ai
|
||||
paths: [
|
||||
/portal
|
||||
/portal
|
||||
]
|
||||
tls:
|
||||
tls:
|
||||
- secretName: portal-secret
|
||||
hosts:
|
||||
- wlan-portal-service.zone3.lab.connectus.ai
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 450Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 420Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
data:
|
||||
{{ tpl (.Files.Glob "resources/config/*").AsConfig . | indent 2 }}
|
||||
datasource.properties: |-
|
||||
singleDataSource.url=jdbc:postgresql://{{- include "postgresql.service" . -}}:5432/prov_db
|
||||
singleDataSource.url=jdbc:postgresql://tip-wlan-postgresql:5432/prov_db
|
||||
singleDataSource.username={{ .Values.creds.postgres.singleDataSourceUsername }}
|
||||
singleDataSource.password={{ .Values.creds.postgres.singleDataSourcePassword }}
|
||||
singleDataSource.driverClass=org.postgresql.Driver
|
||||
|
||||
@@ -23,11 +23,10 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
initContainers:
|
||||
- name: wait-for-services
|
||||
image: opsfleet/depends-on:latest
|
||||
args:
|
||||
- "-service={{ .Release.Name }}-postgresql"
|
||||
- -check_interval=5
|
||||
- name: {{ include "common.name" . }}-readiness
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command: ['sh', '-c', "until nslookup {{ $pg }}.{{ $ns }}.svc.cluster.local; do echo waiting for POSTGRES; sleep 2; done"]
|
||||
- name: {{ include "common.name" . }}-create-db-schema
|
||||
env:
|
||||
- name: POSTGRESQL_PORT_NUMBER
|
||||
@@ -52,19 +51,19 @@ spec:
|
||||
secretKeyRef:
|
||||
name: {{ include "common.fullname" . }}-creds
|
||||
key: schema-repo-password
|
||||
image: {{ .Values.postgresql.image }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
image: postgres:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
- |
|
||||
apt update
|
||||
apt -y install curl
|
||||
echo "***** Fetching latest cloud-sdk-schema-postgresql for DB and Tables sql from JFrog *****"
|
||||
curl --insecure -u$SCHEMA_REPO_USER:$SCHEMA_REPO_PASSWORD -O "https://tip-tip-wlan-cloud-docker-repo.jfrog.io/artifactory/tip-wlan-cloud-schemas/0.0.1-SNAPSHOT/sql/cloud-sdk-schema-postgresql-db-user.sql"
|
||||
curl --insecure -u$SCHEMA_REPO_USER:$SCHEMA_REPO_PASSWORD -O "https://tip-tip-wlan-cloud-docker-repo.jfrog.io/artifactory/tip-wlan-cloud-schemas/0.0.1-SNAPSHOT/sql/cloud-sdk-schema-postgresql-tables.sql"
|
||||
echo "***** Now executing cloud-sdk-schema-postgresql-db-user.sql on host {{ $pg }} and creating db prov_db and user tip_user using User Postgres. This uses full client-cert authentication *****"
|
||||
### Observed that PSQL was unable to resolve the Postgres-service host because the postgres service wasnt
|
||||
### Observed that PSQL was unable to resolve the Postgres-service host because the postgres service wasnt
|
||||
### really ready when running Postgres in Master-Slave config... hence the below retry-logic
|
||||
psql 'host={{ $pg }} port=5432 user=postgres sslmode=verify-ca sslcert=/opt/tip-wlan/certs/postgresclientcert.pem sslkey=/opt/tip-wlan/certs/postgresclientkey_dec.pem sslrootcert=/opt/tip-wlan/certs/cacert.pem' -f cloud-sdk-schema-postgresql-db-user.sql
|
||||
status=$(echo $?)
|
||||
@@ -96,7 +95,7 @@ spec:
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
cpu: 250m
|
||||
memory: 256Mi
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
@@ -119,42 +118,20 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
{{- include "common.env" . | nindent 12 }}
|
||||
- name: tip_wlan_AsyncExecutor_CorePoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_CorePoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_MaxPoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_MaxPoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_QueueCapacity
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_QueueCapacity }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsTotal
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsTotal }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsPerRoute
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsPerRoute }}"
|
||||
- name: tip_wlan_maxHttpThreads
|
||||
value: "{{ .Values.scalability.tip_wlan_maxHttpThreads }}"
|
||||
- name: JVM_MEM_OPTIONS
|
||||
value: "{{ .Values.scalability.JVM_MEM_OPTIONS }}"
|
||||
- name: singleDataSource_maxTotalConnections
|
||||
value: "{{ .Values.scalability.singleDataSource_maxTotalConnections }}"
|
||||
- name: singleDataSource_maxIdleConnections
|
||||
value: "{{ .Values.scalability.singleDataSource_maxIdleConnections }}"
|
||||
- name: singleDataSource_maxPreparedStatements
|
||||
value: "{{ .Values.scalability.singleDataSource_maxPreparedStatements }}"
|
||||
- name: singleDataSource_maxIdlePreparedStatements
|
||||
value: "{{ .Values.scalability.singleDataSource_maxIdlePreparedStatements }}"
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /ping
|
||||
port: {{ .Values.service.port2 }}
|
||||
scheme: {{ .Values.probes.livenessProbe.scheme }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.probes.livenessProbe.timeoutSeconds }}
|
||||
failureThreshold: {{ .Values.probes.livenessProbe.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }}
|
||||
@@ -206,7 +183,7 @@ spec:
|
||||
- name: {{ .Values.service.name3 }}
|
||||
containerPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
@@ -232,4 +209,4 @@ spec:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
- name: dshm
|
||||
emptyDir: {}
|
||||
emptyDir: {}
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
rules:
|
||||
- apiGroups: ["batch", "apps", ""]
|
||||
resources: ["pods", "services", "jobs"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "common.serviceAccountName" . }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -12,14 +12,14 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
{{- if .Values.debug.enabled }}
|
||||
@@ -27,7 +27,7 @@ spec:
|
||||
targetPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name3 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort3 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -65,7 +65,7 @@ creds:
|
||||
password: DUMMY_POSTGRES_PASSWORD
|
||||
tipUser:
|
||||
password: DUMMY_TIPUSER_PASSWORD
|
||||
|
||||
|
||||
schema_repo:
|
||||
username: DUMMY_SCHEMA_REPO_USERNAME
|
||||
password: DUMMY_SCHEMA_REPO_PASSWORD
|
||||
@@ -75,7 +75,7 @@ creds:
|
||||
singleDataSourceSslKeyPassword: DUMMY_SSL_PASSWORD
|
||||
|
||||
# Enable/Disable Remote debugging
|
||||
debug:
|
||||
debug:
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
@@ -86,29 +86,6 @@ service:
|
||||
name2: secondary-port
|
||||
port3: 5007
|
||||
name3: debug
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
#max number of connections to PostgreSQL database
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
#max number of idle connections to PostgreSQL database
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
#max number of cached prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
#max number of cached idle prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
@@ -118,20 +95,24 @@ ingress:
|
||||
hosts:
|
||||
- host: example.com
|
||||
paths: [
|
||||
/portal
|
||||
/portal
|
||||
]
|
||||
tls:
|
||||
tls:
|
||||
- secretName: portal-secret
|
||||
hosts:
|
||||
- example.com
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 320Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 300Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
@@ -139,9 +120,8 @@ tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
postgresql:
|
||||
postgresql:
|
||||
url: postgresql
|
||||
image: postgres:latest
|
||||
|
||||
env:
|
||||
protocol: https
|
||||
|
||||
@@ -23,21 +23,20 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
initContainers:
|
||||
- name: wait-for-services
|
||||
image: opsfleet/depends-on:latest
|
||||
args:
|
||||
- "-service={{ .Release.Name }}-kafka-headless"
|
||||
- -check_interval=5
|
||||
- name: {{ include "common.name" . }}-readiness
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command: ['sh', '-c', "until nslookup {{ $kafka }}.{{ $ns }}.svc.cluster.local; do echo waiting for Kafka; sleep 2; done"]
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
{{- include "common.env" . | nindent 12 }}
|
||||
- name: tip.wlan.kafka.bootstrapServers
|
||||
@@ -69,28 +68,13 @@ spec:
|
||||
value: SSL
|
||||
- name: tip.wlan.kafka.sslEndpointIdentificationAlgorithm
|
||||
value: ''
|
||||
- name: tip_wlan_AsyncExecutor_CorePoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_CorePoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_MaxPoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_MaxPoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_QueueCapacity
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_QueueCapacity }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsTotal
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsTotal }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsPerRoute
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsPerRoute }}"
|
||||
- name: tip_wlan_maxHttpThreads
|
||||
value: "{{ .Values.scalability.tip_wlan_maxHttpThreads }}"
|
||||
- name: JVM_MEM_OPTIONS
|
||||
value: "{{ .Values.scalability.JVM_MEM_OPTIONS }}"
|
||||
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /ping
|
||||
port: {{ .Values.service.port2 }}
|
||||
scheme: {{ .Values.probes.livenessProbe.scheme }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.probes.livenessProbe.timeoutSeconds }}
|
||||
failureThreshold: {{ .Values.probes.livenessProbe.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }}
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
rules:
|
||||
- apiGroups: ["batch", "apps", ""]
|
||||
resources: ["pods", "services", "jobs"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "common.serviceAccountName" . }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -12,14 +12,14 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
{{- if .Values.debug.enabled }}
|
||||
@@ -27,7 +27,7 @@ spec:
|
||||
targetPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name3 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort3 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -59,7 +59,7 @@ securityContext: {}
|
||||
testsEnabled: false
|
||||
|
||||
# Enable/Disable Remote debugging
|
||||
debug:
|
||||
debug:
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
@@ -70,21 +70,6 @@ service:
|
||||
name2: secondary-port
|
||||
port3: 5009
|
||||
name3: debug
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
@@ -94,20 +79,24 @@ ingress:
|
||||
hosts:
|
||||
- host: example.com
|
||||
paths: [
|
||||
/portal
|
||||
/portal
|
||||
]
|
||||
tls:
|
||||
tls:
|
||||
- secretName: portal-secret
|
||||
hosts:
|
||||
- example.com
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 370Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 350Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
@@ -121,7 +110,7 @@ creds:
|
||||
sslKeystorePassword: DUMMY_PASSWORD
|
||||
sslTruststorePassword: DUMMY_PASSWORD
|
||||
|
||||
kafka:
|
||||
kafka:
|
||||
url: kafka-headless
|
||||
|
||||
env:
|
||||
|
||||
@@ -67,10 +67,12 @@ color = on
|
||||
;; A version of CQL to use (this should almost never be set)
|
||||
; version = 3.2.1
|
||||
|
||||
|
||||
|
||||
[connection]
|
||||
|
||||
;; The host to connect to
|
||||
hostname = {{ include "cassandra.service" . }}
|
||||
hostname = tip-wlan-cassandra-headless
|
||||
|
||||
;; The port to connect to (9042 is the native protocol default)
|
||||
port = 9042
|
||||
@@ -8,7 +8,7 @@ data:
|
||||
cassandra-application.conf: >-
|
||||
datastax-java-driver {
|
||||
basic {
|
||||
contact-points = [ "{{ include "cassandra.service" . }}:9042" ]
|
||||
contact-points = [ "tip-wlan-cassandra-headless:9042" ]
|
||||
load-balancing-policy.local-datacenter = datacenter1
|
||||
session-keyspace = tip_wlan_keyspace
|
||||
}
|
||||
@@ -28,6 +28,3 @@ data:
|
||||
password = {{ .Values.creds.cassandra.tip_password }}
|
||||
}
|
||||
}
|
||||
|
||||
cqlshrc.tip-wlan: |
|
||||
{{ tpl (.Files.Get "files/cqlshrc.tip-wlan") . | nindent 4 }}
|
||||
|
||||
@@ -24,12 +24,14 @@ spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
initContainers:
|
||||
- name: wait-for-services
|
||||
image: opsfleet/depends-on:latest
|
||||
args:
|
||||
- "-service={{ .Release.Name }}-kafka-headless"
|
||||
- "-service={{ .Release.Name }}-cassandra"
|
||||
- -check_interval=5
|
||||
- name: {{ include "common.name" . }}-kafka-readiness
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command: ['sh', '-c', "until nslookup {{ $kafka }}.{{ $ns }}.svc.cluster.local; do echo waiting for Kafka; sleep 2; done"]
|
||||
- name: {{ include "common.name" . }}-cassandra-readiness
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command: ['sh', '-c', "until nslookup {{ $cassandra }}.{{ $ns }}.svc.cluster.local; do echo waiting for Cassandra; sleep 2; done"]
|
||||
- name: {{ include "common.name" . }}-create-db-schema-cassandra
|
||||
env:
|
||||
- name: CASSANDRA_PORT_NUMBER
|
||||
@@ -44,8 +46,8 @@ spec:
|
||||
secretKeyRef:
|
||||
name: {{ include "common.fullname" . }}-creds
|
||||
key: schema-repo-password
|
||||
image: {{ .Values.cassandra.image }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
image: cassandra:3.11.6
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
@@ -91,7 +93,7 @@ spec:
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
cpu: 50m
|
||||
cpu: 250m
|
||||
memory: 256Mi
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
@@ -107,17 +109,17 @@ spec:
|
||||
subPath: cassandraserverkey_dec.pem
|
||||
- mountPath: /opt/tip-wlan/certs/cacert.pem
|
||||
name: certificates
|
||||
subPath: cacert.pem
|
||||
subPath: cacert.pem
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.testingEnabled }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}-{{.Values.global.testingTimestamp}}
|
||||
{{- else }}
|
||||
image: {{ .Values.global.repository.registry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
image: {{ .Values.global.repository }}/{{ .Values.image.name }}:{{ .Values.image.tag }}
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.pullPolicy | default .Values.global.pullPolicy }}
|
||||
imagePullPolicy: {{ .Values.global.pullPolicy }}
|
||||
env:
|
||||
{{- include "common.env" . | nindent 12 }}
|
||||
- name: tip.wlan.kafka.bootstrapServers
|
||||
@@ -161,28 +163,13 @@ spec:
|
||||
key: cassandra_tip_user
|
||||
- name: CASSANDRA_HOST
|
||||
value: {{ $cassandra }}:{{ .Values.cassandra.port }}
|
||||
- name: tip_wlan_AsyncExecutor_CorePoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_CorePoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_MaxPoolSize
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_MaxPoolSize }}"
|
||||
- name: tip_wlan_AsyncExecutor_QueueCapacity
|
||||
value: "{{ .Values.scalability.tip_wlan_AsyncExecutor_QueueCapacity }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsTotal
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsTotal }}"
|
||||
- name: tip_wlan_httpClientConfig_maxConnectionsPerRoute
|
||||
value: "{{ .Values.scalability.tip_wlan_httpClientConfig_maxConnectionsPerRoute }}"
|
||||
- name: tip_wlan_maxHttpThreads
|
||||
value: "{{ .Values.scalability.tip_wlan_maxHttpThreads }}"
|
||||
- name: JVM_MEM_OPTIONS
|
||||
value: "{{ .Values.scalability.JVM_MEM_OPTIONS }}"
|
||||
|
||||
{{- if .Values.probes.enabled }}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /ping
|
||||
port: {{ .Values.service.port2 }}
|
||||
scheme: {{ .Values.probes.livenessProbe.scheme }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
initialDelaySeconds: {{ .Values.probes.livenessProbe.initialDelaySeconds }}
|
||||
timeoutSeconds: {{ .Values.probes.livenessProbe.timeoutSeconds }}
|
||||
failureThreshold: {{ .Values.probes.livenessProbe.failureThreshold }}
|
||||
periodSeconds: {{ .Values.probes.livenessProbe.periodSeconds }}
|
||||
@@ -231,7 +218,7 @@ spec:
|
||||
- name: {{ .Values.service.name3 }}
|
||||
containerPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
rules:
|
||||
- apiGroups: ["batch", "apps", ""]
|
||||
resources: ["pods", "services", "jobs"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "common.serviceAccountName" . }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "common.fullname" . }}-depends-on
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
@@ -12,14 +12,14 @@ spec:
|
||||
targetPort: {{ .Values.service.port1 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name1 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort1 }}
|
||||
{{- end }}
|
||||
- port: {{ .Values.service.port2 }}
|
||||
targetPort: {{ .Values.service.port2 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name2 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix | default .Values.nodePortPrefix }}{{ .Values.service.nodePort2 }}
|
||||
{{- end }}
|
||||
{{- if .Values.debug.enabled }}
|
||||
@@ -27,7 +27,7 @@ spec:
|
||||
targetPort: {{ .Values.service.port3 }}
|
||||
protocol: TCP
|
||||
name: {{ .Values.service.name3 }}
|
||||
{{- if and .Values.service.nodePortStatic (eq .Values.service.type "NodePort") }}
|
||||
{{- if eq .Values.service.type "NodePort" }}
|
||||
nodePort: {{ .Values.global.nodePortPrefix }}{{ .Values.service.nodePort3 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -59,7 +59,7 @@ securityContext: {}
|
||||
testsEnabled: false
|
||||
|
||||
# Enable/Disable Remote debugging
|
||||
debug:
|
||||
debug:
|
||||
enabled: false
|
||||
|
||||
service:
|
||||
@@ -70,21 +70,6 @@ service:
|
||||
name2: secondary-port
|
||||
port3: 5008
|
||||
name3: debug
|
||||
nodePortStatic: true ## if true, nodePort ports are statically defined effectively prohibiting multiple deployments on the same cluster; if false, nodePort ports are chosen dynamically by k8s
|
||||
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
@@ -94,20 +79,24 @@ ingress:
|
||||
hosts:
|
||||
- host: example.com
|
||||
paths: [
|
||||
/portal
|
||||
/portal
|
||||
]
|
||||
tls:
|
||||
tls:
|
||||
- secretName: portal-secret
|
||||
hosts:
|
||||
- example.com
|
||||
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 420Mi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 400Mi
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
@@ -127,13 +116,12 @@ creds:
|
||||
username: DUMMY_SCHEMA_REPO_USERNAME
|
||||
password: DUMMY_SCHEMA_REPO_PASSWORD
|
||||
|
||||
kafka:
|
||||
kafka:
|
||||
url: kafka-headless
|
||||
|
||||
cassandra:
|
||||
url: cassandra-headless
|
||||
port: 9042
|
||||
image: cassandra:3.11.6
|
||||
|
||||
env:
|
||||
protocol: https
|
||||
|
||||
6
tip-wlan/charts/zookeeper/Chart.yaml
Normal file
6
tip-wlan/charts/zookeeper/Chart.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
apiVersion: v2
|
||||
name: zookeeper
|
||||
version: 0.1.0
|
||||
appVersion: 1.0.0
|
||||
description: Centralized service for maintaining configuration information, naming,
|
||||
providing distributed synchronization, and providing group services.
|
||||
20
tip-wlan/charts/zookeeper/templates/_helpers.tpl
Normal file
20
tip-wlan/charts/zookeeper/templates/_helpers.tpl
Normal file
@@ -0,0 +1,20 @@
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "zookeeper.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
The name of the zookeeper headless service.
|
||||
*/}}
|
||||
{{- define "zookeeper.headless" -}}
|
||||
{{- printf "%s-headless" (include "common.fullname" .) | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
The name of the zookeeper chroots job.
|
||||
*/}}
|
||||
{{- define "zookeeper.chroots" -}}
|
||||
{{- printf "%s-chroots" (include "common.fullname" .) | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
111
tip-wlan/charts/zookeeper/templates/config-script.yaml
Normal file
111
tip-wlan/charts/zookeeper/templates/config-script.yaml
Normal file
@@ -0,0 +1,111 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
app: {{ include "common.name" . }}
|
||||
chart: {{ template "zookeeper.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: server
|
||||
data:
|
||||
ok: |
|
||||
#!/bin/sh
|
||||
zkServer.sh status
|
||||
|
||||
ready: |
|
||||
#!/bin/sh
|
||||
echo ruok | nc 127.0.0.1 ${1:-2181}
|
||||
|
||||
run: |
|
||||
#!/bin/bash
|
||||
|
||||
set -a
|
||||
ROOT=$(echo /apache-zookeeper-*)
|
||||
|
||||
ZK_USER=${ZK_USER:-"zookeeper"}
|
||||
ZK_LOG_LEVEL=${ZK_LOG_LEVEL:-"INFO"}
|
||||
ZK_DATA_DIR=${ZK_DATA_DIR:-"/data"}
|
||||
ZK_DATA_LOG_DIR=${ZK_DATA_LOG_DIR:-"/data/log"}
|
||||
ZK_CONF_DIR=${ZK_CONF_DIR:-"/conf"}
|
||||
ZK_CLIENT_PORT=${ZK_CLIENT_PORT:-2181}
|
||||
ZK_SERVER_PORT=${ZK_SERVER_PORT:-2888}
|
||||
ZK_ELECTION_PORT=${ZK_ELECTION_PORT:-3888}
|
||||
ZK_TICK_TIME=${ZK_TICK_TIME:-2000}
|
||||
ZK_INIT_LIMIT=${ZK_INIT_LIMIT:-10}
|
||||
ZK_SYNC_LIMIT=${ZK_SYNC_LIMIT:-5}
|
||||
ZK_HEAP_SIZE=${ZK_HEAP_SIZE:-2G}
|
||||
ZK_MAX_CLIENT_CNXNS=${ZK_MAX_CLIENT_CNXNS:-60}
|
||||
ZK_MIN_SESSION_TIMEOUT=${ZK_MIN_SESSION_TIMEOUT:- $((ZK_TICK_TIME*2))}
|
||||
ZK_MAX_SESSION_TIMEOUT=${ZK_MAX_SESSION_TIMEOUT:- $((ZK_TICK_TIME*20))}
|
||||
ZK_SNAP_RETAIN_COUNT=${ZK_SNAP_RETAIN_COUNT:-3}
|
||||
ZK_PURGE_INTERVAL=${ZK_PURGE_INTERVAL:-0}
|
||||
ID_FILE="$ZK_DATA_DIR/myid"
|
||||
ZK_CONFIG_FILE="$ZK_CONF_DIR/zoo.cfg"
|
||||
LOG4J_PROPERTIES="$ZK_CONF_DIR/log4j.properties"
|
||||
HOST=$(hostname)
|
||||
DOMAIN=`hostname -d`
|
||||
JVMFLAGS="-Xmx$ZK_HEAP_SIZE -Xms$ZK_HEAP_SIZE"
|
||||
|
||||
APPJAR=$(echo $ROOT/*jar)
|
||||
CLASSPATH="${ROOT}/lib/*:${APPJAR}:${ZK_CONF_DIR}:"
|
||||
|
||||
if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
|
||||
NAME=${BASH_REMATCH[1]}
|
||||
ORD=${BASH_REMATCH[2]}
|
||||
MY_ID=$((ORD+1))
|
||||
else
|
||||
echo "Failed to extract ordinal from hostname $HOST"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mkdir -p $ZK_DATA_DIR
|
||||
mkdir -p $ZK_DATA_LOG_DIR
|
||||
echo $MY_ID >> $ID_FILE
|
||||
|
||||
echo "clientPort=$ZK_CLIENT_PORT" >> $ZK_CONFIG_FILE
|
||||
echo "dataDir=$ZK_DATA_DIR" >> $ZK_CONFIG_FILE
|
||||
echo "dataLogDir=$ZK_DATA_LOG_DIR" >> $ZK_CONFIG_FILE
|
||||
echo "tickTime=$ZK_TICK_TIME" >> $ZK_CONFIG_FILE
|
||||
echo "initLimit=$ZK_INIT_LIMIT" >> $ZK_CONFIG_FILE
|
||||
echo "syncLimit=$ZK_SYNC_LIMIT" >> $ZK_CONFIG_FILE
|
||||
echo "maxClientCnxns=$ZK_MAX_CLIENT_CNXNS" >> $ZK_CONFIG_FILE
|
||||
echo "minSessionTimeout=$ZK_MIN_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE
|
||||
echo "maxSessionTimeout=$ZK_MAX_SESSION_TIMEOUT" >> $ZK_CONFIG_FILE
|
||||
echo "autopurge.snapRetainCount=$ZK_SNAP_RETAIN_COUNT" >> $ZK_CONFIG_FILE
|
||||
echo "autopurge.purgeInterval=$ZK_PURGE_INTERVAL" >> $ZK_CONFIG_FILE
|
||||
echo "4lw.commands.whitelist=*" >> $ZK_CONFIG_FILE
|
||||
|
||||
for (( i=1; i<=$ZK_REPLICAS; i++ ))
|
||||
do
|
||||
echo "server.$i=$NAME-$((i-1)).$DOMAIN:$ZK_SERVER_PORT:$ZK_ELECTION_PORT" >> $ZK_CONFIG_FILE
|
||||
done
|
||||
|
||||
rm -f $LOG4J_PROPERTIES
|
||||
|
||||
echo "zookeeper.root.logger=$ZK_LOG_LEVEL, CONSOLE" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.console.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.log.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.log.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.log.file=zookeeper.log" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.log.maxfilesize=256MB" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.log.maxbackupindex=10" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.tracelog.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
|
||||
echo "zookeeper.tracelog.file=zookeeper_trace.log" >> $LOG4J_PROPERTIES
|
||||
echo "log4j.rootLogger=\${zookeeper.root.logger}" >> $LOG4J_PROPERTIES
|
||||
echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender" >> $LOG4J_PROPERTIES
|
||||
echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}" >> $LOG4J_PROPERTIES
|
||||
echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout" >> $LOG4J_PROPERTIES
|
||||
echo "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n" >> $LOG4J_PROPERTIES
|
||||
|
||||
if [ -n "$JMXDISABLE" ]
|
||||
then
|
||||
MAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain
|
||||
else
|
||||
MAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH -Dcom.sun.management.jmxremote.ssl=$JMXSSL -Dzookeeper.jmx.log4j.disable=$JMXLOG4J org.apache.zookeeper.server.quorum.QuorumPeerMain"
|
||||
fi
|
||||
|
||||
set -x
|
||||
exec java -cp "$CLASSPATH" $JVMFLAGS $MAIN $ZK_CONFIG_FILE
|
||||
26
tip-wlan/charts/zookeeper/templates/service-headless.yaml
Normal file
26
tip-wlan/charts/zookeeper/templates/service-headless.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ template "zookeeper.headless" . }}
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
app: {{ include "common.name" . }}
|
||||
chart: {{ template "zookeeper.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
{{- if .Values.headless.annotations }}
|
||||
annotations:
|
||||
{{ .Values.headless.annotations | toYaml | trimSuffix "\n" | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
{{- range $key, $port := .Values.ports }}
|
||||
- name: {{ $key }}
|
||||
port: {{ $port.containerPort }}
|
||||
targetPort: {{ $key }}
|
||||
protocol: {{ $port.protocol }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ template "common.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
26
tip-wlan/charts/zookeeper/templates/service.yaml
Normal file
26
tip-wlan/charts/zookeeper/templates/service.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
app: {{ include "common.name" . }}
|
||||
chart: {{ template "zookeeper.chart" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
{{- if .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- with .Values.service.annotations }}
|
||||
{{ toYaml . | indent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
{{- range $key, $value := .Values.service.ports }}
|
||||
- name: {{ $key }}
|
||||
{{ toYaml $value | indent 6 }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app: {{ include "common.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
160
tip-wlan/charts/zookeeper/templates/statefulset.yaml
Normal file
160
tip-wlan/charts/zookeeper/templates/statefulset.yaml
Normal file
@@ -0,0 +1,160 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "common.fullname" . }}
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
app: {{ include "common.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
component: server
|
||||
spec:
|
||||
serviceName: {{ template "zookeeper.headless" . }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ include "common.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: server
|
||||
updateStrategy:
|
||||
{{ toYaml .Values.updateStrategy | indent 4 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ include "common.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
component: server
|
||||
{{- if .Values.podLabels }}
|
||||
## Custom pod labels
|
||||
{{- range $key, $value := .Values.podLabels }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.podAnnotations }}
|
||||
annotations:
|
||||
## Custom pod annotations
|
||||
{{- range $key, $value := .Values.podAnnotations }}
|
||||
{{ $key }}: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
spec:
|
||||
terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }}
|
||||
{{- if .Values.schedulerName }}
|
||||
schedulerName: "{{ .Values.schedulerName }}"
|
||||
{{- end }}
|
||||
securityContext:
|
||||
{{ toYaml .Values.securityContext | indent 8 }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: "{{ .Values.priorityClassName }}"
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: zookeeper
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- with .Values.command }}
|
||||
command: {{ range . }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
ports:
|
||||
{{- range $key, $port := .Values.ports }}
|
||||
- name: {{ $key }}
|
||||
{{ toYaml $port | indent 14 }}
|
||||
{{- end }}
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /config-scripts/ok
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 2
|
||||
successThreshold: 1
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- sh
|
||||
- /config-scripts/ready
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 30
|
||||
timeoutSeconds: 5
|
||||
failureThreshold: 2
|
||||
successThreshold: 1
|
||||
env:
|
||||
- name: ZK_REPLICAS
|
||||
value: {{ .Values.replicaCount | quote }}
|
||||
{{- range $key, $value := .Values.env }}
|
||||
- name: {{ $key | upper | replace "." "_" }}
|
||||
value: {{ $value | quote }}
|
||||
{{- end }}
|
||||
{{- range $secret := .Values.secrets }}
|
||||
{{- range $key := $secret.keys }}
|
||||
- name: {{ (print $secret.name "_" $key) | upper }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ $secret.name }}
|
||||
key: {{ $key }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
{{- range $secret := .Values.secrets }}
|
||||
{{- if $secret.mountPath }}
|
||||
{{- range $key := $secret.keys }}
|
||||
- name: {{ $.Release.Name }}-{{ $secret.name }}
|
||||
mountPath: {{ $secret.mountPath }}/{{ $key }}
|
||||
subPath: {{ $key }}
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: config
|
||||
mountPath: /config-scripts
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{ toYaml . | indent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: {{ include "common.fullname" . }}
|
||||
defaultMode: 0555
|
||||
{{- range .Values.secrets }}
|
||||
- name: {{ $.Release.Name }}-{{ .name }}
|
||||
secret:
|
||||
secretName: {{ .name }}
|
||||
{{- end }}
|
||||
{{- if not .Values.persistence.enabled }}
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
{{- end }}
|
||||
{{- if .Values.persistence.enabled }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.persistence.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.size | quote }}
|
||||
{{- if .Values.persistence.storageClass }}
|
||||
{{- if (eq "-" .Values.persistence.storageClass) }}
|
||||
storageClassName: ""
|
||||
{{- else }}
|
||||
storageClassName: "{{ .Values.persistence.storageClass }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
171
tip-wlan/charts/zookeeper/values.yaml
Normal file
171
tip-wlan/charts/zookeeper/values.yaml
Normal file
@@ -0,0 +1,171 @@
|
||||
## As weighted quorums are not supported, it is imperative that an odd number of replicas
|
||||
## be chosen. Moreover, the number of replicas should be either 1, 3, 5, or 7.
|
||||
##
|
||||
## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper#stateful-set
|
||||
replicaCount: 1 # Desired quantity of ZooKeeper pods. This should always be (1,3,5, or 7)
|
||||
|
||||
podDisruptionBudget:
|
||||
maxUnavailable: 1 # Limits how many Zokeeper pods may be unavailable due to voluntary disruptions.
|
||||
|
||||
terminationGracePeriodSeconds: 1800 # Duration in seconds a Zokeeper pod needs to terminate gracefully.
|
||||
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
|
||||
## refs:
|
||||
## - https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
|
||||
## - https://github.com/kubernetes/contrib/blob/master/statefulsets/zookeeper/Makefile#L1
|
||||
image:
|
||||
repository: zookeeper # Container image repository for zookeeper container.
|
||||
tag: 3.5.5 # Container image tag for zookeeper container.
|
||||
pullPolicy: IfNotPresent # Image pull criteria for zookeeper container.
|
||||
|
||||
service:
|
||||
type: ClusterIP # Exposes zookeeper on a cluster-internal IP.
|
||||
annotations: {} # Arbitrary non-identifying metadata for zookeeper service.
|
||||
## AWS example for use with LoadBalancer service type.
|
||||
# external-dns.alpha.kubernetes.io/hostname: zookeeper.cluster.local
|
||||
# service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: "true"
|
||||
# service.beta.kubernetes.io/aws-load-balancer-internal: "true"
|
||||
ports:
|
||||
client:
|
||||
port: 2181 # Service port number for client port.
|
||||
targetPort: client # Service target port for client port.
|
||||
protocol: TCP # Service port protocol for client port.
|
||||
|
||||
## Headless service.
|
||||
##
|
||||
headless:
|
||||
annotations: {}
|
||||
|
||||
ports:
|
||||
client:
|
||||
containerPort: 2181 # Port number for zookeeper container client port.
|
||||
protocol: TCP # Protocol for zookeeper container client port.
|
||||
election:
|
||||
containerPort: 3888 # Port number for zookeeper container election port.
|
||||
protocol: TCP # Protocol for zookeeper container election port.
|
||||
server:
|
||||
containerPort: 2888 # Port number for zookeeper container server port.
|
||||
protocol: TCP # Protocol for zookeeper container server port.
|
||||
|
||||
resources: {} # Optionally specify how much CPU and memory (RAM) each zookeeper container needs.
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
priorityClassName: ""
|
||||
|
||||
nodeSelector: {} # Node label-values required to run zookeeper pods.
|
||||
|
||||
tolerations: [] # Node taint overrides for zookeeper pods.
|
||||
|
||||
affinity: {} # Criteria by which pod label-values influence scheduling for zookeeper pods.
|
||||
# podAntiAffinity:
|
||||
# requiredDuringSchedulingIgnoredDuringExecution:
|
||||
# - topologyKey: "kubernetes.io/hostname"
|
||||
# labelSelector:
|
||||
# matchLabels:
|
||||
# release: zookeeper
|
||||
|
||||
podAnnotations: {} # Arbitrary non-identifying metadata for zookeeper pods.
|
||||
# prometheus.io/scrape: "true"
|
||||
# prometheus.io/path: "/metrics"
|
||||
# prometheus.io/port: "9141"
|
||||
|
||||
podLabels: {} # Key/value pairs that are attached to zookeeper pods.
|
||||
# team: "developers"
|
||||
# service: "zookeeper"
|
||||
|
||||
securityContext:
|
||||
fsGroup: 1000
|
||||
runAsUser: 1000
|
||||
|
||||
## Useful, if you want to use an alternate image.
|
||||
command:
|
||||
- /bin/bash
|
||||
- -xec
|
||||
- /config-scripts/run
|
||||
|
||||
## Useful if using any custom authorizer.
|
||||
## Pass any secrets to the kafka pods. Each secret will be passed as an
|
||||
## environment variable by default. The secret can also be mounted to a
|
||||
## specific path (in addition to environment variable) if required. Environment
|
||||
## variable names are generated as: `<secretName>_<secretKey>` (All upper case)
|
||||
# secrets:
|
||||
# - name: myKafkaSecret
|
||||
# keys:
|
||||
# - username
|
||||
# - password
|
||||
# # mountPath: /opt/kafka/secret
|
||||
# - name: myZkSecret
|
||||
# keys:
|
||||
# - user
|
||||
# - pass
|
||||
# mountPath: /opt/zookeeper/secret
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
## If defined, PVC must be created manually before volume will be bound
|
||||
## existingClaim: opensync-wifi-controller-zookeeper-data
|
||||
## volumeReclaimPolicy: Retain
|
||||
## If you want to bind to an existing PV, uncomment below with the pv name
|
||||
## and comment storageClass and belowannotation
|
||||
## volumeName: pvc-dc52b290-ae86-4cb3-aad0-f2c806a23114
|
||||
|
||||
## zookeeper data Persistent Volume Storage Class
|
||||
## If defined, storageClassName: <storageClass>
|
||||
## If set to "-", storageClassName: "", which disables dynamic provisioning
|
||||
## If undefined (the default) or set to null, no storageClassName spec is
|
||||
## set, choosing the default provisioner. (gp2 on AWS, standard on
|
||||
## GKE, AWS & OpenStack)
|
||||
##
|
||||
storageClass: "-"
|
||||
accessMode: ReadWriteOnce
|
||||
size: 1Gi
|
||||
|
||||
## Use an alternate scheduler, e.g. "stork".
|
||||
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
|
||||
##
|
||||
# schedulerName:
|
||||
|
||||
## ref: https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper
|
||||
env:
|
||||
|
||||
## Options related to JMX exporter.
|
||||
## ref: https://github.com/apache/zookeeper/blob/master/bin/zkServer.sh#L36
|
||||
JMXAUTH: "false"
|
||||
JMXDISABLE: "false"
|
||||
JMXPORT: 1099
|
||||
JMXSSL: "false"
|
||||
|
||||
## The port on which the server will accept client requests.
|
||||
ZOO_PORT: 2181
|
||||
|
||||
## The number of Ticks that an ensemble member is allowed to perform leader
|
||||
## election.
|
||||
ZOO_INIT_LIMIT: 5
|
||||
|
||||
ZOO_TICK_TIME: 2000
|
||||
|
||||
## The maximum number of concurrent client connections that
|
||||
## a server in the ensemble will accept.
|
||||
ZOO_MAX_CLIENT_CNXNS: 60
|
||||
|
||||
## The number of Tick by which a follower may lag behind the ensembles leader.
|
||||
ZK_SYNC_LIMIT: 10
|
||||
|
||||
## The number of wall clock ms that corresponds to a Tick for the ensembles
|
||||
## internal time.
|
||||
ZK_TICK_TIME: 2000
|
||||
|
||||
ZOO_AUTOPURGE_PURGEINTERVAL: 0
|
||||
ZOO_AUTOPURGE_SNAPRETAINCOUNT: 3
|
||||
ZOO_STANDALONE_ENABLED: false
|
||||
@@ -1,10 +0,0 @@
|
||||
# Helm values for deploying two Cloud SDK instances into separate namespaces
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
helm install tip-wlan-1 tip-wlan -f tip-wlan/example-values/local-multi-namespace/ns-tip-1.yaml
|
||||
helm install tip-wlan-2 tip-wlan -f tip-wlan/example-values/local-multi-namespace/ns-tip-2.yaml
|
||||
```
|
||||
|
||||
This will create a Cloud SDK instance in each of the namespaces _tip-1_ and _tip-2_.
|
||||
@@ -1,270 +0,0 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nsPrefix: tip-1
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
# createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
address:
|
||||
ovsdb: opensync-controller.wlan.local
|
||||
mqtt: opensync-mqtt-broker.wlan.local
|
||||
persistence:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: "https://wlan-filestore.wlan.local"
|
||||
scalability:
|
||||
#how many concurrent connections single instance of OpenSyncGateway can accept
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: tip-wlan-1-wlan-portal-service:9051
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui-graphql-1.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui-graphql-1.wlan.local
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
graphql: https://wlan-ui-graphql-1.wlan.local
|
||||
service:
|
||||
type: NodePort
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui-1.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui-1.wlan.local
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
filestoreSize: 1Gi
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
enabled: true
|
||||
db:
|
||||
postgresUser:
|
||||
password: postgres
|
||||
tipUser:
|
||||
password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
postgres:
|
||||
singleDataSourceUsername: tip_user
|
||||
singleDataSourcePassword: tip_password
|
||||
singleDataSourceSslKeyPassword: mypassword
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
#max number of connections to PostgreSQL database
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
#max number of idle connections to PostgreSQL database
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
#max number of cached prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
#max number of cached idle prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
cassandra:
|
||||
tip_user: tip_user
|
||||
tip_password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
service:
|
||||
type: LoadBalancer
|
||||
config:
|
||||
externalStatusAddress: "api.wlan.local"
|
||||
defaultTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZXakNDQTBLZ0F3SUJBZ0lVUU5hUC9zcHZSSHRCVEFLd1lSTndieFJmRkFzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hURWJNQmtHQTFVRUF3d1NkMnhoYmkxMWFTNTNiR0Z1TG14dlkyRnNNQjRYRFRJd01EZ3lOekl3TWpZMQpObG9YRFRNd01EZ3lOVEl3TWpZMU5sb3dIVEViTUJrR0ExVUVBd3dTZDJ4aGJpMTFhUzUzYkdGdUxteHZZMkZzCk1JSUNJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBZzhBTUlJQ0NnS0NBZ0VBd1JhZ2lEV3pDTllCdFd3QmNLK2YKVGtrUW1NdCtRQWdUallyMEtTOERQSkNKZjZLa1BmWkhDdTN3NEx2cnh6WTlObWllaDJYVTgzNGFtZEp4SXVDdwo2SWJObzZ6c2tqc3lmb084d0ZEbWxMVldMZWc1SDlHOWRvZW0rV1RlS1BhRUhpM29xdXpOZ3Q2d0xzM212dk9BClR2aVRJb2M4OEVMams0ZFNSMlQ0ZGhoMHFLQ0NqK0hkWEJBNlYvOWJpcnUralYrL2t4RVF1TDJ6TTM5RHZWZDgKOWtzMzV6TVZVemUzNmxENElDT25sN2hnYVROQmk0NU85c2RMRDBZYVVtamlGd1FsdEpVZG1QS3BhQWRidmpVTwpuc3VwbkRZam0rVW0rOWFFcHFNNHRlMjNlZkM4TjhqMXVrZXh6SnJFMkdlRi9XQi9ZMUxGSUcyd2pxVm5zUGNzCm5GRjRZZDlFQlJSbmUxRVplWEJ1M0ZFTEZ5NmxDT0hJMTQ2b0JjYy9JYjYxN3JkVEtYcXh0di8yTkw2L1RxRmsKbnMvRUVqdmU2a1FZemxCWndXSFdwWndRZmczbW82TmFvRlpwVGFnOThNeXU1clpvT29mVGN4WEg2cExtNVB4MQpPQXpnTG5hOU8rMkZtQTRGanJnSGNNWTFOSXp5blpMK0RIOGZpYnQxRi92MkYyTUErUjl2bzg0dlI1Uk9HTmRECnZhMkFwZXZrTGNqUWcvTHdzWHYwZ1RvcFEvWEl6ZWpoNmJkVWtPcktTd0p6VDJDOS9lOUdRbjBncHBWOExCdUsKMXpRSG9ST0xuQTQxTUNGdlFMUUhvK1h0OEtHdytVYmFseTZoT3hCWkY1MUwvQmJxamtESDlBRUZhSkxwdGlFeQpxbjFFNXYrM3doZ0ZTNUlaVDhJVzV1VUNBd0VBQWFPQmtUQ0JqakFkQmdOVkhRNEVGZ1FVeTJiQVV5TlBYSFM5CjNWVFNEK3dvTjd0M3E4RXdId1lEVlIwakJCZ3dGb0FVeTJiQVV5TlBYSFM5M1ZUU0Qrd29ON3QzcThFd0R3WUQKVlIwVEFRSC9CQVV3QXdFQi96QTdCZ05WSFJFRU5EQXlnaHAzYkdGdUxYVnBMV2R5WVhCb2NXd3VkMnhoYmk1cwpiMk5oYklJT1lYQnBMbmRzWVc0dWJHOWpZV3lIQk1Db0FBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBS0grCmJxSmVlMTFuMzRTWWdCRHZnb1o4bEpMUVJ3c0ZucUV4Y1NyL3BsWjdHVklHRkg1L1EyS3lvOVZ5RWlUUHdySXMKS3NFckMxZXZINnh0MVVSZk16cDA1elZRMExZTTUra3NhbVJEYWdBZzNNMWNtN29LT2Rtcy9kcXpQZTJnWmZHSgpwVmR0VlcxQ0hyTDBSTFRSOTNoN2tnU2lCbFNFSVlNb2VLZk41SDlBYXZKNEtyeXlnUXM2M2trR1E1TTllc0FwCnU2YkIzMDd6eWZ6Z1MzdG1Rc1UwMXJnSmZoRUhRL1krQWs5d0R1T2d2bWZ4MFRXZ0FPR2JLcTZUdThNS1lkZWoKSWU3clYxRzVVdjdLZmdvelZYNzZnMktkblRWQmZzcFNLbzN6eX
Jaa2NrekFwdlV1OUllZkhkVG9lNEpNRVUweQpmazdsRVUvZXh6Qnl5TnhwKzZoZHUvWklnM3hiMXlBMW9WWThORWQxckwxekFWaVBlMzUxU0VORUtlSnBSYW5DCmtDTDNSQUZrYnhRN0loYWNqb3g4YmVsUitnbW84Y3lGWnBqOVhhb1BsU0ZTY2R3ejU3M0NUMGg5N3Y3NkE3c3cKeUMrQ2lTcDg1Z1dFVjV2Z0JpdE5KN1I5b25qQmRzdUgybGdFdE1EM0pOT3M4Y0NTUmloWXhyaXdaU3FoVDdvLwp0Y0lsY0o4NFc1bTZYNnpISjNHbXR1S0czUVBOT21zMC9WVm9EVHA5cWRwTCtFazE3dUIyQTQxTnB4ejNVUytsCjZ5SytwZFFRajdBTHpLdVJmT3lnODBYYk53MnY0U25wSTVxYlhGQlJ1bTUyZjg2c1BlbUZxMUtjdU5XZTRFVkMKeERHM2VLbHUrZGxsVXRLeC9QTjZ5ZmxiVDV4Y0dnY2Rtcnd6UmFXUwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRUUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Nzd2dna25BZ0VBQW9JQ0FRREJGcUNJTmJNSTFnRzEKYkFGd3I1OU9TUkNZeTM1QUNCT05pdlFwTHdNOGtJbC9vcVE5OWtjSzdmRGd1K3ZITmowMmFKNkhaZFR6ZmhxWgowbkVpNExEb2hzMmpyT3lTT3pKK2c3ekFVT2FVdFZZdDZEa2YwYjEyaDZiNVpONG85b1FlTGVpcTdNMkMzckF1CnplYSs4NEJPK0pNaWh6endRdU9UaDFKSFpQaDJHSFNvb0lLUDRkMWNFRHBYLzF1S3U3Nk5YNytURVJDNHZiTXoKZjBPOVYzejJTemZuTXhWVE43ZnFVUGdnSTZlWHVHQnBNMEdMams3Mngwc1BSaHBTYU9JWEJDVzBsUjJZOHFsbwpCMXUrTlE2ZXk2bWNOaU9iNVNiNzFvU21vemkxN2JkNThMdzN5UFc2UjdITW1zVFlaNFg5WUg5alVzVWdiYkNPCnBXZXc5eXljVVhoaDMwUUZGR2Q3VVJsNWNHN2NVUXNYTHFVSTRjalhqcWdGeHo4aHZyWHV0MU1wZXJHMi8vWTAKdnI5T29XU2V6OFFTTzk3cVJCak9VRm5CWWRhbG5CQitEZWFqbzFxZ1ZtbE5xRDN3eks3bXRtZzZoOU56RmNmcQprdWJrL0hVNERPQXVkcjA3N1lXWURnV091QWR3eGpVMGpQS2RrdjRNZngrSnUzVVgrL1lYWXdENUgyK2p6aTlICmxFNFkxME85cllDbDYrUXR5TkNEOHZDeGUvU0JPaWxEOWNqTjZPSHB0MVNRNnNwTEFuTlBZTDM5NzBaQ2ZTQ20KbFh3c0c0clhOQWVoRTR1Y0RqVXdJVzlBdEFlajVlM3dvYkQ1UnRxWExxRTdFRmtYblV2OEZ1cU9RTWYwQVFWbwprdW0ySVRLcWZVVG0vN2ZDR0FWTGtobFB3aGJtNVFJREFRQUJBb0lDQUMyR2hEc1pUaWtiTERQMlR6Q2VkOVVoCmJRUlpsbDdLaUxHcXZYNm9VdjhJcFNLdTJrS3h1blpkTzVvQk5NbzNnNTg4YzRSQkFrQ1d6dmJObzFjeDJ3UTQKSkd3ZTdYaGM5TDdYbUwxUFZjNWlJdnVYOFVBTFY3eUdwMXZONklPSC9BYVJsSFlZZHl3UURVSTcwZGZiMmJqRQo2d3dORHRVbk1Ea3NncjNLbExwamNiNEFla2dxWE9MRUFMMld1Nkt1T1hOankrdUU3b2hnVWN3bWlYWXZGb3VMCm1KYXVlS3l5U202NHdJZnpZQ1JwbUhHMVlCTGpic0xJb20zcmZYRkl3V1hqMkhBSGFIOFRWOVhyUmpwR2tEZm8KbFFqN3l0R0s2ZkllMWcva0ZBN3hDWDE2d1NYMS85bjM1WGYwVmMwZ08zdE9NVHJkM1JTVVNEaVp6eVR1WWxuZwpETEdmYXZjRS82QXJ5cTlWZ3hyUXdXbnZhd0hIcWxBWUtxVHpJYkRJS0Y3SjRYTE9FckFtRE50T1I2Lzc1WjJ3CnVPQlFYT0N3NFM1dWxWdzhIZUM0NGlFTmxJYU5lNDNWTkZUTGtRM3lCeW96VVlYWTN2eEJXMWpURFpFOTB5YTUKZzk4cmFiYWhIS0lockpGYzNXYTE0RWhicUE2TVVLSXRRTkk4K1N1Rk1KV3R4VW1iM1cxK2dHbXJvTmo1TU9kYQpzdjV5OThTYS93UUc4dGc0cmdNQ0xpQVNHL3hudDB3RURrNXFDVUUxRzRSdkdOeUYxU09zNk82c1BTOTg4Umd4CnJuamQvWWZoME5xVnhHcHFGNnhpQVgvZXkyU0NGUWNybEtmNnhGREF4YjI4RTdaNnRQSUZCTWxpQ1IrbzdYR3MKZDNvUWVuMThCalM1NjdtR2ZmNkJBb0lCQVFEanFFcHZqOVhJVVB
3bk1RZitRY3R0R1pXZEp2bFZSa1BSMW9maApSVWI2UHdFRkEwdVQyM011ZmFvNGI4bWIrM2Vra1BkYTZmbWJqUGFUckQrbk5YNGxyRE5oYytvcVY4aFVEQnA0CmpVcEg3OXorTVNUZVVQclpnS3VMeEdqaDJiK0FWYVZjZTI2STVYUXVoUnR6ZHFYZDlIeSs4YXpYRTltbHlPQ00KMUpEK2VHZWxhaVJMbEZBbVRDNDNoNlV5T0Q5SmZOSW1oWDQ2WDJRRlFsbGc1cWxVdWQ4Ukx3eFViZTJoYzhTWQp4VnVvYVZSSUdBSmhqRkd3ZVhnRjdzc0tQNXBZMHRkTlNvSGsxeHRnUmVJTlllZFU1cmtpKzloZTN0cStqWUdJCmxVcVVzYzNzN3c4cUk1UXk5NGdmcUI5Lzd4K3BFdGEvak9leE4yL1pGOFJGSXVucEFvSUJBUURaSUpUaUUxKzkKc2xnQ0NGVllLR3Z5aE5odkppck94enlOUWU3YjIvZmxQNzVHd0pTTWpZZTdoTmhGK3JrZHRJcXF5dWxyeGF3YgpPbWliU0FCSG5kT20ycDRMdDhaK20vQXZaRUgzVklLdWkwY0xVbTlKRXNsWURVcFIrdG5BemloNzdrS2FlVzlnCk1wdlpiUzZGdXE2ZlBZQUJyK3dXeU1IazR0UnRNZ3duUFRtSzZQTW85b3FIUURTSVJjL3N0N2hBTUwwMDdtNlEKOTJkRXRqNTNtSTBURTRISVhtY3hZbjV5NGVJLy85aEFMb2xFa0ZHWDU0SmNMdWpDWWkwQ3RIU0xDcnNmQkJwZgpDS2NaMk5sWFNiYVREU1prZWhnQWFWTlM2OVp1K1o2eGFvNmZZMjVxSnNmeXlaUkNjSzJYY0FoUDV2QWNUbWhQClNKUFJZc1dSNXZ1ZEFvSUJBRmtRRXFiWWg1TkprNHdsazNIMS9ZYWVGcmtYY1QzYU1sZ2FiS2hGdVFIWHVpZGkKNWFOZm5BMFpIb25idWV6ckVTQnhra09mKzRYT1BQMEN5eGc0UmpTb3pLVVlld2k3dE9Ta280cDhCQTVtbVhkYwpkSWNBK1ZJMEUyaW5tenlZT21JVG41Q3h2VW1UTXNPc1VWUDNtK1pjYXAwczRTaDNYSk9PSmNNU3VmTEQyaENOCm1NdDBwM0tFSlNTV1RadDdBODlWSk1YclBibktiYy9jNkNpUHRMa3Z5a1BudXhRZ3VYR0xYK05BZXA1RkxyTFIKcWNUTjUzdDUyZW5BUlBDcWQxQytrM3BxWnF6SE5xK1FSMkppNWVTQ0t2V3p2eTlHVWg5d0xyZm5aL2tLSW56SgovWTNIdzRlNDdTa3RWYjF3S0Z1MXdndklMVEJZZHNwZ2tPbFhRbGtDZ2dFQUtKYVJuazFXMldRc1ZYenZUMEtICkkxZTRDZGNOcTRmTkJ1N3JVc2drNkFMcGM5cHVLblFPaW54RDNaa0gzOGl2SDB3OUpEdFlkK0tNU1hMRk1wNEwKUWFhZVlyeGc2NndFMHljZnViZGZrbmRRdVlvWWFZV01nOXhBSjJFSU1hV1lKY3FkUXJrdW04SDZKa1BsclhQLwpUcDgxZlp0QU8rWWRjTWNDUk1OVlNFU0dyRFB0dUp1VnU4REIwVE9Uc2NHS1BOMmZrUFI5VUxZZTVOWllpUXpJCldtZU1IRU9oY0xiandsLzlaazlTUW5Vd2pkT1luUmZXNDVxVlFqa09CdkpxMHM4WHVhMlBySEkyb250SjdhcEcKNmVoTVkvMzYzS0RUeGExMmNWcFNVd0lEVlVKR0VxdmJOc1I5NVltZ3VhMWtzR01RUVlwYXIyOTJ5bTUzVmxYaQpkUUtDQVFCTUFYS0RaNVZobHBRR1VlUk1FNVhqVm1KOE1WdlZTUzV3NzBGOC9CS0ZnZFBJNnR2Nkx5UGh3OTRPCmxZVldoOXJmdUpmbWllK1ZJSWhaeSthUVNpVVhGQUtTdjZFTWJ1bXdjN1p
UNEkyYitDTXQxUEhaTEZtUEN0OXAKOEorUDdoaDlRYWRBYzZqZEdSa0NMNkpMU3VoeWhMbW90SG9IS0ZJazdhNENNZGl2QnB3SVdxMWVScHd0aWRrNwpIdytrdlJ5YW5DMUJVU1dYNGxJcW1LanAyR1B2UDVVdVV2RUlPNitqaWFyWTJDTUNKb3BtcVJ2WWQzNGtSVkF1CjZueFl4a05neEFQSnVWN2tkZVVzQXg5Q1FZcFQ1blFmendtdlVGa0FraHJoTmw5dUJRUDhMdkZORFQ0cWU0bFcKUWw0cXRFZFNiZDVxVWVVdkgzOG5JMmpTVDVMawotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
|
||||
wildcardTLS:
|
||||
# self signed wildcard cert for *.wlan.local
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZEekNDQXZlZ0F3SUJBZ0lVYSthaVJZWG9QTGliSS9wdVJCdi9DZ2RTTDNzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01LaTUzYkdGdUxteHZZMkZzTUI0WERUSXdNVEl5TVRJd05UQXpNVm9YRFRNdwpNVEl4T1RJd05UQXpNVm93RnpFVk1CTUdBMVVFQXd3TUtpNTNiR0Z1TG14dlkyRnNNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEwQjhuZE1raGIzZEN6N0I2aS9kRlFWenJwaEtQK2RmY2JKN1gKMzB3MU1FWDIvM3ZvVStkRlBNbnZrS3hEdFJwanZCbnhCczF6L1VmajlyUFhDSzZkazNNYU5DajRQYVUxem9WQQppK1Z6amc0ZG5XNjJxWG1qYVYrYUMrQm5zQndyQmx2THQ0dkh1eFRLbHh1RnR4THdid3VObGJvS1lVN01kV3oyCllMY3QzUWZBWElVb2FRK1RTMUZGdVdFeEZOTFRidlBGZWNoajB4ZnhyOU5BcU9aTXI0RGE0NHMzVERxMVVyRTIKbjhaZXFxVXZ1YUE4ZmEzQjNVZVRFNmJ4OEdhN1JybG5Dakd3UDRGVml3ajdvellmanV6T2JOM2dlcjdWcEpLMQpMSjdIdDBBemZlRm9aQ2xPbUVBd1p5alFwRGZOckdNTCs1dWtIK2JxWngyaUo5UndFcDNmdlE4em5jN1Q4dHJDCmxzMjRWNUpySWhUQXlCcTZRWVNnSXdXM2V5TmVpUnQ0ZHp6Sk5rNnd4cFp0WE96WTFwamJrV2FMaEhKOW1LRWoKU3lqVVBnS3dKSVlmb3BJbTJoUzl1dVZHZDdiU1MyV055aHJSOU5LSG5Lamo5Y0IrUWU0eEh0Z1pEcm1GZ1ZpZQp0cXZBUHhJL0ZkV1pSN3RmT2JCNTR3alMxVFk3TEd6cll4TDZSMWNjZDE4WUppcGNTS05xa05ORlllZ092VkNICmFldW1OTUdVNlZZalJWS1JmQXMva2FzcWxleGpheSt0SXNtd3dDZGoxUUN3UitRa2VEZFdoNE8vQzM1NENRb28KTkxZYzRNRk8xbVVDY2NsbUgvbFBvcTd1anBCMWI2VmFBNVprNFhjVUpRc0c4SUlSMDFHTFM5RW1HVVZaeTlOWApwV2dCbXNVQ0F3RUFBYU5UTUZFd0hRWURWUjBPQkJZRUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQjhHCkExVWRJd1FZTUJhQUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHcKRFFZSktvWklodmNOQVFFTEJRQURnZ0lCQUtlTW9PckhLZ3BGemtkckVhMVhSSVRUOHFZanhKc04yNmFIMFovLwp1dDRXVkE4ckNDNzV4VkpaNnpBQWlBOFE2eTRYSHBzRzl2ZSs1QlJIWEdCS0lYOU5FZGNrbWdNdExzQ2xOR0JCCkxkN3lWd3hhaGVCQzhVTWIrVTAxMlNwaFc3K0t6UFJhQ3g4cHNMMUlFQUkyblQ1MzlCNDBmR2NyTktNSDRqZGkKdkxad3VxT00rZnJucFJ1MkZlK3Bja2Fwek92SEJTb0I3THovR1dmMWUwZ0llc1B4WEdmVG9hbGM1SzU5bDF1TQpCTkhpUW15S3E4TS9MbllMejhyOWp3dHNKU2lLYUljelpISjNtQ0ZUb3ljREF3NTl3WEdmWXZWcFBMaWZXTTJxCm1uSlJKM2dQS1lzOUhXWFgyYktoSmZoMjRLOTN2M1duMVRUellYOGtTbWlnRG0wTUhOSTNwZktlMmJqVW9MNmgKMlQ4bWhRbjdPQ2dvZHMvOXczR1dOdmFxYTAySHRnc0tTbk9Ydm
pSNXFMaVFLRjdhMi95TTlrNWNQcUdHaW1GYgppUWV3eUgvMWw2YjQ0T0s2RGwwMVltWFltNUVqR3plckp2aU90eUhSaWhtME01VmNBWWJaYkE0S1Y1eHhLZjRqClIwaktwQXdqbEpzTDdRMk9zTC9IRkxmaDV1RU1HMXlmTzF1blZkVURKK1FBZHJQUG5tZTZVTVFQZm1UcGx1WjAKS3pvOXY3NEpYV0pwQkNtaWNTbFBQdnB3cXZLTk5iOWd6b0hjOXFheWMwNWVxRldRbzNNZjIzYU82b05wU2ZuaAp5aWMvczFQcC9ZS2FHakVSQXB1UmRvYTlWT1diUncycFZMei9rZVNraS9QTDJFRFc4RUVHYjFXcUFBMkJPVVhDCi9oYXQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFFIeWQweVNGdmQwTFAKc0hxTDkwVkJYT3VtRW8vNTE5eHNudGZmVERVd1JmYi9lK2hUNTBVOHllK1FyRU8xR21POEdmRUd6WFA5UitQMgpzOWNJcnAyVGN4bzBLUGc5cFRYT2hVQ0w1WE9PRGgyZGJyYXBlYU5wWDVvTDRHZXdIQ3NHVzh1M2k4ZTdGTXFYCkc0VzNFdkJ2QzQyVnVncGhUc3gxYlBaZ3R5M2RCOEJjaFNocEQ1TkxVVVc1WVRFVTB0TnU4OFY1eUdQVEYvR3YKMDBDbzVreXZnTnJqaXpkTU9yVlNzVGFmeGw2cXBTKzVvRHg5cmNIZFI1TVRwdkh3WnJ0R3VXY0tNYkEvZ1ZXTApDUHVqTmgrTzdNNXMzZUI2dnRXa2tyVXNuc2UzUUROOTRXaGtLVTZZUURCbktOQ2tOODJzWXd2N202UWY1dXBuCkhhSW4xSEFTbmQrOUR6T2R6dFB5MnNLV3piaFhrbXNpRk1ESUdycEJoS0FqQmJkN0kxNkpHM2gzUE1rMlRyREcKbG0xYzdOaldtTnVSWm91RWNuMllvU05MS05RK0FyQWtoaCtpa2liYUZMMjY1VVozdHRKTFpZM0tHdEgwMG9lYwpxT1Axd0g1QjdqRWUyQmtPdVlXQldKNjJxOEEvRWo4VjFabEh1MTg1c0huakNOTFZOanNzYk90akV2cEhWeHgzClh4Z21LbHhJbzJxUTAwVmg2QTY5VUlkcDY2WTB3WlRwVmlORlVwRjhDeitScXlxVjdHTnJMNjBpeWJEQUoyUFYKQUxCSDVDUjROMWFIZzc4TGZuZ0pDaWcwdGh6Z3dVN1daUUp4eVdZZitVK2lydTZPa0hWdnBWb0RsbVRoZHhRbApDd2J3Z2hIVFVZdEwwU1laUlZuTDAxZWxhQUdheFFJREFRQUJBb0lDQVFDUVRkbXN4enl3cmUrY1ZCQlVkaW9GCjdTalRhTEY5bWFlVGhQdkhMMjc5dnJWSlpoK3I5WUp6YU16NzhnV3NUOVR4ZXNjOVlUMVlVLzJEZENUWU4wSzUKRnlrSEc1VXNJUjVTeU4vOVlDWWtURE5La3BhQ29mMmxOWTE1U0twOFdMdVlXQlBEZTE4TW41anM5ejlhdGY0Ugo4Ti9GL2szdU5KWGRvYVNmWU1Pakt4bTh6UE05RFhpaTA0SlZ6RWNjMmlXU0crSkQwNmNybWNHUm1SZVBSTWZOCk5Mb1E1ZGw4dUlRN0J2Y0tCNkJpRDlFc2t5YitPWGxmTlo2TUZNaFNXTmpuYSt3L0REN1plWkxYcVczWk45RGYKNStBbGFoNlkzVE1EUGxueXkxRk5CVzN1alZrMWdkS21ESFBEUTNDUFBNWVdEa01qdlVJcWdKRHMySVl6dWIvTwpXRjRVUTV5UEJhZzluaWp1dS9uMVZDdGZuSkxwakZIakU1VzdkK3p1UGh6aUJ1WDFOcjRtOVVJdEpaSTNsYmJtCmdvZFlMdGl4b3RwNWF3ang1eXA3MU1zUHlTZzcrbHBPenA4dStuRENJcnc0K0VSME56MG8yTXFmcmJ2VklGQXIKWHIyc2YrejljbmtxalBWWEZaVks3em1TUHI5N0YrbTV4RHpURG9lTG53aVlhUUpOQ0ZhejhMVERjNldVT2w4SQpLOWhHd3FaK0llTlgreW16em16Nkx6WWVPaGlrRmNRaUI0UXVPSjdWWnZWRmVoS3JJMXJLWHJDRU01VmpJZXBkCkhzR0c1eTlLUkcxdEszSU5ScmI0SHlhRDF6SHJSTHRneFpLT1BvWDN0UjNmbTJ1aGova3dwelZnWTltRXJDWDkKd2I4SVA5TXdRR3REQVNBcjZWVmJvUUtDQVFFQThIeWlaK1lVeFE
zQnprL1hoNmZMWWNYakE3NTJrQ204VWZzWQp0d1Z4N0EyNW5YRStiUDRIT3UyVC9kTlc0Tkw1elZPT0JkaWZIcTJQVFNVMGYzQUFHL0pNcnVzM3NrNHd4azM5CitYYlh0dHltWkdxb3FEcVN3TUw2czVpY1RnangvenRhSXk1TWFKYWhUYUpNdFRQQlVpZ3U3enhoeGNwVlhNVUMKTklHcFl5Mkt5R2hyMjVVOFdlR0RYQm9SS2xYUXJXYkNZeW1kMXdYQStEaVl0dzA5eit0VHhPNTRodjFCZkJKZwpWMGd0VWdJU0I2WEZDMU9CWDZXQ1pXYlhCN2hPaHhISjNkNHAyQlZyN0gxL2JDQ0ZvVDY5by9WQVNHRmdtTHRiCnpGalRNbjFIaTluVW5jUFlScWpsN1h0NWdPOHBOa3BwMjVrNHIxRVludWhIazcrYzdRS0NBUUVBM1l3THozNloKNEVPRndvODIrUlVId2lkaFExOEdrU1JvWStKVm1udXJpSXdHZTk3ZmRTVk91d092SDlZSVhsRWpjRitoOHFQVQpJVnpIOXBuYXZjTENEMnhIOWZ5d09ML3pmYmJnYnExZjV4Y3BOUXlYM1JnTGFDUVpLNkpJa3NzOUtDb0dhSzlaCmpMVm41MjFFZlFBRE5DSi93YlRCb3dLQ0dTNDUzSzRBaWFEWHN6TkJLUk5MOHVaWWYwK0x0U2IzV3lkZVQ2eUgKdGZiSXR3NlBSS1lxb2NaeGIrM0pWQWFHcGxScjVZSlNDU1BtTjFMSjU0djlTcXBIVnJMNzJudFNwKzdDODJ6SgpJajVOSXFEOGFsOVZ3WFB5dExRd25hYWc1TW5ka0NLQ3R0MlVHSGZwMEh3ZTJTL1hkemppS2gzZTZaT3MyMSt1CitQUHVrSkUxTTZzU09RS0NBUUJjWVJRbDR6MUJRUHFjM1JESEhJN0UvVFlxWHdTK2RqblFLQ3VqU3FVcmIwNUoKQzVKV1hmSzdFVDVUTjliY3dFNlRNRENUVUZZM2U2WmJsUm9vaGdhVXRhdjlXWC9vcjU2TzNyRGNIbW5ZNWNQSgpPU3VXakFHSnFKeVRWdUZjSEpXUlhPUlFOVjNHbzI1Tkd6WnFPUHBmSys1em1mZFkrbE4yTW51WlhlR0twcGowClNTQjlsa003cDZSRlFnSXNDQkVFTzBBYXhZYkxiWHRtSHArVFdiUFA1ZThrN0JKQ2tKQ1NMNkR3aGxwYWNVOHAKdnVVRlo4dC95VjFneEhOL2xLNGR0cGliOE5hVUdnNStKdXRHeHV0dU9HS3kwK2dncGI5c2pEUkVPQzdRNjAwTApqTjdleDdlUjFSbVY4Mk9HUXRqSzhTVGU1V25mOXNBRmN1YmorNncxQW9JQkFHYXM4Z2hQRHpkOWM2OXd1alNFCkI1MTJyTUFSZVRTcEgrd3l5Q09aYnkwUVlDem1aTCtnODdUK2h4b0ZFc25MWnRZOHJBeU0ydEkvY3JrYUl1TlIKTUtqL01QYVREb1N1aVVWWkRQaWVSMVVOU2Q2NUlHU3FNUmNwcTdTcU9HSTM2UGNGU3dVWFJ6Uk1Hb1NLQW5UQQpIYnY2eFNUY0JlWHJVcW9pMzFRa0hFR3NsbXNKdFFnNVZqaVRncTQyQ25TQlE2QXVSYW85Tm9RaGhISTZRREc3CnBRUm11TW43OVJPSkZyeGRZY2Z6TnR2ZmxHRk5jQjlzcEk0SERwcml4cEJDR1ZPVTl5cmozdStNMmlqVFhVaGIKT0o0NGcySTJKRlhjRkxNVHp5aHVwZy9qN3kvTDIwUHhVa2Fyd25zUmxOZWFFbVpFTjVkUDZBS2U0cENEaTVtUApqaGtDZ2dFQkFMUmtVeG9oZDh2ZVBwR3hPbWlOak5HekpiTDlscGx0TWxhR0dPQ3JOUkZSeEppblgzWU9UVnhiCkRFVlpqaXRHNldydzFxaDdnZXAzeEdJaWZHQ1lZV3pNc0RZTitueGtwV0l
RRmZOV3dYemNRWlhrTEduZVlUdTAKSVU2RjY5Myt1Q0tkcHVCdVl0d3BQNEJCVkNCRTVON0FzRGV4bFBYTzk1cEw3ZzR4OG5RckdNeGJlRXVOdytaTwpPYmYvTnFFMGZZcURkaERiVHI0UDR6bUpBRlpYeDlKMjNJdWRMUFI3MDZITGZ5bDMrb1pUS2Y2ZWdEL1drWXZGCllLdEtDZzI1UmtSYmZBakZkeDlpOVkzcDlPNEFNVUNaRVFIOWQwU1d6LzJWR0VmYzVha09YL2xvWlAyUXF3c2UKeXMyc0k1U0Z5TEd1ZGM3R2MzVTd5UGd0RVN0elVoWT0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
sslKeyPassword: mypassword
|
||||
cassandra:
|
||||
enabled: true
|
||||
image:
|
||||
debug: true
|
||||
cluster:
|
||||
replicaCount: 1
|
||||
seedCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: postgres
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
@@ -1,258 +0,0 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 304
|
||||
nsPrefix: tip-2
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
# createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
address:
|
||||
ovsdb: opensync-controller.wlan.local
|
||||
mqtt: opensync-mqtt-broker.wlan.local
|
||||
persistence:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: "https://wlan-filestore.wlan.local"
|
||||
scalability:
|
||||
#how many concurrent connections single instance of OpenSyncGateway can accept
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: tip-wlan-2-wlan-portal-service:9051
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui-graphql-2.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui-graphql-2.wlan.local
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
graphql: https://wlan-ui-graphql-2.wlan.local
|
||||
service:
|
||||
type: NodePort
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui-2.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui-2.wlan.local
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
filestoreSize: 1Gi
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
enabled: true
|
||||
db:
|
||||
postgresUser:
|
||||
password: postgres
|
||||
tipUser:
|
||||
password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
postgres:
|
||||
singleDataSourceUsername: tip_user
|
||||
singleDataSourcePassword: tip_password
|
||||
singleDataSourceSslKeyPassword: mypassword
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
#max number of connections to PostgreSQL database
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
#max number of idle connections to PostgreSQL database
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
#max number of cached prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
#max number of cached idle prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
cassandra:
|
||||
tip_user: tip_user
|
||||
tip_password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
scalability:
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: false
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
sslKeyPassword: mypassword
|
||||
cassandra:
|
||||
enabled: true
|
||||
image:
|
||||
debug: true
|
||||
cluster:
|
||||
replicaCount: 1
|
||||
seedCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: postgres
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
@@ -1,20 +1,48 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment:
|
||||
testingEnabled:
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
#createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
common:
|
||||
efs-provisioner:
|
||||
enabled: true
|
||||
@@ -22,8 +50,7 @@ common:
|
||||
efsFileSystemId: fs-49a5104c
|
||||
awsRegion: us-west-2
|
||||
efsDnsName: fs-49a5104c.efs.us-west-2.amazonaws.com
|
||||
storageClass: aws-efs
|
||||
|
||||
storageClass: aws-efs
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
@@ -33,20 +60,17 @@ opensync-gw-cloud:
|
||||
persistence:
|
||||
enabled: false
|
||||
filestore:
|
||||
url: https://wlan-filestore.demo.lab.wlan.tip.build
|
||||
|
||||
url: "https://wlan-filestore.demo.lab.wlan.tip.build"
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: wlan-portal-svc.demo.lab.wlan.tip.build
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
@@ -57,14 +81,12 @@ wlan-cloud-static-portal:
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: aws-efs
|
||||
filestoreSize: 10Gi
|
||||
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -77,7 +99,6 @@ wlan-prov-service:
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -90,14 +111,12 @@ wlan-ssc-service:
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
|
||||
wlan-port-forwarding-gateway-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -105,28 +124,49 @@ wlan-port-forwarding-gateway-service:
|
||||
externallyVisible:
|
||||
host: api.wlan.demo.lab.wlan.tip.build
|
||||
port: 30401
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
config:
|
||||
externalStatusAddress: api.wlan.demo.lab.wlan.tip.build
|
||||
|
||||
externalStatusAddress: "api.wlan.demo.lab.wlan.tip.build"
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "gp2"
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
cassandra:
|
||||
enabled: true
|
||||
config:
|
||||
replicaCount: 3
|
||||
seedCount: 2
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 2Gi
|
||||
limits:
|
||||
cpu: 1000m
|
||||
memory: 4Gi
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: postgres
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
@@ -1,20 +1,51 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment:
|
||||
testingEnabled: true
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Integrated Deployment which deploys Prov Service, Portal Service and
|
||||
# SSC Service in a single docker image
|
||||
integratedDeployment: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
@@ -29,19 +60,16 @@ opensync-gw-cloud:
|
||||
prov:
|
||||
service: wlan-integrated-cloud-component-service
|
||||
port: 9092
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: wlan-portal-svc.tip.lab.connectus.ai
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
image:
|
||||
@@ -53,31 +81,44 @@ wlan-cloud-static-portal:
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
|
||||
wlan-integrated-cloud-component-service:
|
||||
enabled: true
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
config:
|
||||
externalStatusAddress: api.uswest2.tip.lab.connectus.ai
|
||||
|
||||
externalStatusAddress: "api.uswest2.tip.lab.connectus.ai"
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "gp2"
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
cassandra:
|
||||
enabled: true
|
||||
config:
|
||||
cluster_size: 3
|
||||
seed_size: 2
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: cG9zdGdyZXMxMjM=
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: gp2
|
||||
|
||||
storageClass: "gp2"
|
||||
@@ -1,21 +1,50 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Integrated Deployment which deploys Prov Service, Portal Service and
|
||||
# SSC Service in a single docker image
|
||||
integratedDeployment: true
|
||||
testingEnabled:
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
env:
|
||||
@@ -26,20 +55,16 @@ opensync-gw-cloud:
|
||||
prov:
|
||||
service: wlan-integrated-cloud-component-service
|
||||
port: 9092
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
|
||||
wlan-integrated-cloud-component-service:
|
||||
enabled: true
|
||||
image:
|
||||
@@ -54,7 +79,6 @@ wlan-integrated-cloud-component-service:
|
||||
password: tip-read
|
||||
integratedWithPersistence:
|
||||
enabled: true
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
@@ -63,21 +87,41 @@ nginx-ingress-controller:
|
||||
}
|
||||
config:
|
||||
externalStatusAddress: "192.168.56.101"
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
cassandra:
|
||||
enabled: true
|
||||
config:
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful when we use HostPath as storage mechanism
|
||||
## For Hostpath storage, recommendation is cluster_size: 3 and seed_size: 2
|
||||
cluster_size: 1
|
||||
seed_size: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: cG9zdGdyZXMxMjM=
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: false
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
storageClass: "glusterfs-storage"
|
||||
@@ -1,31 +1,48 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment: false
|
||||
testingEnabled:
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
common:
|
||||
efs-provisioner:
|
||||
enabled: false
|
||||
# createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
service:
|
||||
type: LoadBalancer
|
||||
enabled: true
|
||||
externalhost:
|
||||
address:
|
||||
@@ -35,72 +52,48 @@ opensync-gw-cloud:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: "https://wlan-filestore.wlan.local"
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
service:
|
||||
type: LoadBalancer
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: hostpath
|
||||
storageClass: standard
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: tip-wlan-wlan-portal-service:9051
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: wlan-graphql.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
- host: wlan-ui-graphql.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-graphql.wlan.local
|
||||
- wlan-ui-graphql.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
env:
|
||||
portalsvc: tip-wlan-portal-service:9051
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
graphql: https://wlan-graphql.wlan.local
|
||||
graphql: https://wlan-ui-graphql.wlan.local
|
||||
service:
|
||||
type: NodePort
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
- host: wlan-ui.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
|
||||
wlan-portal-service:
|
||||
service:
|
||||
type: NodePort
|
||||
nodePort_static: false
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: hostpath
|
||||
accessMode: ReadWriteOnce
|
||||
filestoreSize: 10Gi
|
||||
tsp:
|
||||
host: wlan-portal-svc.wlan.local
|
||||
ingress:
|
||||
enabled: true
|
||||
hosts:
|
||||
- host: wlan-portal-svc.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-portal-svc.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
|
||||
storageClass: standard
|
||||
filestoreSize: 1Gi
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -117,7 +110,6 @@ wlan-prov-service:
|
||||
singleDataSourceUsername: tip_user
|
||||
singleDataSourcePassword: tip_password
|
||||
singleDataSourceSslKeyPassword: mypassword
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -130,51 +122,121 @@ wlan-ssc-service:
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
|
||||
wlan-port-forwarding-gateway-service:
|
||||
enabled: true
|
||||
creds:
|
||||
websocketSessionTokenEncKey: MyToKeN0MyToKeN1
|
||||
externallyVisible:
|
||||
host: api.wlan.local
|
||||
port: 30401
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
nginxDebug: true
|
||||
service:
|
||||
type: LoadBalancer
|
||||
config:
|
||||
externalStatusAddress: "api.wlan.local"
|
||||
defaultTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZXakNDQTBLZ0F3SUJBZ0lVUU5hUC9zcHZSSHRCVEFLd1lSTndieFJmRkFzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hURWJNQmtHQTFVRUF3d1NkMnhoYmkxMWFTNTNiR0Z1TG14dlkyRnNNQjRYRFRJd01EZ3lOekl3TWpZMQpObG9YRFRNd01EZ3lOVEl3TWpZMU5sb3dIVEViTUJrR0ExVUVBd3dTZDJ4aGJpMTFhUzUzYkdGdUxteHZZMkZzCk1JSUNJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBZzhBTUlJQ0NnS0NBZ0VBd1JhZ2lEV3pDTllCdFd3QmNLK2YKVGtrUW1NdCtRQWdUallyMEtTOERQSkNKZjZLa1BmWkhDdTN3NEx2cnh6WTlObWllaDJYVTgzNGFtZEp4SXVDdwo2SWJObzZ6c2tqc3lmb084d0ZEbWxMVldMZWc1SDlHOWRvZW0rV1RlS1BhRUhpM29xdXpOZ3Q2d0xzM212dk9BClR2aVRJb2M4OEVMams0ZFNSMlQ0ZGhoMHFLQ0NqK0hkWEJBNlYvOWJpcnUralYrL2t4RVF1TDJ6TTM5RHZWZDgKOWtzMzV6TVZVemUzNmxENElDT25sN2hnYVROQmk0NU85c2RMRDBZYVVtamlGd1FsdEpVZG1QS3BhQWRidmpVTwpuc3VwbkRZam0rVW0rOWFFcHFNNHRlMjNlZkM4TjhqMXVrZXh6SnJFMkdlRi9XQi9ZMUxGSUcyd2pxVm5zUGNzCm5GRjRZZDlFQlJSbmUxRVplWEJ1M0ZFTEZ5NmxDT0hJMTQ2b0JjYy9JYjYxN3JkVEtYcXh0di8yTkw2L1RxRmsKbnMvRUVqdmU2a1FZemxCWndXSFdwWndRZmczbW82TmFvRlpwVGFnOThNeXU1clpvT29mVGN4WEg2cExtNVB4MQpPQXpnTG5hOU8rMkZtQTRGanJnSGNNWTFOSXp5blpMK0RIOGZpYnQxRi92MkYyTUErUjl2bzg0dlI1Uk9HTmRECnZhMkFwZXZrTGNqUWcvTHdzWHYwZ1RvcFEvWEl6ZWpoNmJkVWtPcktTd0p6VDJDOS9lOUdRbjBncHBWOExCdUsKMXpRSG9ST0xuQTQxTUNGdlFMUUhvK1h0OEtHdytVYmFseTZoT3hCWkY1MUwvQmJxamtESDlBRUZhSkxwdGlFeQpxbjFFNXYrM3doZ0ZTNUlaVDhJVzV1VUNBd0VBQWFPQmtUQ0JqakFkQmdOVkhRNEVGZ1FVeTJiQVV5TlBYSFM5CjNWVFNEK3dvTjd0M3E4RXdId1lEVlIwakJCZ3dGb0FVeTJiQVV5TlBYSFM5M1ZUU0Qrd29ON3QzcThFd0R3WUQKVlIwVEFRSC9CQVV3QXdFQi96QTdCZ05WSFJFRU5EQXlnaHAzYkdGdUxYVnBMV2R5WVhCb2NXd3VkMnhoYmk1cwpiMk5oYklJT1lYQnBMbmRzWVc0dWJHOWpZV3lIQk1Db0FBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBS0grCmJxSmVlMTFuMzRTWWdCRHZnb1o4bEpMUVJ3c0ZucUV4Y1NyL3BsWjdHVklHRkg1L1EyS3lvOVZ5RWlUUHdySXMKS3NFckMxZXZINnh0MVVSZk16cDA1elZRMExZTTUra3NhbVJEYWdBZzNNMWNtN29LT2Rtcy9kcXpQZTJnWmZHSgpwVmR0VlcxQ0hyTDBSTFRSOTNoN2tnU2lCbFNFSVlNb2VLZk41SDlBYXZKNEtyeXlnUXM2M2trR1E1TTllc0FwCnU2YkIzMDd6eWZ6Z1MzdG1Rc1UwMXJnSmZoRUhRL1krQWs5d0R1T2d2bWZ4MFRXZ0FPR2JLcTZUdThNS1lkZWoKSWU3clYxRzVVdjdLZmdvelZYNzZnMktkblRWQmZzcFNLbzN6eX
Jaa2NrekFwdlV1OUllZkhkVG9lNEpNRVUweQpmazdsRVUvZXh6Qnl5TnhwKzZoZHUvWklnM3hiMXlBMW9WWThORWQxckwxekFWaVBlMzUxU0VORUtlSnBSYW5DCmtDTDNSQUZrYnhRN0loYWNqb3g4YmVsUitnbW84Y3lGWnBqOVhhb1BsU0ZTY2R3ejU3M0NUMGg5N3Y3NkE3c3cKeUMrQ2lTcDg1Z1dFVjV2Z0JpdE5KN1I5b25qQmRzdUgybGdFdE1EM0pOT3M4Y0NTUmloWXhyaXdaU3FoVDdvLwp0Y0lsY0o4NFc1bTZYNnpISjNHbXR1S0czUVBOT21zMC9WVm9EVHA5cWRwTCtFazE3dUIyQTQxTnB4ejNVUytsCjZ5SytwZFFRajdBTHpLdVJmT3lnODBYYk53MnY0U25wSTVxYlhGQlJ1bTUyZjg2c1BlbUZxMUtjdU5XZTRFVkMKeERHM2VLbHUrZGxsVXRLeC9QTjZ5ZmxiVDV4Y0dnY2Rtcnd6UmFXUwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRUUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Nzd2dna25BZ0VBQW9JQ0FRREJGcUNJTmJNSTFnRzEKYkFGd3I1OU9TUkNZeTM1QUNCT05pdlFwTHdNOGtJbC9vcVE5OWtjSzdmRGd1K3ZITmowMmFKNkhaZFR6ZmhxWgowbkVpNExEb2hzMmpyT3lTT3pKK2c3ekFVT2FVdFZZdDZEa2YwYjEyaDZiNVpONG85b1FlTGVpcTdNMkMzckF1CnplYSs4NEJPK0pNaWh6endRdU9UaDFKSFpQaDJHSFNvb0lLUDRkMWNFRHBYLzF1S3U3Nk5YNytURVJDNHZiTXoKZjBPOVYzejJTemZuTXhWVE43ZnFVUGdnSTZlWHVHQnBNMEdMams3Mngwc1BSaHBTYU9JWEJDVzBsUjJZOHFsbwpCMXUrTlE2ZXk2bWNOaU9iNVNiNzFvU21vemkxN2JkNThMdzN5UFc2UjdITW1zVFlaNFg5WUg5alVzVWdiYkNPCnBXZXc5eXljVVhoaDMwUUZGR2Q3VVJsNWNHN2NVUXNYTHFVSTRjalhqcWdGeHo4aHZyWHV0MU1wZXJHMi8vWTAKdnI5T29XU2V6OFFTTzk3cVJCak9VRm5CWWRhbG5CQitEZWFqbzFxZ1ZtbE5xRDN3eks3bXRtZzZoOU56RmNmcQprdWJrL0hVNERPQXVkcjA3N1lXWURnV091QWR3eGpVMGpQS2RrdjRNZngrSnUzVVgrL1lYWXdENUgyK2p6aTlICmxFNFkxME85cllDbDYrUXR5TkNEOHZDeGUvU0JPaWxEOWNqTjZPSHB0MVNRNnNwTEFuTlBZTDM5NzBaQ2ZTQ20KbFh3c0c0clhOQWVoRTR1Y0RqVXdJVzlBdEFlajVlM3dvYkQ1UnRxWExxRTdFRmtYblV2OEZ1cU9RTWYwQVFWbwprdW0ySVRLcWZVVG0vN2ZDR0FWTGtobFB3aGJtNVFJREFRQUJBb0lDQUMyR2hEc1pUaWtiTERQMlR6Q2VkOVVoCmJRUlpsbDdLaUxHcXZYNm9VdjhJcFNLdTJrS3h1blpkTzVvQk5NbzNnNTg4YzRSQkFrQ1d6dmJObzFjeDJ3UTQKSkd3ZTdYaGM5TDdYbUwxUFZjNWlJdnVYOFVBTFY3eUdwMXZONklPSC9BYVJsSFlZZHl3UURVSTcwZGZiMmJqRQo2d3dORHRVbk1Ea3NncjNLbExwamNiNEFla2dxWE9MRUFMMld1Nkt1T1hOankrdUU3b2hnVWN3bWlYWXZGb3VMCm1KYXVlS3l5U202NHdJZnpZQ1JwbUhHMVlCTGpic0xJb20zcmZYRkl3V1hqMkhBSGFIOFRWOVhyUmpwR2tEZm8KbFFqN3l0R0s2ZkllMWcva0ZBN3hDWDE2d1NYMS85bjM1WGYwVmMwZ08zdE9NVHJkM1JTVVNEaVp6eVR1WWxuZwpETEdmYXZjRS82QXJ5cTlWZ3hyUXdXbnZhd0hIcWxBWUtxVHpJYkRJS0Y3SjRYTE9FckFtRE50T1I2Lzc1WjJ3CnVPQlFYT0N3NFM1dWxWdzhIZUM0NGlFTmxJYU5lNDNWTkZUTGtRM3lCeW96VVlYWTN2eEJXMWpURFpFOTB5YTUKZzk4cmFiYWhIS0lockpGYzNXYTE0RWhicUE2TVVLSXRRTkk4K1N1Rk1KV3R4VW1iM1cxK2dHbXJvTmo1TU9kYQpzdjV5OThTYS93UUc4dGc0cmdNQ0xpQVNHL3hudDB3RURrNXFDVUUxRzRSdkdOeUYxU09zNk82c1BTOTg4Umd4CnJuamQvWWZoME5xVnhHcHFGNnhpQVgvZXkyU0NGUWNybEtmNnhGREF4YjI4RTdaNnRQSUZCTWxpQ1IrbzdYR3MKZDNvUWVuMThCalM1NjdtR2ZmNkJBb0lCQVFEanFFcHZqOVhJVVB
3bk1RZitRY3R0R1pXZEp2bFZSa1BSMW9maApSVWI2UHdFRkEwdVQyM011ZmFvNGI4bWIrM2Vra1BkYTZmbWJqUGFUckQrbk5YNGxyRE5oYytvcVY4aFVEQnA0CmpVcEg3OXorTVNUZVVQclpnS3VMeEdqaDJiK0FWYVZjZTI2STVYUXVoUnR6ZHFYZDlIeSs4YXpYRTltbHlPQ00KMUpEK2VHZWxhaVJMbEZBbVRDNDNoNlV5T0Q5SmZOSW1oWDQ2WDJRRlFsbGc1cWxVdWQ4Ukx3eFViZTJoYzhTWQp4VnVvYVZSSUdBSmhqRkd3ZVhnRjdzc0tQNXBZMHRkTlNvSGsxeHRnUmVJTlllZFU1cmtpKzloZTN0cStqWUdJCmxVcVVzYzNzN3c4cUk1UXk5NGdmcUI5Lzd4K3BFdGEvak9leE4yL1pGOFJGSXVucEFvSUJBUURaSUpUaUUxKzkKc2xnQ0NGVllLR3Z5aE5odkppck94enlOUWU3YjIvZmxQNzVHd0pTTWpZZTdoTmhGK3JrZHRJcXF5dWxyeGF3YgpPbWliU0FCSG5kT20ycDRMdDhaK20vQXZaRUgzVklLdWkwY0xVbTlKRXNsWURVcFIrdG5BemloNzdrS2FlVzlnCk1wdlpiUzZGdXE2ZlBZQUJyK3dXeU1IazR0UnRNZ3duUFRtSzZQTW85b3FIUURTSVJjL3N0N2hBTUwwMDdtNlEKOTJkRXRqNTNtSTBURTRISVhtY3hZbjV5NGVJLy85aEFMb2xFa0ZHWDU0SmNMdWpDWWkwQ3RIU0xDcnNmQkJwZgpDS2NaMk5sWFNiYVREU1prZWhnQWFWTlM2OVp1K1o2eGFvNmZZMjVxSnNmeXlaUkNjSzJYY0FoUDV2QWNUbWhQClNKUFJZc1dSNXZ1ZEFvSUJBRmtRRXFiWWg1TkprNHdsazNIMS9ZYWVGcmtYY1QzYU1sZ2FiS2hGdVFIWHVpZGkKNWFOZm5BMFpIb25idWV6ckVTQnhra09mKzRYT1BQMEN5eGc0UmpTb3pLVVlld2k3dE9Ta280cDhCQTVtbVhkYwpkSWNBK1ZJMEUyaW5tenlZT21JVG41Q3h2VW1UTXNPc1VWUDNtK1pjYXAwczRTaDNYSk9PSmNNU3VmTEQyaENOCm1NdDBwM0tFSlNTV1RadDdBODlWSk1YclBibktiYy9jNkNpUHRMa3Z5a1BudXhRZ3VYR0xYK05BZXA1RkxyTFIKcWNUTjUzdDUyZW5BUlBDcWQxQytrM3BxWnF6SE5xK1FSMkppNWVTQ0t2V3p2eTlHVWg5d0xyZm5aL2tLSW56SgovWTNIdzRlNDdTa3RWYjF3S0Z1MXdndklMVEJZZHNwZ2tPbFhRbGtDZ2dFQUtKYVJuazFXMldRc1ZYenZUMEtICkkxZTRDZGNOcTRmTkJ1N3JVc2drNkFMcGM5cHVLblFPaW54RDNaa0gzOGl2SDB3OUpEdFlkK0tNU1hMRk1wNEwKUWFhZVlyeGc2NndFMHljZnViZGZrbmRRdVlvWWFZV01nOXhBSjJFSU1hV1lKY3FkUXJrdW04SDZKa1BsclhQLwpUcDgxZlp0QU8rWWRjTWNDUk1OVlNFU0dyRFB0dUp1VnU4REIwVE9Uc2NHS1BOMmZrUFI5VUxZZTVOWllpUXpJCldtZU1IRU9oY0xiandsLzlaazlTUW5Vd2pkT1luUmZXNDVxVlFqa09CdkpxMHM4WHVhMlBySEkyb250SjdhcEcKNmVoTVkvMzYzS0RUeGExMmNWcFNVd0lEVlVKR0VxdmJOc1I5NVltZ3VhMWtzR01RUVlwYXIyOTJ5bTUzVmxYaQpkUUtDQVFCTUFYS0RaNVZobHBRR1VlUk1FNVhqVm1KOE1WdlZTUzV3NzBGOC9CS0ZnZFBJNnR2Nkx5UGh3OTRPCmxZVldoOXJmdUpmbWllK1ZJSWhaeSthUVNpVVhGQUtTdjZFTWJ1bXdjN1p
UNEkyYitDTXQxUEhaTEZtUEN0OXAKOEorUDdoaDlRYWRBYzZqZEdSa0NMNkpMU3VoeWhMbW90SG9IS0ZJazdhNENNZGl2QnB3SVdxMWVScHd0aWRrNwpIdytrdlJ5YW5DMUJVU1dYNGxJcW1LanAyR1B2UDVVdVV2RUlPNitqaWFyWTJDTUNKb3BtcVJ2WWQzNGtSVkF1CjZueFl4a05neEFQSnVWN2tkZVVzQXg5Q1FZcFQ1blFmendtdlVGa0FraHJoTmw5dUJRUDhMdkZORFQ0cWU0bFcKUWw0cXRFZFNiZDVxVWVVdkgzOG5JMmpTVDVMawotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
|
||||
wildcardTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZEekNDQXZlZ0F3SUJBZ0lVYSthaVJZWG9QTGliSS9wdVJCdi9DZ2RTTDNzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01LaTUzYkdGdUxteHZZMkZzTUI0WERUSXdNVEl5TVRJd05UQXpNVm9YRFRNdwpNVEl4T1RJd05UQXpNVm93RnpFVk1CTUdBMVVFQXd3TUtpNTNiR0Z1TG14dlkyRnNNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEwQjhuZE1raGIzZEN6N0I2aS9kRlFWenJwaEtQK2RmY2JKN1gKMzB3MU1FWDIvM3ZvVStkRlBNbnZrS3hEdFJwanZCbnhCczF6L1VmajlyUFhDSzZkazNNYU5DajRQYVUxem9WQQppK1Z6amc0ZG5XNjJxWG1qYVYrYUMrQm5zQndyQmx2THQ0dkh1eFRLbHh1RnR4THdid3VObGJvS1lVN01kV3oyCllMY3QzUWZBWElVb2FRK1RTMUZGdVdFeEZOTFRidlBGZWNoajB4ZnhyOU5BcU9aTXI0RGE0NHMzVERxMVVyRTIKbjhaZXFxVXZ1YUE4ZmEzQjNVZVRFNmJ4OEdhN1JybG5Dakd3UDRGVml3ajdvellmanV6T2JOM2dlcjdWcEpLMQpMSjdIdDBBemZlRm9aQ2xPbUVBd1p5alFwRGZOckdNTCs1dWtIK2JxWngyaUo5UndFcDNmdlE4em5jN1Q4dHJDCmxzMjRWNUpySWhUQXlCcTZRWVNnSXdXM2V5TmVpUnQ0ZHp6Sk5rNnd4cFp0WE96WTFwamJrV2FMaEhKOW1LRWoKU3lqVVBnS3dKSVlmb3BJbTJoUzl1dVZHZDdiU1MyV055aHJSOU5LSG5Lamo5Y0IrUWU0eEh0Z1pEcm1GZ1ZpZQp0cXZBUHhJL0ZkV1pSN3RmT2JCNTR3alMxVFk3TEd6cll4TDZSMWNjZDE4WUppcGNTS05xa05ORlllZ092VkNICmFldW1OTUdVNlZZalJWS1JmQXMva2FzcWxleGpheSt0SXNtd3dDZGoxUUN3UitRa2VEZFdoNE8vQzM1NENRb28KTkxZYzRNRk8xbVVDY2NsbUgvbFBvcTd1anBCMWI2VmFBNVprNFhjVUpRc0c4SUlSMDFHTFM5RW1HVVZaeTlOWApwV2dCbXNVQ0F3RUFBYU5UTUZFd0hRWURWUjBPQkJZRUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQjhHCkExVWRJd1FZTUJhQUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHcKRFFZSktvWklodmNOQVFFTEJRQURnZ0lCQUtlTW9PckhLZ3BGemtkckVhMVhSSVRUOHFZanhKc04yNmFIMFovLwp1dDRXVkE4ckNDNzV4VkpaNnpBQWlBOFE2eTRYSHBzRzl2ZSs1QlJIWEdCS0lYOU5FZGNrbWdNdExzQ2xOR0JCCkxkN3lWd3hhaGVCQzhVTWIrVTAxMlNwaFc3K0t6UFJhQ3g4cHNMMUlFQUkyblQ1MzlCNDBmR2NyTktNSDRqZGkKdkxad3VxT00rZnJucFJ1MkZlK3Bja2Fwek92SEJTb0I3THovR1dmMWUwZ0llc1B4WEdmVG9hbGM1SzU5bDF1TQpCTkhpUW15S3E4TS9MbllMejhyOWp3dHNKU2lLYUljelpISjNtQ0ZUb3ljREF3NTl3WEdmWXZWcFBMaWZXTTJxCm1uSlJKM2dQS1lzOUhXWFgyYktoSmZoMjRLOTN2M1duMVRUellYOGtTbWlnRG0wTUhOSTNwZktlMmJqVW9MNmgKMlQ4bWhRbjdPQ2dvZHMvOXczR1dOdmFxYTAySHRnc0tTbk9Ydm
pSNXFMaVFLRjdhMi95TTlrNWNQcUdHaW1GYgppUWV3eUgvMWw2YjQ0T0s2RGwwMVltWFltNUVqR3plckp2aU90eUhSaWhtME01VmNBWWJaYkE0S1Y1eHhLZjRqClIwaktwQXdqbEpzTDdRMk9zTC9IRkxmaDV1RU1HMXlmTzF1blZkVURKK1FBZHJQUG5tZTZVTVFQZm1UcGx1WjAKS3pvOXY3NEpYV0pwQkNtaWNTbFBQdnB3cXZLTk5iOWd6b0hjOXFheWMwNWVxRldRbzNNZjIzYU82b05wU2ZuaAp5aWMvczFQcC9ZS2FHakVSQXB1UmRvYTlWT1diUncycFZMei9rZVNraS9QTDJFRFc4RUVHYjFXcUFBMkJPVVhDCi9oYXQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFFIeWQweVNGdmQwTFAKc0hxTDkwVkJYT3VtRW8vNTE5eHNudGZmVERVd1JmYi9lK2hUNTBVOHllK1FyRU8xR21POEdmRUd6WFA5UitQMgpzOWNJcnAyVGN4bzBLUGc5cFRYT2hVQ0w1WE9PRGgyZGJyYXBlYU5wWDVvTDRHZXdIQ3NHVzh1M2k4ZTdGTXFYCkc0VzNFdkJ2QzQyVnVncGhUc3gxYlBaZ3R5M2RCOEJjaFNocEQ1TkxVVVc1WVRFVTB0TnU4OFY1eUdQVEYvR3YKMDBDbzVreXZnTnJqaXpkTU9yVlNzVGFmeGw2cXBTKzVvRHg5cmNIZFI1TVRwdkh3WnJ0R3VXY0tNYkEvZ1ZXTApDUHVqTmgrTzdNNXMzZUI2dnRXa2tyVXNuc2UzUUROOTRXaGtLVTZZUURCbktOQ2tOODJzWXd2N202UWY1dXBuCkhhSW4xSEFTbmQrOUR6T2R6dFB5MnNLV3piaFhrbXNpRk1ESUdycEJoS0FqQmJkN0kxNkpHM2gzUE1rMlRyREcKbG0xYzdOaldtTnVSWm91RWNuMllvU05MS05RK0FyQWtoaCtpa2liYUZMMjY1VVozdHRKTFpZM0tHdEgwMG9lYwpxT1Axd0g1QjdqRWUyQmtPdVlXQldKNjJxOEEvRWo4VjFabEh1MTg1c0huakNOTFZOanNzYk90akV2cEhWeHgzClh4Z21LbHhJbzJxUTAwVmg2QTY5VUlkcDY2WTB3WlRwVmlORlVwRjhDeitScXlxVjdHTnJMNjBpeWJEQUoyUFYKQUxCSDVDUjROMWFIZzc4TGZuZ0pDaWcwdGh6Z3dVN1daUUp4eVdZZitVK2lydTZPa0hWdnBWb0RsbVRoZHhRbApDd2J3Z2hIVFVZdEwwU1laUlZuTDAxZWxhQUdheFFJREFRQUJBb0lDQVFDUVRkbXN4enl3cmUrY1ZCQlVkaW9GCjdTalRhTEY5bWFlVGhQdkhMMjc5dnJWSlpoK3I5WUp6YU16NzhnV3NUOVR4ZXNjOVlUMVlVLzJEZENUWU4wSzUKRnlrSEc1VXNJUjVTeU4vOVlDWWtURE5La3BhQ29mMmxOWTE1U0twOFdMdVlXQlBEZTE4TW41anM5ejlhdGY0Ugo4Ti9GL2szdU5KWGRvYVNmWU1Pakt4bTh6UE05RFhpaTA0SlZ6RWNjMmlXU0crSkQwNmNybWNHUm1SZVBSTWZOCk5Mb1E1ZGw4dUlRN0J2Y0tCNkJpRDlFc2t5YitPWGxmTlo2TUZNaFNXTmpuYSt3L0REN1plWkxYcVczWk45RGYKNStBbGFoNlkzVE1EUGxueXkxRk5CVzN1alZrMWdkS21ESFBEUTNDUFBNWVdEa01qdlVJcWdKRHMySVl6dWIvTwpXRjRVUTV5UEJhZzluaWp1dS9uMVZDdGZuSkxwakZIakU1VzdkK3p1UGh6aUJ1WDFOcjRtOVVJdEpaSTNsYmJtCmdvZFlMdGl4b3RwNWF3ang1eXA3MU1zUHlTZzcrbHBPenA4dStuRENJcnc0K0VSME56MG8yTXFmcmJ2VklGQXIKWHIyc2YrejljbmtxalBWWEZaVks3em1TUHI5N0YrbTV4RHpURG9lTG53aVlhUUpOQ0ZhejhMVERjNldVT2w4SQpLOWhHd3FaK0llTlgreW16em16Nkx6WWVPaGlrRmNRaUI0UXVPSjdWWnZWRmVoS3JJMXJLWHJDRU01VmpJZXBkCkhzR0c1eTlLUkcxdEszSU5ScmI0SHlhRDF6SHJSTHRneFpLT1BvWDN0UjNmbTJ1aGova3dwelZnWTltRXJDWDkKd2I4SVA5TXdRR3REQVNBcjZWVmJvUUtDQVFFQThIeWlaK1lVeFE
zQnprL1hoNmZMWWNYakE3NTJrQ204VWZzWQp0d1Z4N0EyNW5YRStiUDRIT3UyVC9kTlc0Tkw1elZPT0JkaWZIcTJQVFNVMGYzQUFHL0pNcnVzM3NrNHd4azM5CitYYlh0dHltWkdxb3FEcVN3TUw2czVpY1RnangvenRhSXk1TWFKYWhUYUpNdFRQQlVpZ3U3enhoeGNwVlhNVUMKTklHcFl5Mkt5R2hyMjVVOFdlR0RYQm9SS2xYUXJXYkNZeW1kMXdYQStEaVl0dzA5eit0VHhPNTRodjFCZkJKZwpWMGd0VWdJU0I2WEZDMU9CWDZXQ1pXYlhCN2hPaHhISjNkNHAyQlZyN0gxL2JDQ0ZvVDY5by9WQVNHRmdtTHRiCnpGalRNbjFIaTluVW5jUFlScWpsN1h0NWdPOHBOa3BwMjVrNHIxRVludWhIazcrYzdRS0NBUUVBM1l3THozNloKNEVPRndvODIrUlVId2lkaFExOEdrU1JvWStKVm1udXJpSXdHZTk3ZmRTVk91d092SDlZSVhsRWpjRitoOHFQVQpJVnpIOXBuYXZjTENEMnhIOWZ5d09ML3pmYmJnYnExZjV4Y3BOUXlYM1JnTGFDUVpLNkpJa3NzOUtDb0dhSzlaCmpMVm41MjFFZlFBRE5DSi93YlRCb3dLQ0dTNDUzSzRBaWFEWHN6TkJLUk5MOHVaWWYwK0x0U2IzV3lkZVQ2eUgKdGZiSXR3NlBSS1lxb2NaeGIrM0pWQWFHcGxScjVZSlNDU1BtTjFMSjU0djlTcXBIVnJMNzJudFNwKzdDODJ6SgpJajVOSXFEOGFsOVZ3WFB5dExRd25hYWc1TW5ka0NLQ3R0MlVHSGZwMEh3ZTJTL1hkemppS2gzZTZaT3MyMSt1CitQUHVrSkUxTTZzU09RS0NBUUJjWVJRbDR6MUJRUHFjM1JESEhJN0UvVFlxWHdTK2RqblFLQ3VqU3FVcmIwNUoKQzVKV1hmSzdFVDVUTjliY3dFNlRNRENUVUZZM2U2WmJsUm9vaGdhVXRhdjlXWC9vcjU2TzNyRGNIbW5ZNWNQSgpPU3VXakFHSnFKeVRWdUZjSEpXUlhPUlFOVjNHbzI1Tkd6WnFPUHBmSys1em1mZFkrbE4yTW51WlhlR0twcGowClNTQjlsa003cDZSRlFnSXNDQkVFTzBBYXhZYkxiWHRtSHArVFdiUFA1ZThrN0JKQ2tKQ1NMNkR3aGxwYWNVOHAKdnVVRlo4dC95VjFneEhOL2xLNGR0cGliOE5hVUdnNStKdXRHeHV0dU9HS3kwK2dncGI5c2pEUkVPQzdRNjAwTApqTjdleDdlUjFSbVY4Mk9HUXRqSzhTVGU1V25mOXNBRmN1YmorNncxQW9JQkFHYXM4Z2hQRHpkOWM2OXd1alNFCkI1MTJyTUFSZVRTcEgrd3l5Q09aYnkwUVlDem1aTCtnODdUK2h4b0ZFc25MWnRZOHJBeU0ydEkvY3JrYUl1TlIKTUtqL01QYVREb1N1aVVWWkRQaWVSMVVOU2Q2NUlHU3FNUmNwcTdTcU9HSTM2UGNGU3dVWFJ6Uk1Hb1NLQW5UQQpIYnY2eFNUY0JlWHJVcW9pMzFRa0hFR3NsbXNKdFFnNVZqaVRncTQyQ25TQlE2QXVSYW85Tm9RaGhISTZRREc3CnBRUm11TW43OVJPSkZyeGRZY2Z6TnR2ZmxHRk5jQjlzcEk0SERwcml4cEJDR1ZPVTl5cmozdStNMmlqVFhVaGIKT0o0NGcySTJKRlhjRkxNVHp5aHVwZy9qN3kvTDIwUHhVa2Fyd25zUmxOZWFFbVpFTjVkUDZBS2U0cENEaTVtUApqaGtDZ2dFQkFMUmtVeG9oZDh2ZVBwR3hPbWlOak5HekpiTDlscGx0TWxhR0dPQ3JOUkZSeEppblgzWU9UVnhiCkRFVlpqaXRHNldydzFxaDdnZXAzeEdJaWZHQ1lZV3pNc0RZTitueGtwV0l
RRmZOV3dYemNRWlhrTEduZVlUdTAKSVU2RjY5Myt1Q0tkcHVCdVl0d3BQNEJCVkNCRTVON0FzRGV4bFBYTzk1cEw3ZzR4OG5RckdNeGJlRXVOdytaTwpPYmYvTnFFMGZZcURkaERiVHI0UDR6bUpBRlpYeDlKMjNJdWRMUFI3MDZITGZ5bDMrb1pUS2Y2ZWdEL1drWXZGCllLdEtDZzI1UmtSYmZBakZkeDlpOVkzcDlPNEFNVUNaRVFIOWQwU1d6LzJWR0VmYzVha09YL2xvWlAyUXF3c2UKeXMyc0k1U0Z5TEd1ZGM3R2MzVTd5UGd0RVN0elVoWT0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
|
||||
kafka:
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: hostpath
|
||||
storageClass: standard
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
sslKeyPassword: mypassword
|
||||
|
||||
cassandra:
|
||||
enabled: true
|
||||
tlsEncryptionSecretName: tip-common-cassandra-certs
|
||||
image:
|
||||
debug: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: hostpath
|
||||
|
||||
storageClass: "hostpath"
|
||||
replicaCount: 1
|
||||
cluster:
|
||||
name: TipWlanCluster
|
||||
seedCount: 1
|
||||
internodeEncryption: all
|
||||
clientEncryption: true
|
||||
exporter:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
additionalLabels:
|
||||
release: prometheus-operator
|
||||
dbUser:
|
||||
user: cassandra
|
||||
password: cassandra
|
||||
resources:
|
||||
limits: {}
|
||||
requests:
|
||||
cpu: 1
|
||||
memory: 3Gi
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlDatabase: tip
|
||||
image:
|
||||
debug: true
|
||||
metrics:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
namespace: monitoring
|
||||
additionalLabels:
|
||||
release: prometheus-operator
|
||||
postgresqlUsername: postgres
|
||||
postgresqlPassword: postgres
|
||||
pgHbaConfiguration: |
|
||||
hostssl replication repl_user 0.0.0.0/0 md5 clientcert=0
|
||||
hostssl postgres postgres 0.0.0.0/0 cert clientcert=1
|
||||
hostssl postgres postgres ::/0 cert clientcert=1
|
||||
hostssl all all 0.0.0.0/0 md5 clientcert=1
|
||||
replication:
|
||||
enabled: true
|
||||
user: repl_user
|
||||
password: repl_password
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: hostpath
|
||||
|
||||
volumePermissions:
|
||||
enabled: true
|
||||
livenessProbe:
|
||||
enabled: false
|
||||
readinessProbe:
|
||||
enabled: false
|
||||
tls:
|
||||
enabled: true
|
||||
certificatesSecret: tip-common-postgres-certs
|
||||
certFilename: cert.crt
|
||||
certKeyFilename: cert.key
|
||||
certCAFilename: cacert.pem
|
||||
initdbScriptsConfigMap: tip-common-postgres-scripts
|
||||
extraEnv:
|
||||
- name: PGSSLCERT
|
||||
value: /opt/tip-wlan/certs/postgresclientcert.pem
|
||||
- name: PGSSLKEY
|
||||
value: /opt/tip-wlan/certs/postgresclientkey_dec.pem
|
||||
- name: PGSSLROOTCERT
|
||||
value: "/opt/tip-wlan/certs/cacert.pem"
|
||||
primary:
|
||||
extraInitContainers:
|
||||
- command: [ "sh", "-c", "chmod 0600 /opt/bitnami/postgresql/certs/postgresclientkey_dec.pem" ]
|
||||
image: busybox:latest
|
||||
name: chmod-client-cert-additional
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
volumeMounts:
|
||||
- mountPath: /opt/bitnami/postgresql/certs
|
||||
name: postgresql-certificates
|
||||
@@ -1,44 +1,47 @@
|
||||
shared:
|
||||
scalability: &jvm-options
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
JVM_MEM_OPTIONS: " "
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment: false
|
||||
testingEnabled:
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
# createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
@@ -48,17 +51,13 @@ opensync-gw-cloud:
|
||||
persistence:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: https://wlan-filestore.wlan.local
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
url: "https://wlan-filestore.wlan.local"
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
@@ -73,7 +72,6 @@ wlan-cloud-graphql-gw:
|
||||
- hosts:
|
||||
- wlan-ui-graphql.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
@@ -90,16 +88,12 @@ wlan-cloud-static-portal:
|
||||
- hosts:
|
||||
- wlan-ui.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
filestoreSize: 1Gi
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -116,9 +110,6 @@ wlan-prov-service:
|
||||
singleDataSourceUsername: tip_user
|
||||
singleDataSourcePassword: tip_password
|
||||
singleDataSourceSslKeyPassword: mypassword
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -131,54 +122,66 @@ wlan-ssc-service:
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-port-forwarding-gateway-service:
|
||||
enabled: true
|
||||
creds:
|
||||
websocketSessionTokenEncKey: MyToKeN0MyToKeN1
|
||||
externallyVisible:
|
||||
host: api.wlan.demo.lab.wlan.tip.build
|
||||
port: 30401
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
service:
|
||||
type: LoadBalancer
|
||||
config:
|
||||
externalStatusAddress: api.wlan.local
|
||||
externalStatusAddress: "api.wlan.local"
|
||||
defaultTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZXakNDQTBLZ0F3SUJBZ0lVUU5hUC9zcHZSSHRCVEFLd1lSTndieFJmRkFzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hURWJNQmtHQTFVRUF3d1NkMnhoYmkxMWFTNTNiR0Z1TG14dlkyRnNNQjRYRFRJd01EZ3lOekl3TWpZMQpObG9YRFRNd01EZ3lOVEl3TWpZMU5sb3dIVEViTUJrR0ExVUVBd3dTZDJ4aGJpMTFhUzUzYkdGdUxteHZZMkZzCk1JSUNJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBZzhBTUlJQ0NnS0NBZ0VBd1JhZ2lEV3pDTllCdFd3QmNLK2YKVGtrUW1NdCtRQWdUallyMEtTOERQSkNKZjZLa1BmWkhDdTN3NEx2cnh6WTlObWllaDJYVTgzNGFtZEp4SXVDdwo2SWJObzZ6c2tqc3lmb084d0ZEbWxMVldMZWc1SDlHOWRvZW0rV1RlS1BhRUhpM29xdXpOZ3Q2d0xzM212dk9BClR2aVRJb2M4OEVMams0ZFNSMlQ0ZGhoMHFLQ0NqK0hkWEJBNlYvOWJpcnUralYrL2t4RVF1TDJ6TTM5RHZWZDgKOWtzMzV6TVZVemUzNmxENElDT25sN2hnYVROQmk0NU85c2RMRDBZYVVtamlGd1FsdEpVZG1QS3BhQWRidmpVTwpuc3VwbkRZam0rVW0rOWFFcHFNNHRlMjNlZkM4TjhqMXVrZXh6SnJFMkdlRi9XQi9ZMUxGSUcyd2pxVm5zUGNzCm5GRjRZZDlFQlJSbmUxRVplWEJ1M0ZFTEZ5NmxDT0hJMTQ2b0JjYy9JYjYxN3JkVEtYcXh0di8yTkw2L1RxRmsKbnMvRUVqdmU2a1FZemxCWndXSFdwWndRZmczbW82TmFvRlpwVGFnOThNeXU1clpvT29mVGN4WEg2cExtNVB4MQpPQXpnTG5hOU8rMkZtQTRGanJnSGNNWTFOSXp5blpMK0RIOGZpYnQxRi92MkYyTUErUjl2bzg0dlI1Uk9HTmRECnZhMkFwZXZrTGNqUWcvTHdzWHYwZ1RvcFEvWEl6ZWpoNmJkVWtPcktTd0p6VDJDOS9lOUdRbjBncHBWOExCdUsKMXpRSG9ST0xuQTQxTUNGdlFMUUhvK1h0OEtHdytVYmFseTZoT3hCWkY1MUwvQmJxamtESDlBRUZhSkxwdGlFeQpxbjFFNXYrM3doZ0ZTNUlaVDhJVzV1VUNBd0VBQWFPQmtUQ0JqakFkQmdOVkhRNEVGZ1FVeTJiQVV5TlBYSFM5CjNWVFNEK3dvTjd0M3E4RXdId1lEVlIwakJCZ3dGb0FVeTJiQVV5TlBYSFM5M1ZUU0Qrd29ON3QzcThFd0R3WUQKVlIwVEFRSC9CQVV3QXdFQi96QTdCZ05WSFJFRU5EQXlnaHAzYkdGdUxYVnBMV2R5WVhCb2NXd3VkMnhoYmk1cwpiMk5oYklJT1lYQnBMbmRzWVc0dWJHOWpZV3lIQk1Db0FBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBS0grCmJxSmVlMTFuMzRTWWdCRHZnb1o4bEpMUVJ3c0ZucUV4Y1NyL3BsWjdHVklHRkg1L1EyS3lvOVZ5RWlUUHdySXMKS3NFckMxZXZINnh0MVVSZk16cDA1elZRMExZTTUra3NhbVJEYWdBZzNNMWNtN29LT2Rtcy9kcXpQZTJnWmZHSgpwVmR0VlcxQ0hyTDBSTFRSOTNoN2tnU2lCbFNFSVlNb2VLZk41SDlBYXZKNEtyeXlnUXM2M2trR1E1TTllc0FwCnU2YkIzMDd6eWZ6Z1MzdG1Rc1UwMXJnSmZoRUhRL1krQWs5d0R1T2d2bWZ4MFRXZ0FPR2JLcTZUdThNS1lkZWoKSWU3clYxRzVVdjdLZmdvelZYNzZnMktkblRWQmZzcFNLbzN6eX
Jaa2NrekFwdlV1OUllZkhkVG9lNEpNRVUweQpmazdsRVUvZXh6Qnl5TnhwKzZoZHUvWklnM3hiMXlBMW9WWThORWQxckwxekFWaVBlMzUxU0VORUtlSnBSYW5DCmtDTDNSQUZrYnhRN0loYWNqb3g4YmVsUitnbW84Y3lGWnBqOVhhb1BsU0ZTY2R3ejU3M0NUMGg5N3Y3NkE3c3cKeUMrQ2lTcDg1Z1dFVjV2Z0JpdE5KN1I5b25qQmRzdUgybGdFdE1EM0pOT3M4Y0NTUmloWXhyaXdaU3FoVDdvLwp0Y0lsY0o4NFc1bTZYNnpISjNHbXR1S0czUVBOT21zMC9WVm9EVHA5cWRwTCtFazE3dUIyQTQxTnB4ejNVUytsCjZ5SytwZFFRajdBTHpLdVJmT3lnODBYYk53MnY0U25wSTVxYlhGQlJ1bTUyZjg2c1BlbUZxMUtjdU5XZTRFVkMKeERHM2VLbHUrZGxsVXRLeC9QTjZ5ZmxiVDV4Y0dnY2Rtcnd6UmFXUwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRUUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Nzd2dna25BZ0VBQW9JQ0FRREJGcUNJTmJNSTFnRzEKYkFGd3I1OU9TUkNZeTM1QUNCT05pdlFwTHdNOGtJbC9vcVE5OWtjSzdmRGd1K3ZITmowMmFKNkhaZFR6ZmhxWgowbkVpNExEb2hzMmpyT3lTT3pKK2c3ekFVT2FVdFZZdDZEa2YwYjEyaDZiNVpONG85b1FlTGVpcTdNMkMzckF1CnplYSs4NEJPK0pNaWh6endRdU9UaDFKSFpQaDJHSFNvb0lLUDRkMWNFRHBYLzF1S3U3Nk5YNytURVJDNHZiTXoKZjBPOVYzejJTemZuTXhWVE43ZnFVUGdnSTZlWHVHQnBNMEdMams3Mngwc1BSaHBTYU9JWEJDVzBsUjJZOHFsbwpCMXUrTlE2ZXk2bWNOaU9iNVNiNzFvU21vemkxN2JkNThMdzN5UFc2UjdITW1zVFlaNFg5WUg5alVzVWdiYkNPCnBXZXc5eXljVVhoaDMwUUZGR2Q3VVJsNWNHN2NVUXNYTHFVSTRjalhqcWdGeHo4aHZyWHV0MU1wZXJHMi8vWTAKdnI5T29XU2V6OFFTTzk3cVJCak9VRm5CWWRhbG5CQitEZWFqbzFxZ1ZtbE5xRDN3eks3bXRtZzZoOU56RmNmcQprdWJrL0hVNERPQXVkcjA3N1lXWURnV091QWR3eGpVMGpQS2RrdjRNZngrSnUzVVgrL1lYWXdENUgyK2p6aTlICmxFNFkxME85cllDbDYrUXR5TkNEOHZDeGUvU0JPaWxEOWNqTjZPSHB0MVNRNnNwTEFuTlBZTDM5NzBaQ2ZTQ20KbFh3c0c0clhOQWVoRTR1Y0RqVXdJVzlBdEFlajVlM3dvYkQ1UnRxWExxRTdFRmtYblV2OEZ1cU9RTWYwQVFWbwprdW0ySVRLcWZVVG0vN2ZDR0FWTGtobFB3aGJtNVFJREFRQUJBb0lDQUMyR2hEc1pUaWtiTERQMlR6Q2VkOVVoCmJRUlpsbDdLaUxHcXZYNm9VdjhJcFNLdTJrS3h1blpkTzVvQk5NbzNnNTg4YzRSQkFrQ1d6dmJObzFjeDJ3UTQKSkd3ZTdYaGM5TDdYbUwxUFZjNWlJdnVYOFVBTFY3eUdwMXZONklPSC9BYVJsSFlZZHl3UURVSTcwZGZiMmJqRQo2d3dORHRVbk1Ea3NncjNLbExwamNiNEFla2dxWE9MRUFMMld1Nkt1T1hOankrdUU3b2hnVWN3bWlYWXZGb3VMCm1KYXVlS3l5U202NHdJZnpZQ1JwbUhHMVlCTGpic0xJb20zcmZYRkl3V1hqMkhBSGFIOFRWOVhyUmpwR2tEZm8KbFFqN3l0R0s2ZkllMWcva0ZBN3hDWDE2d1NYMS85bjM1WGYwVmMwZ08zdE9NVHJkM1JTVVNEaVp6eVR1WWxuZwpETEdmYXZjRS82QXJ5cTlWZ3hyUXdXbnZhd0hIcWxBWUtxVHpJYkRJS0Y3SjRYTE9FckFtRE50T1I2Lzc1WjJ3CnVPQlFYT0N3NFM1dWxWdzhIZUM0NGlFTmxJYU5lNDNWTkZUTGtRM3lCeW96VVlYWTN2eEJXMWpURFpFOTB5YTUKZzk4cmFiYWhIS0lockpGYzNXYTE0RWhicUE2TVVLSXRRTkk4K1N1Rk1KV3R4VW1iM1cxK2dHbXJvTmo1TU9kYQpzdjV5OThTYS93UUc4dGc0cmdNQ0xpQVNHL3hudDB3RURrNXFDVUUxRzRSdkdOeUYxU09zNk82c1BTOTg4Umd4CnJuamQvWWZoME5xVnhHcHFGNnhpQVgvZXkyU0NGUWNybEtmNnhGREF4YjI4RTdaNnRQSUZCTWxpQ1IrbzdYR3MKZDNvUWVuMThCalM1NjdtR2ZmNkJBb0lCQVFEanFFcHZqOVhJVVB
3bk1RZitRY3R0R1pXZEp2bFZSa1BSMW9maApSVWI2UHdFRkEwdVQyM011ZmFvNGI4bWIrM2Vra1BkYTZmbWJqUGFUckQrbk5YNGxyRE5oYytvcVY4aFVEQnA0CmpVcEg3OXorTVNUZVVQclpnS3VMeEdqaDJiK0FWYVZjZTI2STVYUXVoUnR6ZHFYZDlIeSs4YXpYRTltbHlPQ00KMUpEK2VHZWxhaVJMbEZBbVRDNDNoNlV5T0Q5SmZOSW1oWDQ2WDJRRlFsbGc1cWxVdWQ4Ukx3eFViZTJoYzhTWQp4VnVvYVZSSUdBSmhqRkd3ZVhnRjdzc0tQNXBZMHRkTlNvSGsxeHRnUmVJTlllZFU1cmtpKzloZTN0cStqWUdJCmxVcVVzYzNzN3c4cUk1UXk5NGdmcUI5Lzd4K3BFdGEvak9leE4yL1pGOFJGSXVucEFvSUJBUURaSUpUaUUxKzkKc2xnQ0NGVllLR3Z5aE5odkppck94enlOUWU3YjIvZmxQNzVHd0pTTWpZZTdoTmhGK3JrZHRJcXF5dWxyeGF3YgpPbWliU0FCSG5kT20ycDRMdDhaK20vQXZaRUgzVklLdWkwY0xVbTlKRXNsWURVcFIrdG5BemloNzdrS2FlVzlnCk1wdlpiUzZGdXE2ZlBZQUJyK3dXeU1IazR0UnRNZ3duUFRtSzZQTW85b3FIUURTSVJjL3N0N2hBTUwwMDdtNlEKOTJkRXRqNTNtSTBURTRISVhtY3hZbjV5NGVJLy85aEFMb2xFa0ZHWDU0SmNMdWpDWWkwQ3RIU0xDcnNmQkJwZgpDS2NaMk5sWFNiYVREU1prZWhnQWFWTlM2OVp1K1o2eGFvNmZZMjVxSnNmeXlaUkNjSzJYY0FoUDV2QWNUbWhQClNKUFJZc1dSNXZ1ZEFvSUJBRmtRRXFiWWg1TkprNHdsazNIMS9ZYWVGcmtYY1QzYU1sZ2FiS2hGdVFIWHVpZGkKNWFOZm5BMFpIb25idWV6ckVTQnhra09mKzRYT1BQMEN5eGc0UmpTb3pLVVlld2k3dE9Ta280cDhCQTVtbVhkYwpkSWNBK1ZJMEUyaW5tenlZT21JVG41Q3h2VW1UTXNPc1VWUDNtK1pjYXAwczRTaDNYSk9PSmNNU3VmTEQyaENOCm1NdDBwM0tFSlNTV1RadDdBODlWSk1YclBibktiYy9jNkNpUHRMa3Z5a1BudXhRZ3VYR0xYK05BZXA1RkxyTFIKcWNUTjUzdDUyZW5BUlBDcWQxQytrM3BxWnF6SE5xK1FSMkppNWVTQ0t2V3p2eTlHVWg5d0xyZm5aL2tLSW56SgovWTNIdzRlNDdTa3RWYjF3S0Z1MXdndklMVEJZZHNwZ2tPbFhRbGtDZ2dFQUtKYVJuazFXMldRc1ZYenZUMEtICkkxZTRDZGNOcTRmTkJ1N3JVc2drNkFMcGM5cHVLblFPaW54RDNaa0gzOGl2SDB3OUpEdFlkK0tNU1hMRk1wNEwKUWFhZVlyeGc2NndFMHljZnViZGZrbmRRdVlvWWFZV01nOXhBSjJFSU1hV1lKY3FkUXJrdW04SDZKa1BsclhQLwpUcDgxZlp0QU8rWWRjTWNDUk1OVlNFU0dyRFB0dUp1VnU4REIwVE9Uc2NHS1BOMmZrUFI5VUxZZTVOWllpUXpJCldtZU1IRU9oY0xiandsLzlaazlTUW5Vd2pkT1luUmZXNDVxVlFqa09CdkpxMHM4WHVhMlBySEkyb250SjdhcEcKNmVoTVkvMzYzS0RUeGExMmNWcFNVd0lEVlVKR0VxdmJOc1I5NVltZ3VhMWtzR01RUVlwYXIyOTJ5bTUzVmxYaQpkUUtDQVFCTUFYS0RaNVZobHBRR1VlUk1FNVhqVm1KOE1WdlZTUzV3NzBGOC9CS0ZnZFBJNnR2Nkx5UGh3OTRPCmxZVldoOXJmdUpmbWllK1ZJSWhaeSthUVNpVVhGQUtTdjZFTWJ1bXdjN1p
UNEkyYitDTXQxUEhaTEZtUEN0OXAKOEorUDdoaDlRYWRBYzZqZEdSa0NMNkpMU3VoeWhMbW90SG9IS0ZJazdhNENNZGl2QnB3SVdxMWVScHd0aWRrNwpIdytrdlJ5YW5DMUJVU1dYNGxJcW1LanAyR1B2UDVVdVV2RUlPNitqaWFyWTJDTUNKb3BtcVJ2WWQzNGtSVkF1CjZueFl4a05neEFQSnVWN2tkZVVzQXg5Q1FZcFQ1blFmendtdlVGa0FraHJoTmw5dUJRUDhMdkZORFQ0cWU0bFcKUWw0cXRFZFNiZDVxVWVVdkgzOG5JMmpTVDVMawotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
|
||||
wildcardTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZEekNDQXZlZ0F3SUJBZ0lVYSthaVJZWG9QTGliSS9wdVJCdi9DZ2RTTDNzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01LaTUzYkdGdUxteHZZMkZzTUI0WERUSXdNVEl5TVRJd05UQXpNVm9YRFRNdwpNVEl4T1RJd05UQXpNVm93RnpFVk1CTUdBMVVFQXd3TUtpNTNiR0Z1TG14dlkyRnNNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEwQjhuZE1raGIzZEN6N0I2aS9kRlFWenJwaEtQK2RmY2JKN1gKMzB3MU1FWDIvM3ZvVStkRlBNbnZrS3hEdFJwanZCbnhCczF6L1VmajlyUFhDSzZkazNNYU5DajRQYVUxem9WQQppK1Z6amc0ZG5XNjJxWG1qYVYrYUMrQm5zQndyQmx2THQ0dkh1eFRLbHh1RnR4THdid3VObGJvS1lVN01kV3oyCllMY3QzUWZBWElVb2FRK1RTMUZGdVdFeEZOTFRidlBGZWNoajB4ZnhyOU5BcU9aTXI0RGE0NHMzVERxMVVyRTIKbjhaZXFxVXZ1YUE4ZmEzQjNVZVRFNmJ4OEdhN1JybG5Dakd3UDRGVml3ajdvellmanV6T2JOM2dlcjdWcEpLMQpMSjdIdDBBemZlRm9aQ2xPbUVBd1p5alFwRGZOckdNTCs1dWtIK2JxWngyaUo5UndFcDNmdlE4em5jN1Q4dHJDCmxzMjRWNUpySWhUQXlCcTZRWVNnSXdXM2V5TmVpUnQ0ZHp6Sk5rNnd4cFp0WE96WTFwamJrV2FMaEhKOW1LRWoKU3lqVVBnS3dKSVlmb3BJbTJoUzl1dVZHZDdiU1MyV055aHJSOU5LSG5Lamo5Y0IrUWU0eEh0Z1pEcm1GZ1ZpZQp0cXZBUHhJL0ZkV1pSN3RmT2JCNTR3alMxVFk3TEd6cll4TDZSMWNjZDE4WUppcGNTS05xa05ORlllZ092VkNICmFldW1OTUdVNlZZalJWS1JmQXMva2FzcWxleGpheSt0SXNtd3dDZGoxUUN3UitRa2VEZFdoNE8vQzM1NENRb28KTkxZYzRNRk8xbVVDY2NsbUgvbFBvcTd1anBCMWI2VmFBNVprNFhjVUpRc0c4SUlSMDFHTFM5RW1HVVZaeTlOWApwV2dCbXNVQ0F3RUFBYU5UTUZFd0hRWURWUjBPQkJZRUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQjhHCkExVWRJd1FZTUJhQUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHcKRFFZSktvWklodmNOQVFFTEJRQURnZ0lCQUtlTW9PckhLZ3BGemtkckVhMVhSSVRUOHFZanhKc04yNmFIMFovLwp1dDRXVkE4ckNDNzV4VkpaNnpBQWlBOFE2eTRYSHBzRzl2ZSs1QlJIWEdCS0lYOU5FZGNrbWdNdExzQ2xOR0JCCkxkN3lWd3hhaGVCQzhVTWIrVTAxMlNwaFc3K0t6UFJhQ3g4cHNMMUlFQUkyblQ1MzlCNDBmR2NyTktNSDRqZGkKdkxad3VxT00rZnJucFJ1MkZlK3Bja2Fwek92SEJTb0I3THovR1dmMWUwZ0llc1B4WEdmVG9hbGM1SzU5bDF1TQpCTkhpUW15S3E4TS9MbllMejhyOWp3dHNKU2lLYUljelpISjNtQ0ZUb3ljREF3NTl3WEdmWXZWcFBMaWZXTTJxCm1uSlJKM2dQS1lzOUhXWFgyYktoSmZoMjRLOTN2M1duMVRUellYOGtTbWlnRG0wTUhOSTNwZktlMmJqVW9MNmgKMlQ4bWhRbjdPQ2dvZHMvOXczR1dOdmFxYTAySHRnc0tTbk9Ydm
pSNXFMaVFLRjdhMi95TTlrNWNQcUdHaW1GYgppUWV3eUgvMWw2YjQ0T0s2RGwwMVltWFltNUVqR3plckp2aU90eUhSaWhtME01VmNBWWJaYkE0S1Y1eHhLZjRqClIwaktwQXdqbEpzTDdRMk9zTC9IRkxmaDV1RU1HMXlmTzF1blZkVURKK1FBZHJQUG5tZTZVTVFQZm1UcGx1WjAKS3pvOXY3NEpYV0pwQkNtaWNTbFBQdnB3cXZLTk5iOWd6b0hjOXFheWMwNWVxRldRbzNNZjIzYU82b05wU2ZuaAp5aWMvczFQcC9ZS2FHakVSQXB1UmRvYTlWT1diUncycFZMei9rZVNraS9QTDJFRFc4RUVHYjFXcUFBMkJPVVhDCi9oYXQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFFIeWQweVNGdmQwTFAKc0hxTDkwVkJYT3VtRW8vNTE5eHNudGZmVERVd1JmYi9lK2hUNTBVOHllK1FyRU8xR21POEdmRUd6WFA5UitQMgpzOWNJcnAyVGN4bzBLUGc5cFRYT2hVQ0w1WE9PRGgyZGJyYXBlYU5wWDVvTDRHZXdIQ3NHVzh1M2k4ZTdGTXFYCkc0VzNFdkJ2QzQyVnVncGhUc3gxYlBaZ3R5M2RCOEJjaFNocEQ1TkxVVVc1WVRFVTB0TnU4OFY1eUdQVEYvR3YKMDBDbzVreXZnTnJqaXpkTU9yVlNzVGFmeGw2cXBTKzVvRHg5cmNIZFI1TVRwdkh3WnJ0R3VXY0tNYkEvZ1ZXTApDUHVqTmgrTzdNNXMzZUI2dnRXa2tyVXNuc2UzUUROOTRXaGtLVTZZUURCbktOQ2tOODJzWXd2N202UWY1dXBuCkhhSW4xSEFTbmQrOUR6T2R6dFB5MnNLV3piaFhrbXNpRk1ESUdycEJoS0FqQmJkN0kxNkpHM2gzUE1rMlRyREcKbG0xYzdOaldtTnVSWm91RWNuMllvU05MS05RK0FyQWtoaCtpa2liYUZMMjY1VVozdHRKTFpZM0tHdEgwMG9lYwpxT1Axd0g1QjdqRWUyQmtPdVlXQldKNjJxOEEvRWo4VjFabEh1MTg1c0huakNOTFZOanNzYk90akV2cEhWeHgzClh4Z21LbHhJbzJxUTAwVmg2QTY5VUlkcDY2WTB3WlRwVmlORlVwRjhDeitScXlxVjdHTnJMNjBpeWJEQUoyUFYKQUxCSDVDUjROMWFIZzc4TGZuZ0pDaWcwdGh6Z3dVN1daUUp4eVdZZitVK2lydTZPa0hWdnBWb0RsbVRoZHhRbApDd2J3Z2hIVFVZdEwwU1laUlZuTDAxZWxhQUdheFFJREFRQUJBb0lDQVFDUVRkbXN4enl3cmUrY1ZCQlVkaW9GCjdTalRhTEY5bWFlVGhQdkhMMjc5dnJWSlpoK3I5WUp6YU16NzhnV3NUOVR4ZXNjOVlUMVlVLzJEZENUWU4wSzUKRnlrSEc1VXNJUjVTeU4vOVlDWWtURE5La3BhQ29mMmxOWTE1U0twOFdMdVlXQlBEZTE4TW41anM5ejlhdGY0Ugo4Ti9GL2szdU5KWGRvYVNmWU1Pakt4bTh6UE05RFhpaTA0SlZ6RWNjMmlXU0crSkQwNmNybWNHUm1SZVBSTWZOCk5Mb1E1ZGw4dUlRN0J2Y0tCNkJpRDlFc2t5YitPWGxmTlo2TUZNaFNXTmpuYSt3L0REN1plWkxYcVczWk45RGYKNStBbGFoNlkzVE1EUGxueXkxRk5CVzN1alZrMWdkS21ESFBEUTNDUFBNWVdEa01qdlVJcWdKRHMySVl6dWIvTwpXRjRVUTV5UEJhZzluaWp1dS9uMVZDdGZuSkxwakZIakU1VzdkK3p1UGh6aUJ1WDFOcjRtOVVJdEpaSTNsYmJtCmdvZFlMdGl4b3RwNWF3ang1eXA3MU1zUHlTZzcrbHBPenA4dStuRENJcnc0K0VSME56MG8yTXFmcmJ2VklGQXIKWHIyc2YrejljbmtxalBWWEZaVks3em1TUHI5N0YrbTV4RHpURG9lTG53aVlhUUpOQ0ZhejhMVERjNldVT2w4SQpLOWhHd3FaK0llTlgreW16em16Nkx6WWVPaGlrRmNRaUI0UXVPSjdWWnZWRmVoS3JJMXJLWHJDRU01VmpJZXBkCkhzR0c1eTlLUkcxdEszSU5ScmI0SHlhRDF6SHJSTHRneFpLT1BvWDN0UjNmbTJ1aGova3dwelZnWTltRXJDWDkKd2I4SVA5TXdRR3REQVNBcjZWVmJvUUtDQVFFQThIeWlaK1lVeFE
zQnprL1hoNmZMWWNYakE3NTJrQ204VWZzWQp0d1Z4N0EyNW5YRStiUDRIT3UyVC9kTlc0Tkw1elZPT0JkaWZIcTJQVFNVMGYzQUFHL0pNcnVzM3NrNHd4azM5CitYYlh0dHltWkdxb3FEcVN3TUw2czVpY1RnangvenRhSXk1TWFKYWhUYUpNdFRQQlVpZ3U3enhoeGNwVlhNVUMKTklHcFl5Mkt5R2hyMjVVOFdlR0RYQm9SS2xYUXJXYkNZeW1kMXdYQStEaVl0dzA5eit0VHhPNTRodjFCZkJKZwpWMGd0VWdJU0I2WEZDMU9CWDZXQ1pXYlhCN2hPaHhISjNkNHAyQlZyN0gxL2JDQ0ZvVDY5by9WQVNHRmdtTHRiCnpGalRNbjFIaTluVW5jUFlScWpsN1h0NWdPOHBOa3BwMjVrNHIxRVludWhIazcrYzdRS0NBUUVBM1l3THozNloKNEVPRndvODIrUlVId2lkaFExOEdrU1JvWStKVm1udXJpSXdHZTk3ZmRTVk91d092SDlZSVhsRWpjRitoOHFQVQpJVnpIOXBuYXZjTENEMnhIOWZ5d09ML3pmYmJnYnExZjV4Y3BOUXlYM1JnTGFDUVpLNkpJa3NzOUtDb0dhSzlaCmpMVm41MjFFZlFBRE5DSi93YlRCb3dLQ0dTNDUzSzRBaWFEWHN6TkJLUk5MOHVaWWYwK0x0U2IzV3lkZVQ2eUgKdGZiSXR3NlBSS1lxb2NaeGIrM0pWQWFHcGxScjVZSlNDU1BtTjFMSjU0djlTcXBIVnJMNzJudFNwKzdDODJ6SgpJajVOSXFEOGFsOVZ3WFB5dExRd25hYWc1TW5ka0NLQ3R0MlVHSGZwMEh3ZTJTL1hkemppS2gzZTZaT3MyMSt1CitQUHVrSkUxTTZzU09RS0NBUUJjWVJRbDR6MUJRUHFjM1JESEhJN0UvVFlxWHdTK2RqblFLQ3VqU3FVcmIwNUoKQzVKV1hmSzdFVDVUTjliY3dFNlRNRENUVUZZM2U2WmJsUm9vaGdhVXRhdjlXWC9vcjU2TzNyRGNIbW5ZNWNQSgpPU3VXakFHSnFKeVRWdUZjSEpXUlhPUlFOVjNHbzI1Tkd6WnFPUHBmSys1em1mZFkrbE4yTW51WlhlR0twcGowClNTQjlsa003cDZSRlFnSXNDQkVFTzBBYXhZYkxiWHRtSHArVFdiUFA1ZThrN0JKQ2tKQ1NMNkR3aGxwYWNVOHAKdnVVRlo4dC95VjFneEhOL2xLNGR0cGliOE5hVUdnNStKdXRHeHV0dU9HS3kwK2dncGI5c2pEUkVPQzdRNjAwTApqTjdleDdlUjFSbVY4Mk9HUXRqSzhTVGU1V25mOXNBRmN1YmorNncxQW9JQkFHYXM4Z2hQRHpkOWM2OXd1alNFCkI1MTJyTUFSZVRTcEgrd3l5Q09aYnkwUVlDem1aTCtnODdUK2h4b0ZFc25MWnRZOHJBeU0ydEkvY3JrYUl1TlIKTUtqL01QYVREb1N1aVVWWkRQaWVSMVVOU2Q2NUlHU3FNUmNwcTdTcU9HSTM2UGNGU3dVWFJ6Uk1Hb1NLQW5UQQpIYnY2eFNUY0JlWHJVcW9pMzFRa0hFR3NsbXNKdFFnNVZqaVRncTQyQ25TQlE2QXVSYW85Tm9RaGhISTZRREc3CnBRUm11TW43OVJPSkZyeGRZY2Z6TnR2ZmxHRk5jQjlzcEk0SERwcml4cEJDR1ZPVTl5cmozdStNMmlqVFhVaGIKT0o0NGcySTJKRlhjRkxNVHp5aHVwZy9qN3kvTDIwUHhVa2Fyd25zUmxOZWFFbVpFTjVkUDZBS2U0cENEaTVtUApqaGtDZ2dFQkFMUmtVeG9oZDh2ZVBwR3hPbWlOak5HekpiTDlscGx0TWxhR0dPQ3JOUkZSeEppblgzWU9UVnhiCkRFVlpqaXRHNldydzFxaDdnZXAzeEdJaWZHQ1lZV3pNc0RZTitueGtwV0l
RRmZOV3dYemNRWlhrTEduZVlUdTAKSVU2RjY5Myt1Q0tkcHVCdVl0d3BQNEJCVkNCRTVON0FzRGV4bFBYTzk1cEw3ZzR4OG5RckdNeGJlRXVOdytaTwpPYmYvTnFFMGZZcURkaERiVHI0UDR6bUpBRlpYeDlKMjNJdWRMUFI3MDZITGZ5bDMrb1pUS2Y2ZWdEL1drWXZGCllLdEtDZzI1UmtSYmZBakZkeDlpOVkzcDlPNEFNVUNaRVFIOWQwU1d6LzJWR0VmYzVha09YL2xvWlAyUXF3c2UKeXMyc0k1U0Z5TEd1ZGM3R2MzVTd5UGd0RVN0elVoWT0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
sslKeyPassword: mypassword
|
||||
cassandra:
|
||||
enabled: true
|
||||
image:
|
||||
debug: true
|
||||
cluster:
|
||||
replicaCount: 1
|
||||
seedCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: postgres
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: standard
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
|
||||
@@ -1,189 +0,0 @@
|
||||
shared:
|
||||
scalability: &jvm-options
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
JVM_MEM_OPTIONS: " "
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
|
||||
global:
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment: false
|
||||
testingEnabled: false
|
||||
testingTimestamp:
|
||||
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
address:
|
||||
ovsdb: opensync-controller.wlan.local
|
||||
mqtt: opensync-mqtt-broker.wlan.local
|
||||
persistence:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: https://wlan-filestore.wlan.local
|
||||
service:
|
||||
type: LoadBalancer
|
||||
annotations:
|
||||
metallb.universe.tf/allow-shared-ip: default
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: microk8s-hostpath
|
||||
service:
|
||||
type: LoadBalancer
|
||||
annotations:
|
||||
metallb.universe.tf/allow-shared-ip: default
|
||||
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: tip-wlan-wlan-portal-service:9051
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui-graphql.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui-graphql.wlan.local
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
graphql: https://wlan-ui-graphql.wlan.local
|
||||
service:
|
||||
type: NodePort
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui.wlan.local
|
||||
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: microk8s-hostpath
|
||||
filestoreSize: 1Gi
|
||||
service:
|
||||
type: LoadBalancer
|
||||
annotations:
|
||||
metallb.universe.tf/allow-shared-ip: default
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
enabled: true
|
||||
db:
|
||||
postgresUser:
|
||||
password: postgres
|
||||
tipUser:
|
||||
password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
postgres:
|
||||
singleDataSourceUsername: tip_user
|
||||
singleDataSourcePassword: tip_password
|
||||
singleDataSourceSslKeyPassword: mypassword
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
cassandra:
|
||||
tip_user: tip_user
|
||||
tip_password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
service:
|
||||
type: LoadBalancer
|
||||
annotations:
|
||||
metallb.universe.tf/allow-shared-ip: default
|
||||
config:
|
||||
externalStatusAddress: api.wlan.local
|
||||
defaultTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZXakNDQTBLZ0F3SUJBZ0lVUU5hUC9zcHZSSHRCVEFLd1lSTndieFJmRkFzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hURWJNQmtHQTFVRUF3d1NkMnhoYmkxMWFTNTNiR0Z1TG14dlkyRnNNQjRYRFRJd01EZ3lOekl3TWpZMQpObG9YRFRNd01EZ3lOVEl3TWpZMU5sb3dIVEViTUJrR0ExVUVBd3dTZDJ4aGJpMTFhUzUzYkdGdUxteHZZMkZzCk1JSUNJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBZzhBTUlJQ0NnS0NBZ0VBd1JhZ2lEV3pDTllCdFd3QmNLK2YKVGtrUW1NdCtRQWdUallyMEtTOERQSkNKZjZLa1BmWkhDdTN3NEx2cnh6WTlObWllaDJYVTgzNGFtZEp4SXVDdwo2SWJObzZ6c2tqc3lmb084d0ZEbWxMVldMZWc1SDlHOWRvZW0rV1RlS1BhRUhpM29xdXpOZ3Q2d0xzM212dk9BClR2aVRJb2M4OEVMams0ZFNSMlQ0ZGhoMHFLQ0NqK0hkWEJBNlYvOWJpcnUralYrL2t4RVF1TDJ6TTM5RHZWZDgKOWtzMzV6TVZVemUzNmxENElDT25sN2hnYVROQmk0NU85c2RMRDBZYVVtamlGd1FsdEpVZG1QS3BhQWRidmpVTwpuc3VwbkRZam0rVW0rOWFFcHFNNHRlMjNlZkM4TjhqMXVrZXh6SnJFMkdlRi9XQi9ZMUxGSUcyd2pxVm5zUGNzCm5GRjRZZDlFQlJSbmUxRVplWEJ1M0ZFTEZ5NmxDT0hJMTQ2b0JjYy9JYjYxN3JkVEtYcXh0di8yTkw2L1RxRmsKbnMvRUVqdmU2a1FZemxCWndXSFdwWndRZmczbW82TmFvRlpwVGFnOThNeXU1clpvT29mVGN4WEg2cExtNVB4MQpPQXpnTG5hOU8rMkZtQTRGanJnSGNNWTFOSXp5blpMK0RIOGZpYnQxRi92MkYyTUErUjl2bzg0dlI1Uk9HTmRECnZhMkFwZXZrTGNqUWcvTHdzWHYwZ1RvcFEvWEl6ZWpoNmJkVWtPcktTd0p6VDJDOS9lOUdRbjBncHBWOExCdUsKMXpRSG9ST0xuQTQxTUNGdlFMUUhvK1h0OEtHdytVYmFseTZoT3hCWkY1MUwvQmJxamtESDlBRUZhSkxwdGlFeQpxbjFFNXYrM3doZ0ZTNUlaVDhJVzV1VUNBd0VBQWFPQmtUQ0JqakFkQmdOVkhRNEVGZ1FVeTJiQVV5TlBYSFM5CjNWVFNEK3dvTjd0M3E4RXdId1lEVlIwakJCZ3dGb0FVeTJiQVV5TlBYSFM5M1ZUU0Qrd29ON3QzcThFd0R3WUQKVlIwVEFRSC9CQVV3QXdFQi96QTdCZ05WSFJFRU5EQXlnaHAzYkdGdUxYVnBMV2R5WVhCb2NXd3VkMnhoYmk1cwpiMk5oYklJT1lYQnBMbmRzWVc0dWJHOWpZV3lIQk1Db0FBRXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBS0grCmJxSmVlMTFuMzRTWWdCRHZnb1o4bEpMUVJ3c0ZucUV4Y1NyL3BsWjdHVklHRkg1L1EyS3lvOVZ5RWlUUHdySXMKS3NFckMxZXZINnh0MVVSZk16cDA1elZRMExZTTUra3NhbVJEYWdBZzNNMWNtN29LT2Rtcy9kcXpQZTJnWmZHSgpwVmR0VlcxQ0hyTDBSTFRSOTNoN2tnU2lCbFNFSVlNb2VLZk41SDlBYXZKNEtyeXlnUXM2M2trR1E1TTllc0FwCnU2YkIzMDd6eWZ6Z1MzdG1Rc1UwMXJnSmZoRUhRL1krQWs5d0R1T2d2bWZ4MFRXZ0FPR2JLcTZUdThNS1lkZWoKSWU3clYxRzVVdjdLZmdvelZYNzZnMktkblRWQmZzcFNLbzN6eX
Jaa2NrekFwdlV1OUllZkhkVG9lNEpNRVUweQpmazdsRVUvZXh6Qnl5TnhwKzZoZHUvWklnM3hiMXlBMW9WWThORWQxckwxekFWaVBlMzUxU0VORUtlSnBSYW5DCmtDTDNSQUZrYnhRN0loYWNqb3g4YmVsUitnbW84Y3lGWnBqOVhhb1BsU0ZTY2R3ejU3M0NUMGg5N3Y3NkE3c3cKeUMrQ2lTcDg1Z1dFVjV2Z0JpdE5KN1I5b25qQmRzdUgybGdFdE1EM0pOT3M4Y0NTUmloWXhyaXdaU3FoVDdvLwp0Y0lsY0o4NFc1bTZYNnpISjNHbXR1S0czUVBOT21zMC9WVm9EVHA5cWRwTCtFazE3dUIyQTQxTnB4ejNVUytsCjZ5SytwZFFRajdBTHpLdVJmT3lnODBYYk53MnY0U25wSTVxYlhGQlJ1bTUyZjg2c1BlbUZxMUtjdU5XZTRFVkMKeERHM2VLbHUrZGxsVXRLeC9QTjZ5ZmxiVDV4Y0dnY2Rtcnd6UmFXUwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRUUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Nzd2dna25BZ0VBQW9JQ0FRREJGcUNJTmJNSTFnRzEKYkFGd3I1OU9TUkNZeTM1QUNCT05pdlFwTHdNOGtJbC9vcVE5OWtjSzdmRGd1K3ZITmowMmFKNkhaZFR6ZmhxWgowbkVpNExEb2hzMmpyT3lTT3pKK2c3ekFVT2FVdFZZdDZEa2YwYjEyaDZiNVpONG85b1FlTGVpcTdNMkMzckF1CnplYSs4NEJPK0pNaWh6endRdU9UaDFKSFpQaDJHSFNvb0lLUDRkMWNFRHBYLzF1S3U3Nk5YNytURVJDNHZiTXoKZjBPOVYzejJTemZuTXhWVE43ZnFVUGdnSTZlWHVHQnBNMEdMams3Mngwc1BSaHBTYU9JWEJDVzBsUjJZOHFsbwpCMXUrTlE2ZXk2bWNOaU9iNVNiNzFvU21vemkxN2JkNThMdzN5UFc2UjdITW1zVFlaNFg5WUg5alVzVWdiYkNPCnBXZXc5eXljVVhoaDMwUUZGR2Q3VVJsNWNHN2NVUXNYTHFVSTRjalhqcWdGeHo4aHZyWHV0MU1wZXJHMi8vWTAKdnI5T29XU2V6OFFTTzk3cVJCak9VRm5CWWRhbG5CQitEZWFqbzFxZ1ZtbE5xRDN3eks3bXRtZzZoOU56RmNmcQprdWJrL0hVNERPQXVkcjA3N1lXWURnV091QWR3eGpVMGpQS2RrdjRNZngrSnUzVVgrL1lYWXdENUgyK2p6aTlICmxFNFkxME85cllDbDYrUXR5TkNEOHZDeGUvU0JPaWxEOWNqTjZPSHB0MVNRNnNwTEFuTlBZTDM5NzBaQ2ZTQ20KbFh3c0c0clhOQWVoRTR1Y0RqVXdJVzlBdEFlajVlM3dvYkQ1UnRxWExxRTdFRmtYblV2OEZ1cU9RTWYwQVFWbwprdW0ySVRLcWZVVG0vN2ZDR0FWTGtobFB3aGJtNVFJREFRQUJBb0lDQUMyR2hEc1pUaWtiTERQMlR6Q2VkOVVoCmJRUlpsbDdLaUxHcXZYNm9VdjhJcFNLdTJrS3h1blpkTzVvQk5NbzNnNTg4YzRSQkFrQ1d6dmJObzFjeDJ3UTQKSkd3ZTdYaGM5TDdYbUwxUFZjNWlJdnVYOFVBTFY3eUdwMXZONklPSC9BYVJsSFlZZHl3UURVSTcwZGZiMmJqRQo2d3dORHRVbk1Ea3NncjNLbExwamNiNEFla2dxWE9MRUFMMld1Nkt1T1hOankrdUU3b2hnVWN3bWlYWXZGb3VMCm1KYXVlS3l5U202NHdJZnpZQ1JwbUhHMVlCTGpic0xJb20zcmZYRkl3V1hqMkhBSGFIOFRWOVhyUmpwR2tEZm8KbFFqN3l0R0s2ZkllMWcva0ZBN3hDWDE2d1NYMS85bjM1WGYwVmMwZ08zdE9NVHJkM1JTVVNEaVp6eVR1WWxuZwpETEdmYXZjRS82QXJ5cTlWZ3hyUXdXbnZhd0hIcWxBWUtxVHpJYkRJS0Y3SjRYTE9FckFtRE50T1I2Lzc1WjJ3CnVPQlFYT0N3NFM1dWxWdzhIZUM0NGlFTmxJYU5lNDNWTkZUTGtRM3lCeW96VVlYWTN2eEJXMWpURFpFOTB5YTUKZzk4cmFiYWhIS0lockpGYzNXYTE0RWhicUE2TVVLSXRRTkk4K1N1Rk1KV3R4VW1iM1cxK2dHbXJvTmo1TU9kYQpzdjV5OThTYS93UUc4dGc0cmdNQ0xpQVNHL3hudDB3RURrNXFDVUUxRzRSdkdOeUYxU09zNk82c1BTOTg4Umd4CnJuamQvWWZoME5xVnhHcHFGNnhpQVgvZXkyU0NGUWNybEtmNnhGREF4YjI4RTdaNnRQSUZCTWxpQ1IrbzdYR3MKZDNvUWVuMThCalM1NjdtR2ZmNkJBb0lCQVFEanFFcHZqOVhJVVB
3bk1RZitRY3R0R1pXZEp2bFZSa1BSMW9maApSVWI2UHdFRkEwdVQyM011ZmFvNGI4bWIrM2Vra1BkYTZmbWJqUGFUckQrbk5YNGxyRE5oYytvcVY4aFVEQnA0CmpVcEg3OXorTVNUZVVQclpnS3VMeEdqaDJiK0FWYVZjZTI2STVYUXVoUnR6ZHFYZDlIeSs4YXpYRTltbHlPQ00KMUpEK2VHZWxhaVJMbEZBbVRDNDNoNlV5T0Q5SmZOSW1oWDQ2WDJRRlFsbGc1cWxVdWQ4Ukx3eFViZTJoYzhTWQp4VnVvYVZSSUdBSmhqRkd3ZVhnRjdzc0tQNXBZMHRkTlNvSGsxeHRnUmVJTlllZFU1cmtpKzloZTN0cStqWUdJCmxVcVVzYzNzN3c4cUk1UXk5NGdmcUI5Lzd4K3BFdGEvak9leE4yL1pGOFJGSXVucEFvSUJBUURaSUpUaUUxKzkKc2xnQ0NGVllLR3Z5aE5odkppck94enlOUWU3YjIvZmxQNzVHd0pTTWpZZTdoTmhGK3JrZHRJcXF5dWxyeGF3YgpPbWliU0FCSG5kT20ycDRMdDhaK20vQXZaRUgzVklLdWkwY0xVbTlKRXNsWURVcFIrdG5BemloNzdrS2FlVzlnCk1wdlpiUzZGdXE2ZlBZQUJyK3dXeU1IazR0UnRNZ3duUFRtSzZQTW85b3FIUURTSVJjL3N0N2hBTUwwMDdtNlEKOTJkRXRqNTNtSTBURTRISVhtY3hZbjV5NGVJLy85aEFMb2xFa0ZHWDU0SmNMdWpDWWkwQ3RIU0xDcnNmQkJwZgpDS2NaMk5sWFNiYVREU1prZWhnQWFWTlM2OVp1K1o2eGFvNmZZMjVxSnNmeXlaUkNjSzJYY0FoUDV2QWNUbWhQClNKUFJZc1dSNXZ1ZEFvSUJBRmtRRXFiWWg1TkprNHdsazNIMS9ZYWVGcmtYY1QzYU1sZ2FiS2hGdVFIWHVpZGkKNWFOZm5BMFpIb25idWV6ckVTQnhra09mKzRYT1BQMEN5eGc0UmpTb3pLVVlld2k3dE9Ta280cDhCQTVtbVhkYwpkSWNBK1ZJMEUyaW5tenlZT21JVG41Q3h2VW1UTXNPc1VWUDNtK1pjYXAwczRTaDNYSk9PSmNNU3VmTEQyaENOCm1NdDBwM0tFSlNTV1RadDdBODlWSk1YclBibktiYy9jNkNpUHRMa3Z5a1BudXhRZ3VYR0xYK05BZXA1RkxyTFIKcWNUTjUzdDUyZW5BUlBDcWQxQytrM3BxWnF6SE5xK1FSMkppNWVTQ0t2V3p2eTlHVWg5d0xyZm5aL2tLSW56SgovWTNIdzRlNDdTa3RWYjF3S0Z1MXdndklMVEJZZHNwZ2tPbFhRbGtDZ2dFQUtKYVJuazFXMldRc1ZYenZUMEtICkkxZTRDZGNOcTRmTkJ1N3JVc2drNkFMcGM5cHVLblFPaW54RDNaa0gzOGl2SDB3OUpEdFlkK0tNU1hMRk1wNEwKUWFhZVlyeGc2NndFMHljZnViZGZrbmRRdVlvWWFZV01nOXhBSjJFSU1hV1lKY3FkUXJrdW04SDZKa1BsclhQLwpUcDgxZlp0QU8rWWRjTWNDUk1OVlNFU0dyRFB0dUp1VnU4REIwVE9Uc2NHS1BOMmZrUFI5VUxZZTVOWllpUXpJCldtZU1IRU9oY0xiandsLzlaazlTUW5Vd2pkT1luUmZXNDVxVlFqa09CdkpxMHM4WHVhMlBySEkyb250SjdhcEcKNmVoTVkvMzYzS0RUeGExMmNWcFNVd0lEVlVKR0VxdmJOc1I5NVltZ3VhMWtzR01RUVlwYXIyOTJ5bTUzVmxYaQpkUUtDQVFCTUFYS0RaNVZobHBRR1VlUk1FNVhqVm1KOE1WdlZTUzV3NzBGOC9CS0ZnZFBJNnR2Nkx5UGh3OTRPCmxZVldoOXJmdUpmbWllK1ZJSWhaeSthUVNpVVhGQUtTdjZFTWJ1bXdjN1p
UNEkyYitDTXQxUEhaTEZtUEN0OXAKOEorUDdoaDlRYWRBYzZqZEdSa0NMNkpMU3VoeWhMbW90SG9IS0ZJazdhNENNZGl2QnB3SVdxMWVScHd0aWRrNwpIdytrdlJ5YW5DMUJVU1dYNGxJcW1LanAyR1B2UDVVdVV2RUlPNitqaWFyWTJDTUNKb3BtcVJ2WWQzNGtSVkF1CjZueFl4a05neEFQSnVWN2tkZVVzQXg5Q1FZcFQ1blFmendtdlVGa0FraHJoTmw5dUJRUDhMdkZORFQ0cWU0bFcKUWw0cXRFZFNiZDVxVWVVdkgzOG5JMmpTVDVMawotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
|
||||
wildcardTLS:
|
||||
cert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZEekNDQXZlZ0F3SUJBZ0lVYSthaVJZWG9QTGliSS9wdVJCdi9DZ2RTTDNzd0RRWUpLb1pJaHZjTkFRRUwKQlFBd0Z6RVZNQk1HQTFVRUF3d01LaTUzYkdGdUxteHZZMkZzTUI0WERUSXdNVEl5TVRJd05UQXpNVm9YRFRNdwpNVEl4T1RJd05UQXpNVm93RnpFVk1CTUdBMVVFQXd3TUtpNTNiR0Z1TG14dlkyRnNNSUlDSWpBTkJna3Foa2lHCjl3MEJBUUVGQUFPQ0FnOEFNSUlDQ2dLQ0FnRUEwQjhuZE1raGIzZEN6N0I2aS9kRlFWenJwaEtQK2RmY2JKN1gKMzB3MU1FWDIvM3ZvVStkRlBNbnZrS3hEdFJwanZCbnhCczF6L1VmajlyUFhDSzZkazNNYU5DajRQYVUxem9WQQppK1Z6amc0ZG5XNjJxWG1qYVYrYUMrQm5zQndyQmx2THQ0dkh1eFRLbHh1RnR4THdid3VObGJvS1lVN01kV3oyCllMY3QzUWZBWElVb2FRK1RTMUZGdVdFeEZOTFRidlBGZWNoajB4ZnhyOU5BcU9aTXI0RGE0NHMzVERxMVVyRTIKbjhaZXFxVXZ1YUE4ZmEzQjNVZVRFNmJ4OEdhN1JybG5Dakd3UDRGVml3ajdvellmanV6T2JOM2dlcjdWcEpLMQpMSjdIdDBBemZlRm9aQ2xPbUVBd1p5alFwRGZOckdNTCs1dWtIK2JxWngyaUo5UndFcDNmdlE4em5jN1Q4dHJDCmxzMjRWNUpySWhUQXlCcTZRWVNnSXdXM2V5TmVpUnQ0ZHp6Sk5rNnd4cFp0WE96WTFwamJrV2FMaEhKOW1LRWoKU3lqVVBnS3dKSVlmb3BJbTJoUzl1dVZHZDdiU1MyV055aHJSOU5LSG5Lamo5Y0IrUWU0eEh0Z1pEcm1GZ1ZpZQp0cXZBUHhJL0ZkV1pSN3RmT2JCNTR3alMxVFk3TEd6cll4TDZSMWNjZDE4WUppcGNTS05xa05ORlllZ092VkNICmFldW1OTUdVNlZZalJWS1JmQXMva2FzcWxleGpheSt0SXNtd3dDZGoxUUN3UitRa2VEZFdoNE8vQzM1NENRb28KTkxZYzRNRk8xbVVDY2NsbUgvbFBvcTd1anBCMWI2VmFBNVprNFhjVUpRc0c4SUlSMDFHTFM5RW1HVVZaeTlOWApwV2dCbXNVQ0F3RUFBYU5UTUZFd0hRWURWUjBPQkJZRUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQjhHCkExVWRJd1FZTUJhQUZOZlVJSGhXdnFwUzg2ZC82SnJvbmxFYzZMU1NNQThHQTFVZEV3RUIvd1FGTUFNQkFmOHcKRFFZSktvWklodmNOQVFFTEJRQURnZ0lCQUtlTW9PckhLZ3BGemtkckVhMVhSSVRUOHFZanhKc04yNmFIMFovLwp1dDRXVkE4ckNDNzV4VkpaNnpBQWlBOFE2eTRYSHBzRzl2ZSs1QlJIWEdCS0lYOU5FZGNrbWdNdExzQ2xOR0JCCkxkN3lWd3hhaGVCQzhVTWIrVTAxMlNwaFc3K0t6UFJhQ3g4cHNMMUlFQUkyblQ1MzlCNDBmR2NyTktNSDRqZGkKdkxad3VxT00rZnJucFJ1MkZlK3Bja2Fwek92SEJTb0I3THovR1dmMWUwZ0llc1B4WEdmVG9hbGM1SzU5bDF1TQpCTkhpUW15S3E4TS9MbllMejhyOWp3dHNKU2lLYUljelpISjNtQ0ZUb3ljREF3NTl3WEdmWXZWcFBMaWZXTTJxCm1uSlJKM2dQS1lzOUhXWFgyYktoSmZoMjRLOTN2M1duMVRUellYOGtTbWlnRG0wTUhOSTNwZktlMmJqVW9MNmgKMlQ4bWhRbjdPQ2dvZHMvOXczR1dOdmFxYTAySHRnc0tTbk9Ydm
pSNXFMaVFLRjdhMi95TTlrNWNQcUdHaW1GYgppUWV3eUgvMWw2YjQ0T0s2RGwwMVltWFltNUVqR3plckp2aU90eUhSaWhtME01VmNBWWJaYkE0S1Y1eHhLZjRqClIwaktwQXdqbEpzTDdRMk9zTC9IRkxmaDV1RU1HMXlmTzF1blZkVURKK1FBZHJQUG5tZTZVTVFQZm1UcGx1WjAKS3pvOXY3NEpYV0pwQkNtaWNTbFBQdnB3cXZLTk5iOWd6b0hjOXFheWMwNWVxRldRbzNNZjIzYU82b05wU2ZuaAp5aWMvczFQcC9ZS2FHakVSQXB1UmRvYTlWT1diUncycFZMei9rZVNraS9QTDJFRFc4RUVHYjFXcUFBMkJPVVhDCi9oYXQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
|
||||
key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUpRd0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQ1Mwd2dna3BBZ0VBQW9JQ0FRRFFIeWQweVNGdmQwTFAKc0hxTDkwVkJYT3VtRW8vNTE5eHNudGZmVERVd1JmYi9lK2hUNTBVOHllK1FyRU8xR21POEdmRUd6WFA5UitQMgpzOWNJcnAyVGN4bzBLUGc5cFRYT2hVQ0w1WE9PRGgyZGJyYXBlYU5wWDVvTDRHZXdIQ3NHVzh1M2k4ZTdGTXFYCkc0VzNFdkJ2QzQyVnVncGhUc3gxYlBaZ3R5M2RCOEJjaFNocEQ1TkxVVVc1WVRFVTB0TnU4OFY1eUdQVEYvR3YKMDBDbzVreXZnTnJqaXpkTU9yVlNzVGFmeGw2cXBTKzVvRHg5cmNIZFI1TVRwdkh3WnJ0R3VXY0tNYkEvZ1ZXTApDUHVqTmgrTzdNNXMzZUI2dnRXa2tyVXNuc2UzUUROOTRXaGtLVTZZUURCbktOQ2tOODJzWXd2N202UWY1dXBuCkhhSW4xSEFTbmQrOUR6T2R6dFB5MnNLV3piaFhrbXNpRk1ESUdycEJoS0FqQmJkN0kxNkpHM2gzUE1rMlRyREcKbG0xYzdOaldtTnVSWm91RWNuMllvU05MS05RK0FyQWtoaCtpa2liYUZMMjY1VVozdHRKTFpZM0tHdEgwMG9lYwpxT1Axd0g1QjdqRWUyQmtPdVlXQldKNjJxOEEvRWo4VjFabEh1MTg1c0huakNOTFZOanNzYk90akV2cEhWeHgzClh4Z21LbHhJbzJxUTAwVmg2QTY5VUlkcDY2WTB3WlRwVmlORlVwRjhDeitScXlxVjdHTnJMNjBpeWJEQUoyUFYKQUxCSDVDUjROMWFIZzc4TGZuZ0pDaWcwdGh6Z3dVN1daUUp4eVdZZitVK2lydTZPa0hWdnBWb0RsbVRoZHhRbApDd2J3Z2hIVFVZdEwwU1laUlZuTDAxZWxhQUdheFFJREFRQUJBb0lDQVFDUVRkbXN4enl3cmUrY1ZCQlVkaW9GCjdTalRhTEY5bWFlVGhQdkhMMjc5dnJWSlpoK3I5WUp6YU16NzhnV3NUOVR4ZXNjOVlUMVlVLzJEZENUWU4wSzUKRnlrSEc1VXNJUjVTeU4vOVlDWWtURE5La3BhQ29mMmxOWTE1U0twOFdMdVlXQlBEZTE4TW41anM5ejlhdGY0Ugo4Ti9GL2szdU5KWGRvYVNmWU1Pakt4bTh6UE05RFhpaTA0SlZ6RWNjMmlXU0crSkQwNmNybWNHUm1SZVBSTWZOCk5Mb1E1ZGw4dUlRN0J2Y0tCNkJpRDlFc2t5YitPWGxmTlo2TUZNaFNXTmpuYSt3L0REN1plWkxYcVczWk45RGYKNStBbGFoNlkzVE1EUGxueXkxRk5CVzN1alZrMWdkS21ESFBEUTNDUFBNWVdEa01qdlVJcWdKRHMySVl6dWIvTwpXRjRVUTV5UEJhZzluaWp1dS9uMVZDdGZuSkxwakZIakU1VzdkK3p1UGh6aUJ1WDFOcjRtOVVJdEpaSTNsYmJtCmdvZFlMdGl4b3RwNWF3ang1eXA3MU1zUHlTZzcrbHBPenA4dStuRENJcnc0K0VSME56MG8yTXFmcmJ2VklGQXIKWHIyc2YrejljbmtxalBWWEZaVks3em1TUHI5N0YrbTV4RHpURG9lTG53aVlhUUpOQ0ZhejhMVERjNldVT2w4SQpLOWhHd3FaK0llTlgreW16em16Nkx6WWVPaGlrRmNRaUI0UXVPSjdWWnZWRmVoS3JJMXJLWHJDRU01VmpJZXBkCkhzR0c1eTlLUkcxdEszSU5ScmI0SHlhRDF6SHJSTHRneFpLT1BvWDN0UjNmbTJ1aGova3dwelZnWTltRXJDWDkKd2I4SVA5TXdRR3REQVNBcjZWVmJvUUtDQVFFQThIeWlaK1lVeFE
zQnprL1hoNmZMWWNYakE3NTJrQ204VWZzWQp0d1Z4N0EyNW5YRStiUDRIT3UyVC9kTlc0Tkw1elZPT0JkaWZIcTJQVFNVMGYzQUFHL0pNcnVzM3NrNHd4azM5CitYYlh0dHltWkdxb3FEcVN3TUw2czVpY1RnangvenRhSXk1TWFKYWhUYUpNdFRQQlVpZ3U3enhoeGNwVlhNVUMKTklHcFl5Mkt5R2hyMjVVOFdlR0RYQm9SS2xYUXJXYkNZeW1kMXdYQStEaVl0dzA5eit0VHhPNTRodjFCZkJKZwpWMGd0VWdJU0I2WEZDMU9CWDZXQ1pXYlhCN2hPaHhISjNkNHAyQlZyN0gxL2JDQ0ZvVDY5by9WQVNHRmdtTHRiCnpGalRNbjFIaTluVW5jUFlScWpsN1h0NWdPOHBOa3BwMjVrNHIxRVludWhIazcrYzdRS0NBUUVBM1l3THozNloKNEVPRndvODIrUlVId2lkaFExOEdrU1JvWStKVm1udXJpSXdHZTk3ZmRTVk91d092SDlZSVhsRWpjRitoOHFQVQpJVnpIOXBuYXZjTENEMnhIOWZ5d09ML3pmYmJnYnExZjV4Y3BOUXlYM1JnTGFDUVpLNkpJa3NzOUtDb0dhSzlaCmpMVm41MjFFZlFBRE5DSi93YlRCb3dLQ0dTNDUzSzRBaWFEWHN6TkJLUk5MOHVaWWYwK0x0U2IzV3lkZVQ2eUgKdGZiSXR3NlBSS1lxb2NaeGIrM0pWQWFHcGxScjVZSlNDU1BtTjFMSjU0djlTcXBIVnJMNzJudFNwKzdDODJ6SgpJajVOSXFEOGFsOVZ3WFB5dExRd25hYWc1TW5ka0NLQ3R0MlVHSGZwMEh3ZTJTL1hkemppS2gzZTZaT3MyMSt1CitQUHVrSkUxTTZzU09RS0NBUUJjWVJRbDR6MUJRUHFjM1JESEhJN0UvVFlxWHdTK2RqblFLQ3VqU3FVcmIwNUoKQzVKV1hmSzdFVDVUTjliY3dFNlRNRENUVUZZM2U2WmJsUm9vaGdhVXRhdjlXWC9vcjU2TzNyRGNIbW5ZNWNQSgpPU3VXakFHSnFKeVRWdUZjSEpXUlhPUlFOVjNHbzI1Tkd6WnFPUHBmSys1em1mZFkrbE4yTW51WlhlR0twcGowClNTQjlsa003cDZSRlFnSXNDQkVFTzBBYXhZYkxiWHRtSHArVFdiUFA1ZThrN0JKQ2tKQ1NMNkR3aGxwYWNVOHAKdnVVRlo4dC95VjFneEhOL2xLNGR0cGliOE5hVUdnNStKdXRHeHV0dU9HS3kwK2dncGI5c2pEUkVPQzdRNjAwTApqTjdleDdlUjFSbVY4Mk9HUXRqSzhTVGU1V25mOXNBRmN1YmorNncxQW9JQkFHYXM4Z2hQRHpkOWM2OXd1alNFCkI1MTJyTUFSZVRTcEgrd3l5Q09aYnkwUVlDem1aTCtnODdUK2h4b0ZFc25MWnRZOHJBeU0ydEkvY3JrYUl1TlIKTUtqL01QYVREb1N1aVVWWkRQaWVSMVVOU2Q2NUlHU3FNUmNwcTdTcU9HSTM2UGNGU3dVWFJ6Uk1Hb1NLQW5UQQpIYnY2eFNUY0JlWHJVcW9pMzFRa0hFR3NsbXNKdFFnNVZqaVRncTQyQ25TQlE2QXVSYW85Tm9RaGhISTZRREc3CnBRUm11TW43OVJPSkZyeGRZY2Z6TnR2ZmxHRk5jQjlzcEk0SERwcml4cEJDR1ZPVTl5cmozdStNMmlqVFhVaGIKT0o0NGcySTJKRlhjRkxNVHp5aHVwZy9qN3kvTDIwUHhVa2Fyd25zUmxOZWFFbVpFTjVkUDZBS2U0cENEaTVtUApqaGtDZ2dFQkFMUmtVeG9oZDh2ZVBwR3hPbWlOak5HekpiTDlscGx0TWxhR0dPQ3JOUkZSeEppblgzWU9UVnhiCkRFVlpqaXRHNldydzFxaDdnZXAzeEdJaWZHQ1lZV3pNc0RZTitueGtwV0l
RRmZOV3dYemNRWlhrTEduZVlUdTAKSVU2RjY5Myt1Q0tkcHVCdVl0d3BQNEJCVkNCRTVON0FzRGV4bFBYTzk1cEw3ZzR4OG5RckdNeGJlRXVOdytaTwpPYmYvTnFFMGZZcURkaERiVHI0UDR6bUpBRlpYeDlKMjNJdWRMUFI3MDZITGZ5bDMrb1pUS2Y2ZWdEL1drWXZGCllLdEtDZzI1UmtSYmZBakZkeDlpOVkzcDlPNEFNVUNaRVFIOWQwU1d6LzJWR0VmYzVha09YL2xvWlAyUXF3c2UKeXMyc0k1U0Z5TEd1ZGM3R2MzVTd5UGd0RVN0elVoWT0KLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo=
|
||||
|
||||
kafka:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: microk8s-hostpath
|
||||
|
||||
cassandra:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: microk8s-hostpath
|
||||
|
||||
postgresql:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: microk8s-hostpath
|
||||
@@ -1,21 +1,46 @@
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# This override file should be used when you don't have any
|
||||
# storage solution for creating persistent volumes.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment: false
|
||||
testingEnabled: false
|
||||
testingTimestamp:
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
@@ -25,25 +50,20 @@ opensync-gw-cloud:
|
||||
persistence:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: https://tip-wlan-opensync-gw-cloud:9096
|
||||
|
||||
url: "https://tip-wlan-opensync-gw-cloud:9096"
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: false
|
||||
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
|
||||
enabled: true
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -56,7 +76,6 @@ wlan-prov-service:
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -69,14 +88,12 @@ wlan-ssc-service:
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
|
||||
wlan-port-forwarding-gateway-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -84,7 +101,6 @@ wlan-port-forwarding-gateway-service:
|
||||
externallyVisible:
|
||||
host: api.wlan.demo.lab.wlan.tip.build
|
||||
port: 30401
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
@@ -93,21 +109,28 @@ nginx-ingress-controller:
|
||||
}
|
||||
config:
|
||||
externalStatusAddress: "192.168.56.101"
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: false
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: ""
|
||||
|
||||
enabled: false
|
||||
cassandra:
|
||||
enabled: true
|
||||
cluster:
|
||||
replicaCount: 3
|
||||
seedCount: 2
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: ""
|
||||
|
||||
enabled: false
|
||||
postgresql:
|
||||
enabled: true
|
||||
persistence:
|
||||
postgresqlPassword: postgres
|
||||
replication:
|
||||
enabled: true
|
||||
storageClass: ""
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: false
|
||||
@@ -1,40 +1,48 @@
|
||||
shared:
|
||||
scalability: &jvm-options
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
JVM_MEM_OPTIONS: " "
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
|
||||
# This is a development override file.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# It can be tweaked, based on the need to support different
|
||||
# dev environments.
|
||||
# This file expects to have a GlusterFS storage solution running
|
||||
# before "helm install" is performed.
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
integratedDeployment: false
|
||||
testingEnabled: false
|
||||
testingTimestamp:
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
# Annotations for namespace
|
||||
annotations: {
|
||||
"helm.sh/resource-policy": keep
|
||||
}
|
||||
|
||||
createReleaseNamespace: false
|
||||
|
||||
# Docker registry secret
|
||||
dockerRegistrySecret: ewoJImF1dGhzIjogewoJCSJ0aXAtdGlwLXdsYW4tY2xvdWQtZG9ja2VyLXJlcG8uamZyb2cuaW8iOiB7CgkJCSJhdXRoIjogImRHbHdMWEpsWVdRNmRHbHdMWEpsWVdRPSIKCQl9Cgl9LAoJIkh0dHBIZWFkZXJzIjogewoJCSJVc2VyLUFnZW50IjogIkRvY2tlci1DbGllbnQvMTkuMDMuOCAobGludXgpIgoJfQp9
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
|
||||
opensync-gw-cloud:
|
||||
enabled: true
|
||||
externalhost:
|
||||
@@ -45,57 +53,21 @@ opensync-gw-cloud:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: "https://tip-wlan-opensync-gw-cloud:9096"
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
opensync-mqtt-broker:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: true
|
||||
env:
|
||||
portalsvc: tip-wlan-wlan-portal-service:9051
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui-graphql.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui-graphql.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
|
||||
wlan-cloud-static-portal:
|
||||
enabled: true
|
||||
env:
|
||||
graphql: https://wlan-ui-graphql.wlan.local
|
||||
service:
|
||||
type: NodePort
|
||||
ingress:
|
||||
hosts:
|
||||
- host: wlan-ui.wlan.local
|
||||
paths: [
|
||||
/
|
||||
]
|
||||
tls:
|
||||
- hosts:
|
||||
- wlan-ui.wlan.local
|
||||
secretName: nginx-ingress-controller-default-server-secret
|
||||
|
||||
wlan-portal-service:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
filestoreSize: 1Gi
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
wlan-prov-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -112,9 +84,6 @@ wlan-prov-service:
|
||||
singleDataSourceUsername: tip_user
|
||||
singleDataSourcePassword: tip_password
|
||||
singleDataSourceSslKeyPassword: mypassword
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-ssc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -123,22 +92,16 @@ wlan-ssc-service:
|
||||
sslTruststorePassword: mypassword
|
||||
cassandra:
|
||||
tip_user: tip_user
|
||||
tip_password: tip_password
|
||||
tip_password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-spc-service:
|
||||
enabled: true
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
scalability:
|
||||
<<: *jvm-options
|
||||
|
||||
wlan-port-forwarding-gateway-service:
|
||||
enabled: true
|
||||
creds:
|
||||
@@ -146,7 +109,6 @@ wlan-port-forwarding-gateway-service:
|
||||
externallyVisible:
|
||||
host: api.wlan.demo.lab.wlan.tip.build
|
||||
port: 30401
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
controller:
|
||||
@@ -155,21 +117,52 @@ nginx-ingress-controller:
|
||||
}
|
||||
config:
|
||||
externalStatusAddress: "192.168.56.101"
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
kafka:
|
||||
enabled: true
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
sslKeyPassword: mypassword
|
||||
cassandra:
|
||||
enabled: true
|
||||
cluster:
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful when we use HostPath as storage mechanism
|
||||
## For Hostpath storage, recommendation is cluster_size: 3 and seed_size: 2
|
||||
replicaCount: 3
|
||||
seedCount: 2
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
|
||||
storageClass: "glusterfs-storage"
|
||||
creds:
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
postgresql:
|
||||
enabled: true
|
||||
postgresqlPassword: postgres
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
storageClass: "glusterfs-storage"
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 30
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 30
|
||||
@@ -1,41 +1,146 @@
|
||||
# This override file is useful to test one or more subcharts.
|
||||
# It overrides the default Tip-Wlan parent chart behaviour
|
||||
#
|
||||
# Example use to enable a single subchart (from tip-wlan/ directory):
|
||||
#
|
||||
# helm install opensync-wifi-controller tip-wlan/ -n default
|
||||
# -f tip-wlan/resources/environments/disable-allcharts.yaml
|
||||
# --set opensync-gw-static.enabled=true
|
||||
#
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
nsPrefix: tip
|
||||
# image pull policy
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
pullPolicy: Always
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# override default mount path root directory
|
||||
# referenced by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
integratedDeployment: false
|
||||
testingEnabled:
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: true
|
||||
|
||||
createReleaseNamespace: false
|
||||
|
||||
createDockerRegistrySecret: false
|
||||
|
||||
#################################################################
|
||||
# Enable/disable and configure helm charts (ie. applications)
|
||||
# to customize the TIP-WLAN deployment.
|
||||
#################################################################
|
||||
opensync-gw-static:
|
||||
enabled: false
|
||||
opensync-gw-cloud:
|
||||
enabled: false
|
||||
externalhost:
|
||||
address:
|
||||
ovsdb: tip-wlan-opensync-gw-cloud
|
||||
mqtt: tip-wlan-opensync-mqtt-broker
|
||||
persistence:
|
||||
enabled: true
|
||||
filestore:
|
||||
url: "https://tip-wlan-opensync-gw-cloud:9096"
|
||||
opensync-mqtt-broker:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
wlan-cloud-graphql-gw:
|
||||
enabled: false
|
||||
wlan-cloud-static-portal:
|
||||
enabled: false
|
||||
wlan-portal-service:
|
||||
enabled: false
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
wlan-prov-service:
|
||||
enabled: false
|
||||
creds:
|
||||
enabled: true
|
||||
db:
|
||||
postgresUser:
|
||||
password: postgres
|
||||
tipUser:
|
||||
password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
wlan-ssc-service:
|
||||
enabled: false
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
cassandra:
|
||||
tip_user: tip_user
|
||||
tip_password: tip_password
|
||||
schema_repo:
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
wlan-spc-service:
|
||||
enabled: false
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
wlan-port-forwarding-gateway-service:
|
||||
enabled: false
|
||||
|
||||
creds:
|
||||
websocketSessionTokenEncKey: MyToKeN0MyToKeN1
|
||||
nginx-ingress-controller:
|
||||
enabled: false
|
||||
|
||||
controller:
|
||||
nodeSelector: {
|
||||
type: master
|
||||
}
|
||||
config:
|
||||
externalStatusAddress: "192.168.56.101"
|
||||
zookeeper:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
kafka:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
cassandra:
|
||||
enabled: false
|
||||
cluster:
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful when we use HostPath as storage mechanism
|
||||
## For Hostpath storage, recommendation is cluster_size: 3 and seed_size: 2
|
||||
replicaCount: 3
|
||||
seedCount: 2
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
postgresql:
|
||||
enabled: false
|
||||
postgresqlPassword: postgres
|
||||
## NOTE: If we are using glusterfs as Storage class, we don't really need
|
||||
## replication turned on, since the data is anyway replicated on glusterfs nodes
|
||||
## Replication is useful:
|
||||
## a. When we use HostPath as storage mechanism
|
||||
## b. If master goes down and one of the slave is promoted as master
|
||||
replication:
|
||||
enabled: true
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: "glusterfs-storage"
|
||||
@@ -1,23 +0,0 @@
|
||||
global:
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
|
||||
kafka:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: REPLACEME
|
||||
|
||||
cassandra:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: REPLACEME
|
||||
|
||||
postgresql:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
existingClaim: data-tip-wlan-postgresql-master-0
|
||||
@@ -2,14 +2,14 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tip-common-cassandra-certs
|
||||
name: {{ .Release.Namespace }}-common-cassandra-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
truststore: {{ .Files.Get "resources/certs/truststore.jks" | b64enc }}
|
||||
truststore-password: {{ .Values.global.creds.sslTruststorePassword | b64enc }}
|
||||
truststore-password: {{ "mypassword" | b64enc }}
|
||||
keystore: {{ .Files.Get "resources/certs/cassandra_server_keystore.jks" | b64enc }}
|
||||
keystore-password: {{ .Values.global.creds.sslKeystorePassword | b64enc }}
|
||||
keystore-password: {{ "mypassword" | b64enc }}
|
||||
cassandraservercert.pem: {{ .Files.Get "resources/certs/cassandraservercert.pem" | b64enc }}
|
||||
cassandraserverkey_dec.pem: {{ .Files.Get "resources/certs/cassandraserverkey_dec.pem" | b64enc }}
|
||||
|
||||
@@ -17,7 +17,7 @@ data:
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tip-common-cassandra-client-certs
|
||||
name: {{ .Release.Namespace }}-common-cassandra-client-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
|
||||
@@ -1,21 +1,15 @@
|
||||
{{- define "imagePullSecret" }}
|
||||
{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.global.repository.registry (printf "%s:%s" .Values.global.repository.username .Values.global.repository.password | b64enc) | b64enc }}
|
||||
{{- end }}
|
||||
|
||||
{{- if (.Values.createDockerRegistrySecret | default false) -}}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
metadata:
|
||||
name: {{ .Release.Namespace }}-docker-registry-key
|
||||
namespace: {{ .Release.Namespace }}
|
||||
name: {{ include "common.namespace" . }}-docker-registry-key
|
||||
namespace: {{ include "common.namespace" . }}
|
||||
labels:
|
||||
app: {{ include "common.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
|
||||
release: {{ .Release.Name }}
|
||||
heritage: {{ .Release.Service }}
|
||||
data:
|
||||
.dockerconfigjson: {{ template "imagePullSecret" . }}
|
||||
.dockerconfigjson: {{ .Values.dockerRegistrySecret }}
|
||||
type: kubernetes.io/dockerconfigjson
|
||||
{{- end}}
|
||||
@@ -1,97 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tip-common-kafka-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
kafka-0.keystore.jks: {{ .Files.Get "resources/certs/kafka-server.pkcs12" | b64enc }}
|
||||
kafka.truststore.jks: {{ .Files.Get "resources/certs/truststore.jks" | b64enc }}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tip-common-kafka-client-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
client_keystore.jks: {{ .Files.Get "resources/certs/client_keystore.jks" | b64enc }}
|
||||
kafka-server.pkcs12: {{ .Files.Get "resources/certs/kafka-server.pkcs12" | b64enc }}
|
||||
truststore.jks: {{ .Files.Get "resources/certs/truststore.jks" | b64enc }}
|
||||
server.pkcs12: {{ .Files.Get "resources/certs/server.pkcs12" | b64enc }}
|
||||
|
||||
|
||||
{{ if .Values.kafka.initJobConfig.topics }}
|
||||
{{- $zk := printf "%s-zookeeper-headless" .Release.Name -}}
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-kafka-config-{{ .Release.Revision }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
backoffLimit: 10
|
||||
template:
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
initContainers:
|
||||
- name: depends-on
|
||||
image: {{ .Values.kafka.initJobConfig.image }}
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
until kafka-configs --zookeeper {{ $zk }} --entity-type topics --describe || (( count++ >= 6 ))
|
||||
do
|
||||
echo "Waiting for Zookeeper..."
|
||||
sleep 20
|
||||
done
|
||||
until nc -z {{ $.Release.Name }}-kafka {{ .Values.kafka.initJobConfig.port }} || (( retries++ >= 6 ))
|
||||
do
|
||||
echo "Waiting for Kafka..."
|
||||
sleep 20
|
||||
done
|
||||
containers:
|
||||
- name: kafka-config
|
||||
image: confluentinc/cp-kafka:5.0.1
|
||||
command:
|
||||
- bash
|
||||
- -c
|
||||
- |
|
||||
set -e
|
||||
{{- range $n, $topic := .Values.kafka.initJobConfig.topics }}
|
||||
{{- if and $topic.partitions $topic.replicationFactor $topic.reassignPartitions }}
|
||||
cat << EOF > {{ $topic.name }}-increase-replication-factor.json
|
||||
{"version":1, "partitions":[
|
||||
{{- $partitions := (int $topic.partitions) }}
|
||||
{{- $replicas := (int $topic.replicationFactor) }}
|
||||
{{- range $i := until $partitions }}
|
||||
{"topic":"{{ $topic.name }}","partition":{{ $i }},"replicas":[{{- range $j := until $replicas }}{{ $j }}{{- if ne $j (sub $replicas 1) }},{{- end }}{{- end }}]}{{- if ne $i (sub $partitions 1) }},{{- end }}
|
||||
{{- end }}
|
||||
]}
|
||||
EOF
|
||||
kafka-reassign-partitions --zookeeper {{ $zk }} --reassignment-json-file {{ $topic.name }}-increase-replication-factor.json --execute
|
||||
kafka-reassign-partitions --zookeeper {{ $zk }} --reassignment-json-file {{ $topic.name }}-increase-replication-factor.json --verify
|
||||
{{- else if and $topic.partitions $topic.replicationFactor }}
|
||||
kafka-topics --zookeeper {{ $zk }} --create --if-not-exists --force --topic {{ $topic.name }} --partitions {{ $topic.partitions }} --replication-factor {{ $topic.replicationFactor }}
|
||||
{{- else if $topic.partitions }}
|
||||
kafka-topics --zookeeper {{ $zk }} --alter --force --topic {{ $topic.name }} --partitions {{ $topic.partitions }} || true
|
||||
{{- end }}
|
||||
{{- if $topic.defaultConfig }}
|
||||
kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --alter --force --delete-config {{ nospace $topic.defaultConfig }} || true
|
||||
{{- end }}
|
||||
{{- if $topic.config }}
|
||||
kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --alter --force --add-config {{ nospace $topic.config }}
|
||||
{{- end }}
|
||||
kafka-configs --zookeeper {{ $zk }} --entity-type topics --entity-name {{ $topic.name }} --describe
|
||||
{{- if $topic.acls }}
|
||||
{{- range $a, $acl := $topic.acls }}
|
||||
{{ if and $acl.user $acl.operations }}
|
||||
kafka-acls --authorizer-properties zookeeper.connect={{ $zk }} --force --add --allow-principal User:{{ $acl.user }}{{- range $operation := $acl.operations }} --operation {{ $operation }} {{- end }} --topic {{ $topic.name }} {{ $topic.extraParams }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- end -}}
|
||||
@@ -2,7 +2,7 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: tip-common-postgres-scripts
|
||||
name: {{ .Release.Namespace }}-common-postgres-scripts
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
{{ tpl (.Files.Glob "resources/scripts/creation-replication-user-role.sh").AsConfig . | indent 2 }}
|
||||
@@ -11,7 +11,7 @@ data:
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tip-common-postgres-certs
|
||||
name: {{ .Release.Namespace }}-common-postgres-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
@@ -25,7 +25,7 @@ data:
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: tip-common-postgres-client-certs
|
||||
name: {{ .Release.Namespace }}-common-postgres-client-certs
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
data:
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
# Default values for Tip-Wlan
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
@@ -8,6 +12,8 @@ global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
# namespace to deploy the release
|
||||
nsPrefix: tip
|
||||
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# image pull policy
|
||||
@@ -25,7 +31,10 @@ global:
|
||||
|
||||
# Integrated Deployment which deploys Prov Service, Portal Service and
|
||||
# SSC Service in a single docker image
|
||||
integratedDeployment: false
|
||||
integratedDeployment: false
|
||||
|
||||
# Namespace related
|
||||
createReleaseNamespace: true
|
||||
|
||||
# DockerRegistry Secret
|
||||
createDockerRegistrySecret: true
|
||||
@@ -57,7 +66,8 @@ wlan-integrated-cloud-component-service:
|
||||
enabled: true
|
||||
nginx-ingress-controller:
|
||||
enabled: true
|
||||
|
||||
zookeeper:
|
||||
enabled: true
|
||||
kafka:
|
||||
enabled: true
|
||||
cassandra:
|
||||
|
||||
@@ -1,49 +1,32 @@
|
||||
shared:
|
||||
scalability: &jvm-options
|
||||
#how many concurrent connections single instance of OpenSyncGateway can accept
|
||||
tip_wlan_ovsdb_listener_threadPoolSize: 50
|
||||
#asynchronous task executor - monitor metrics and adjust if tasks start being rejected
|
||||
tip_wlan_AsyncExecutor_CorePoolSize: 10
|
||||
tip_wlan_AsyncExecutor_MaxPoolSize: 50
|
||||
tip_wlan_AsyncExecutor_QueueCapacity: 50
|
||||
#max total number of persistent connections in the http client pool
|
||||
tip_wlan_httpClientConfig_maxConnectionsTotal: 100
|
||||
#max number of persistent connections in the http client pool per destination
|
||||
tip_wlan_httpClientConfig_maxConnectionsPerRoute: 10
|
||||
#max number of concurrent REST API calls a single instance of this service can process
|
||||
tip_wlan_maxHttpThreads: 100
|
||||
#memory tuning parameters for the JVM - max size, initialsize, garbage collection tuning options, etc.
|
||||
JVM_MEM_OPTIONS: " "
|
||||
#max number of connections to PostgreSQL database
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
#max number of idle connections to PostgreSQL database
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
#max number of cached prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
#max number of cached idle prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
#max number of connections to PostgreSQL database
|
||||
singleDataSource_maxTotalConnections: 8
|
||||
#max number of idle connections to PostgreSQL database
|
||||
singleDataSource_maxIdleConnections: 8
|
||||
#max number of cached prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxPreparedStatements: 200
|
||||
#max number of cached idle prepared statements used in PostgreSQL database
|
||||
singleDataSource_maxIdlePreparedStatements: 200
|
||||
# Default values for Tip-Wlan
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
#################################################################
|
||||
# Global configuration overrides.
|
||||
#
|
||||
# These overrides will affect all helm charts (ie. applications)
|
||||
# that are listed below and are 'enabled'.
|
||||
#################################################################
|
||||
global:
|
||||
# Change to an unused port prefix range to prevent port conflicts
|
||||
# with other instances running within the same k8s cluster
|
||||
nodePortPrefix: 302
|
||||
nodePortPrefixExt: 304
|
||||
# namespace to deploy the release
|
||||
nsPrefix: tip
|
||||
|
||||
repository:
|
||||
registry: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
username: tip-read
|
||||
password: tip-read
|
||||
repository: tip-tip-wlan-cloud-docker-repo.jfrog.io
|
||||
# image pull policy
|
||||
pullPolicy: Always
|
||||
|
||||
# default mount path root directory referenced
|
||||
# by persistent volumes and log files
|
||||
persistence:
|
||||
|
||||
# override default resource limit flavor for all charts
|
||||
flavor: unlimited
|
||||
|
||||
# flag to enable debugging - application support required
|
||||
debugEnabled: false
|
||||
|
||||
@@ -52,6 +35,7 @@ global:
|
||||
integratedDeployment: false
|
||||
|
||||
testingEnabled: false
|
||||
|
||||
testingTimestamp:
|
||||
|
||||
# DockerRegistry Secret
|
||||
@@ -86,144 +70,13 @@ wlan-port-forwarding-gateway-service:
|
||||
enabled: false
|
||||
wlan-integrated-cloud-component-service:
|
||||
enabled: false
|
||||
|
||||
nginx-ingress-controller:
|
||||
enabled: false
|
||||
|
||||
kafka:
|
||||
initJobConfig:
|
||||
image: confluentinc/cp-kafka:5.0.1
|
||||
port: 9092
|
||||
topics:
|
||||
- name: wlan_service_metrics
|
||||
partitions: 1
|
||||
replicationFactor: 1
|
||||
- name: system_events
|
||||
partitions: 1
|
||||
replicationFactor: 1
|
||||
- name: customer_events
|
||||
partitions: 1
|
||||
replicationFactor: 1
|
||||
creds:
|
||||
sslKeyPassword: mypassword
|
||||
sslKeystorePassword: mypassword
|
||||
sslTruststorePassword: mypassword
|
||||
zookeeper:
|
||||
enabled: false
|
||||
kafka:
|
||||
enabled: false
|
||||
replicaCount: 1
|
||||
image:
|
||||
debug: true
|
||||
auth:
|
||||
clientProtocol: mtls
|
||||
interBrokerProtocol: tls
|
||||
jksSecret: tip-common-kafka-certs
|
||||
jksPassword: mypassword
|
||||
tlsEndpointIdentificationAlgorithm: ""
|
||||
jaas:
|
||||
clientUsers:
|
||||
- brokerUser
|
||||
clientPassword:
|
||||
- brokerPassword
|
||||
extraEnvVars:
|
||||
- name: KAFKA_CFG_SSL_KEYSTORE_TYPE
|
||||
value: PKCS12
|
||||
allowPlaintextListener: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
metrics:
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
namespace: monitoring
|
||||
selector:
|
||||
release: prometheus-operator
|
||||
zookeeper:
|
||||
enabled: true
|
||||
persistence:
|
||||
enabled: true
|
||||
|
||||
cassandra:
|
||||
enabled: false
|
||||
tlsEncryptionSecretName: tip-common-cassandra-certs
|
||||
image:
|
||||
debug: true
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
replicaCount: 1
|
||||
cluster:
|
||||
name: TipWlanCluster
|
||||
seedCount: 1
|
||||
internodeEncryption: all
|
||||
clientEncryption: true
|
||||
exporter:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
additionalLabels:
|
||||
release: prometheus-operator
|
||||
dbUser:
|
||||
user: cassandra
|
||||
password: cassandra
|
||||
resources:
|
||||
limits: {}
|
||||
requests:
|
||||
cpu: 1
|
||||
memory: 3Gi
|
||||
|
||||
postgresql:
|
||||
enabled: false
|
||||
postgresqlDatabase: tip
|
||||
image:
|
||||
debug: true
|
||||
metrics:
|
||||
enabled: false
|
||||
serviceMonitor:
|
||||
enabled: false
|
||||
namespace: monitoring
|
||||
additionalLabels:
|
||||
release: prometheus-operator
|
||||
postgresqlUsername: postgres
|
||||
postgresqlPassword: postgres
|
||||
pgHbaConfiguration: |
|
||||
hostssl replication repl_user 0.0.0.0/0 md5 clientcert=0
|
||||
hostssl postgres postgres 0.0.0.0/0 cert clientcert=1
|
||||
hostssl postgres postgres ::/0 cert clientcert=1
|
||||
hostssl all all 0.0.0.0/0 md5 clientcert=1
|
||||
replication:
|
||||
enabled: true
|
||||
user: repl_user
|
||||
password: repl_password
|
||||
slaveReplicas: 1
|
||||
persistence:
|
||||
enabled: true
|
||||
storageClass: glusterfs-storage
|
||||
volumePermissions:
|
||||
enabled: true
|
||||
livenessProbe:
|
||||
enabled: false
|
||||
readinessProbe:
|
||||
enabled: false
|
||||
tls:
|
||||
enabled: true
|
||||
certificatesSecret: tip-common-postgres-certs
|
||||
certFilename: cert.crt
|
||||
certKeyFilename: cert.key
|
||||
certCAFilename: cacert.pem
|
||||
initdbScriptsConfigMap: tip-common-postgres-scripts
|
||||
extraEnv:
|
||||
- name: PGSSLCERT
|
||||
value: /opt/tip-wlan/certs/postgresclientcert.pem
|
||||
- name: PGSSLKEY
|
||||
value: /opt/tip-wlan/certs/postgresclientkey_dec.pem
|
||||
- name: PGSSLROOTCERT
|
||||
value: /opt/tip-wlan/certs/cacert.pem
|
||||
primary:
|
||||
extraInitContainers:
|
||||
- command: [ "sh", "-c", "chmod 0600 /opt/bitnami/postgresql/certs/postgresclientkey_dec.pem" ]
|
||||
image: busybox:latest
|
||||
name: chmod-client-cert-additional
|
||||
securityContext:
|
||||
runAsUser: 0
|
||||
volumeMounts:
|
||||
- mountPath: /opt/bitnami/postgresql/certs
|
||||
name: postgresql-certificates
|
||||
enabled: false
|
||||
Reference in New Issue
Block a user