wlan-toolsmith/helmfile/cloud-sdk/helmfile.yaml
repositories:
- name: stable
  url: https://charts.helm.sh/stable
- name: kokuwa
  url: https://kokuwaio.github.io/helm-charts
- name: nginx
  url: https://kubernetes.github.io/ingress-nginx
- name: eks
  url: https://aws.github.io/eks-charts
- name: elastic
  url: https://helm.elastic.co
- name: kubernetes-dashboard
  url: https://kubernetes.github.io/dashboard/
- name: bitnami
  url: https://charts.bitnami.com/bitnami
- name: influxdata
  url: https://helm.influxdata.com
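# per-environment values; each *.enabled flag here drives the condition field of the matching release below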
environments:
  azure:
    values:
    - monitoring:
        namespace: monitoring
    - domain: tip.4c74356b41.com
    - storageClass: default
    - autoscaler:
        enabled: true
    - ingress:
        enabled: true
    - elastic:
        enabled: true
    - kibana:
        enabled: true
    - prometheus:
        enabled: true
    - external-dns:
        enabled: true
  amazon-cicd:
    secrets:
    - secrets/influxdb.yaml
    values:
    - eks:
        clusterName: tip-wlan-main
        region: us-east-2
        accountID: 289708231103
        hostedZoneId: cicd
        certificateARN: arn:aws:acm:us-east-2:289708231103:certificate/bfa89c7a-5b64-4a8a-bcfe-ffec655b5285
    - monitoring:
        namespace: monitoring
        nolaNamespaces: ['nola-01', 'nola-02', 'nola-04', 'nola-05', 'nola-15', 'nola-ext-01', 'nola-ext-02', 'nola-ext-03', 'nola-ext-04', 'nola-ext-05', 'nola-qualitest']
    - domain: lab.wlan.tip.build
    - storageClass: gp2
    - autoscaler:
        enabled: true
    - ingress:
        enabled: false
    - elastic:
        enabled: true
    - kibana:
        enabled: true
    - logstash:
        enabled: true
    - prometheus:
        enabled: true
    - k8s-dashboard:
        enabled: true
    - metrics-server:
        enabled: true
    - external-dns:
        enabled: true
    - alb-ingress:
        enabled: true
    - node-termination-handler:
        enabled: true
    - influxdb:
        enabled: true
helmDefaults:
  force: false
  timeout: 300
  # these don't seem to work
  # wait: false
  # recreatePods: true
  # verify: true
templates:
  default: &default
    namespace: kube-system
    missingFileHandler: Warn
  cluster-autoscaler: &cluster-autoscaler
    values:
    - envs/common/cluster-autoscaler.yaml.gotmpl
  external-dns: &external-dns
    values:
    - envs/common/external-dns.yaml.gotmpl
# core setup
releases:
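# cluster-autoscaler scales the worker node groups up/down based on pending pods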
- name: cluster-autoscaler
  condition: autoscaler.enabled
  <<: *default
  <<: *cluster-autoscaler
  chart: stable/cluster-autoscaler
  version: 7.3.2
  labels:
    role: setup
    group: system
    app: autoscaler
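# external-dns keeps DNS records in sync with Services and Ingresses created in the cluster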
- name: external-dns
  condition: external-dns.enabled
  <<: *default
  <<: *external-dns
  chart: stable/external-dns
  version: 2.20.4
  labels:
    role: setup
    group: system
    app: external-dns
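# nginx-ingress serves the SSO-protected nginx-sso ingress class; TLS is terminated at the AWS ELB with the ACM certificate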
- name: nginx-ingress
  condition: ingress.enabled
  <<: *default
  chart: nginx/ingress-nginx
  version: 3.4.0
  labels:
    role: setup
    group: system
    app: ingress
  values:
  - controller:
      ingressClass: nginx-sso
      service:
        annotations:
          service.beta.kubernetes.io/aws-load-balancer-ssl-cert: {{ .Environment.Values.eks.certificateARN }}
          service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https
          service.beta.kubernetes.io/aws-load-balancer-type: elb
          service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp
        targetPorts:
          http: http
          https: http
      publishService:
        enabled: true
      metrics:
        enabled: true
        serviceMonitor:
          enabled: true
          additionalLabels:
            release: prometheus-operator
  - defaultBackend:
      enabled: true
      image:
        repository: 4c74356b41/custom-backend
        tag: latest
# monitoring
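# prometheus-operator deploys Prometheus and Grafana (anonymous read-only access) behind the oauth2 SSO ingress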
- name: prometheus-operator
  condition: prometheus.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: stable/prometheus-operator
  labels:
    role: setup
    group: monitoring
    app: prometheus-operator
  values:
  - prometheusOperator:
      manageCrds: true
      createCustomResource: false
  - prometheus:
      enabled: true
      prometheusSpec:
        storageSpec:
          volumeClaimTemplate:
            spec:
              storageClassName: gp2
              accessModes: ["ReadWriteOnce"]
              resources:
                requests:
                  storage: 50Gi
      ingress:
        enabled: true
        annotations:
          nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
          nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
          kubernetes.io/ingress.class: nginx-sso
        hosts:
        - prometheus.{{ .Environment.Values.domain }}
  - grafana:
      grafana.ini:
        users:
          viewers_can_edit: true
        auth:
          disable_login_form: true
          disable_signout_menu: true
        auth.anonymous:
          enabled: true
          org_role: Viewer
      testFramework:
        enabled: false
      ingress:
        enabled: true
        annotations:
          nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
          nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
          kubernetes.io/ingress.class: nginx-sso
        hosts:
        - grafana.{{ .Environment.Values.domain }}
      dashboardProviders:
        dashboardproviders.yaml:
          apiVersion: 1
          providers:
          - name: 'default'
            orgId: 1
            folder: imported
            type: file
            disableDeletion: false
            editable: true
            options:
              path: /var/lib/grafana/dashboards/default
      dashboards:
        default:
          mosquitto:
            gnetId: 11542
            revision: 1
            datasource: Prometheus
          nginx-ingress:
            gnetId: 9614
            revision: 1
            datasource: Prometheus
          postgres:
            gnetId: 6742
            revision: 1
            datasource: Prometheus
          # still need to finalize the dashboards below but they are working partially
          cassandra:
            # gnetId: 5408
            gnetId: 6258
            revision: 3
            datasource: Prometheus
          kafka:
            gnetId: 7589
            revision: 5
            # gnetId: 10555
            # revision: 1
            datasource: Prometheus
          jvm-metrics-per-pod:
            datasource: Prometheus
            json: |
              {{- readFile "grafana-dashboards/jvm-metrics-per-pod.json" | nindent 14 }}
          cloud-controller-perf:
            datasource: Prometheus
            json: |
              {{- readFile "grafana-dashboards/cloud-controller-perf.json" | nindent 14 }}
          pod-events:
            datasource: Prometheus
            json: |
              {{- readFile "grafana-dashboards/pod-events.json" | nindent 14 }}
      datasources:
        datasources.yaml:
          apiVersion: 1
          datasources:
          - name: Prometheus
            type: prometheus
            url: http://prometheus-operated:9090
            access: proxy
            isDefault: false
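# helper release from the local charts/standalone-monitoring chart, wired to the same monitoring namespace and domain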
- name: prometheus-operator-helper
  condition: prometheus.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: charts/standalone-monitoring
  labels:
    role: setup
    group: monitoring
    app: prometheus-operator
    sub: helper
  values:
  - monitoring:
      namespace: {{ .Environment.Values.monitoring.namespace }}
      domain: {{ .Environment.Values.domain }}
    proxy:
      namespace: kube-system
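# oauth2 SSO backend (local charts/sso chart) referenced by the auth-url/auth-signin annotations on the nginx-sso ingresses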
- name: prometheus-operator-ingress-auth
  condition: prometheus.enabled
  namespace: kube-system
  chart: charts/sso
  labels:
    role: setup
    group: monitoring
    app: prometheus-operator
    sub: oAuth
  values:
  - charts/sso/values.local.yaml # temporary solution to work around secrets in plain text in the repo
  - monitoring:
      namespace: {{ .Environment.Values.monitoring.namespace }}
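# fluentd-elasticsearch tails container logs on every node and ships them to the in-cluster Elasticsearch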
- name: fluentd
  condition: elastic.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: kokuwa/fluentd-elasticsearch
  labels:
    role: setup
    group: monitoring
    app: fluentd
  values:
  - elasticsearch:
      serviceAccount:
        create: true
      awsSigningSidecar:
        enabled: false
      hosts:
      - elasticsearch-client.{{ .Environment.Values.monitoring.namespace }}.svc.cluster.local
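# Elasticsearch cluster backing the logging stack; data nodes get 650Gi persistent volumes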
- name: elasticsearch
  condition: elastic.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: stable/elasticsearch
  labels:
    role: setup
    group: monitoring
    app: elasticsearch
  values:
  - data:
      persistence:
        size: 650Gi
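# curator prunes logstash-* indices older than 30 days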
- name: elasticsearch-curator
  condition: elastic.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: stable/elasticsearch-curator
  labels:
    role: setup
    group: monitoring
    app: elasticsearch
  values:
  - configMaps:
      config_yml: |-
        client:
          hosts:
          - http://elasticsearch-client.{{ .Environment.Values.monitoring.namespace }}.svc.cluster.local:9200
      action_file_yml: |-
        actions:
          1:
            action: delete_indices
            description: "Delete old indices"
            options:
              ignore_empty_list: True
              continue_if_exception: True
              timeout_override: 300
            filters:
            - filtertype: pattern
              kind: prefix
              value: 'logstash-'
            - filtertype: age
              source: name
              direction: older
              timestring: '%Y.%m.%d'
              unit: days
              unit_count: 30
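# Kibana behind SSO; the postStart hook waits for the UI and registers the default logstash-* index pattern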
- name: kibana
  condition: kibana.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: stable/kibana
  labels:
    role: setup
    group: monitoring
    app: kibana
  values:
  - testFramework:
      enabled: false
  - image:
      tag: "6.8.6"
  - files:
      kibana.yml:
        elasticsearch.hosts: http://elasticsearch-client.{{ .Environment.Values.monitoring.namespace }}.svc.cluster.local:9200
  - ingress:
      enabled: true
      annotations:
        nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
        nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
        kubernetes.io/ingress.class: nginx-sso
      hosts:
      - kibana.{{ .Environment.Values.domain }}
  - lifecycle:
      postStart:
        exec:
          command:
          - bash
          - -c
          - |
            #!/bin/bash
            # configure the default index pattern
            TEMPLATE_NAME="logstash"
            INDEX_PATTERN="logstash-*"
            KIBANA_URL=http://localhost:5601
            # wait until the service is ready
            while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $KIBANA_URL/app/kibana)" != "200" ]]; do sleep 1; done
            # apply the default index pattern in Kibana
            curl -X POST -v $KIBANA_URL/api/saved_objects/index-pattern/$TEMPLATE_NAME \
              -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
              -d '{"attributes": {"title": "'$INDEX_PATTERN'"}}'
  - dashboardImport:
      enabled: true
      timeout: 60
      basePath: ""
      dashboards:
        k8s-container-logs: |
          {{- readFile "kibana-dashboards/k8s-container-logs.json" | nindent 10 }}
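# Logstash accepts syslog through an internal NLB (port 514 -> 5514) and writes daily logstash-* indices to Elasticsearch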
- name: logstash
  condition: logstash.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: elastic/logstash
  labels:
    role: setup
    group: monitoring
    app: logstash
  values:
  - image: docker.elastic.co/logstash/logstash-oss
  - imageTag: 7.12.0
  - replicas: 2
  - logstashConfig:
      logstash.yml: |
        http.host: 0.0.0.0
        log.level: warn
  - logstashPipeline:
      logstash.conf: "" # override default pipeline
      syslog.conf: |
        input {
          syslog {
            port => 5514
          }
        }
        output {
          elasticsearch {
            hosts => "http://elasticsearch-client.{{ .Environment.Values.monitoring.namespace }}.svc.cluster.local:9200"
            index => "logstash-%{+YYYY.MM.dd}"
          }
        }
  - service:
      type: LoadBalancer
      annotations:
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb-ip"
        service.beta.kubernetes.io/aws-load-balancer-internal: "true"
        # waiting for an aws-load-balancer-controller release that supports this annotation, so we can set static IP addresses
        # service.beta.kubernetes.io/aws-load-balancer-private-ipv4-addresses: "10.10.10.40,10.10.11.40,10.10.12.40"
      ports:
      - name: syslog
        port: 514
        targetPort: 5514
        protocol: TCP
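# local chart with the RBAC roles for the dashboard release below (which sets rbac.create: false)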
- name: k8s-dashboard-roles
  condition: k8s-dashboard.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: charts/k8s-dashboard-roles
  labels:
    role: setup
    group: monitoring
    app: k8s-dashboard
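# kubernetes-dashboard scoped to the NOLA namespaces, with skip-login enabled, exposed behind the SSO ingress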
- name: k8s-dashboard
  condition: k8s-dashboard.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: kubernetes-dashboard/kubernetes-dashboard
  labels:
    role: setup
    group: monitoring
    app: k8s-dashboard
  values:
  - settings:
      defaultNamespace: {{ index .Environment.Values.monitoring.nolaNamespaces 0 }}
      namespaceFallbackList: {{ .Environment.Values.monitoring.nolaNamespaces | toJson }}
      itemsPerPage: 25
      clusterName: TIP WLAN CI/CD
  - extraArgs:
    - --enable-skip-login
    - --system-banner=Welcome to the TIP WLAN CI/CD Kubernetes cluster. If you are missing your namespace in the above select box, please <a href="https://telecominfraproject.atlassian.net/browse/WIFI">create a ticket</a>.
  - rbac:
      create: false
      clusterRoleMetrics: true
      clusterReadOnlyRole: false
  - service:
      type: NodePort
      externalPort: 80
  - protocolHttp: true
  - ingress:
      enabled: true
      paths:
      - /
      - /*
      annotations:
        # alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_302"}}'
        # alb.ingress.kubernetes.io/group.name: wlan-cicd
        # alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
        # alb.ingress.kubernetes.io/scheme: internet-facing
        # kubernetes.io/ingress.class: alb
        nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth"
        nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri"
        kubernetes.io/ingress.class: nginx-sso
      hosts:
      - k8s-dashboard.{{ .Environment.Values.domain }}
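# metrics-server provides the resource metrics API used by kubectl top and the HPA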
- name: metrics-server
  condition: metrics-server.enabled
  namespace: {{ .Environment.Values.monitoring.namespace }}
  chart: bitnami/metrics-server
  labels:
    role: setup
    group: monitoring
    app: metrics-server
  values:
  - apiService:
      create: true
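# aws-load-balancer-controller provisions ALBs/NLBs for Ingress and Service resources (e.g. the influxdb ingress below), using IRSA for AWS access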
- name: aws-load-balancer-controller
  <<: *default
  condition: alb-ingress.enabled
  chart: eks/aws-load-balancer-controller
  version: 1.1.5
  values:
  - serviceAccount:
      annotations:
        eks.amazonaws.com/role-arn: arn:aws:iam::{{ .Values.eks.accountID }}:role/{{ .Values.eks.clusterName }}-alb-ingress
    clusterName: {{ .Values.eks.clusterName }}
    enableShield: false
    enableWaf: false
    enableWafv2: false
    logLevel: info
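# aws-node-termination-handler cordons and drains nodes when EC2 spot interruption or scheduled maintenance notices arrive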
- name: aws-node-termination-handler
  <<: *default
  condition: node-termination-handler.enabled
  chart: eks/aws-node-termination-handler
  version: 0.13.2
  labels:
    role: setup
    group: system
    app: node-termination-handler
  values:
  - deleteLocalData: true
  - podTerminationGracePeriod: -1 # use values defined in Pod
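# InfluxDB 2.x for load-testing results (tools-133), exposed through an internet-facing ALB with an external-dns record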
- name: influxdb
  namespace: test-bss
  chart: influxdata/influxdb2
  version: 2.0.1
  condition: influxdb.enabled
  labels:
    role: setup
    group: load-testing
    app: influxdb
    task: tools-133
  values:
  - image:
      tag: 2.0.6-alpine
  - adminUser:
      organization: tip
      bucket: tip-cicd
      user: tip
      password: {{ .Environment.Values.influxdb.adminUser.password }}
      token: {{ .Environment.Values.influxdb.adminUser.token }}
      retention_policy: "0s"
  - persistence:
      storageClass: gp2
      size: 10Gi
  - service:
      type: NodePort
  - ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: alb
        alb.ingress.kubernetes.io/scheme: internet-facing
        alb.ingress.kubernetes.io/group.name: test-bss-load-testing
        alb.ingress.kubernetes.io/certificate-arn: {{ .Environment.Values.eks.certificateARN }}
        alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
        alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_302"}}'
        alb.ingress.kubernetes.io/healthcheck-path: /health
        external-dns.alpha.kubernetes.io/hostname: influx.cicd.{{ .Environment.Values.domain }}
      hostname: influx.cicd.{{ .Environment.Values.domain }}
      path: "/*"