General consistency cleanup

* Made values.yaml consistent across all charts and removed globals
references from subcharts, since globals are difficult to override there.
Only ports should live in globals so they can be used to build URLs, which
can come as part of a future commit: the hostname portion of a service
endpoint will come from openstack-base/_hosts.tpl, while the port will come
from the chart itself as a global so other charts can reference it to build
a complete URL (see the sketch after this list). Keeping the hostnames
themselves as globals in individual charts makes a sweeping top-level FQDN
change difficult.

* Cleaned up the yaml requirements and incorporated a new _common.tpl that
is distributed to all charts, allowing common endpoint naming while still
retaining the ability to install individual charts.

* Fixed keystone URL generation during bootstrap; a correct URL is critical
because keystone uses it to construct all subsequent URLs in the request.
Also allowed controlling the default endpoint version and scheme.

* Added the missing NAMESPACE declaration to the keystone deployment, as it
is required for the entrypoint container to discover resources outside the
'default' namespace.

* Refactored all nodeSelector values to be consistent across all charts.
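
As a hedged illustration of the hostname/port split described in the first
bullet (the value keys and defaults below are placeholders, not keys
introduced by this commit), a chart template could assemble a keystone auth
URL like this:

```yaml
# Sketch only: the hostname comes from the shared openstack-base/_hosts.tpl
# define, while scheme, port and version come from overridable chart values
# (all value names here are hypothetical).
auth_uri: {{ .Values.keystone.scheme | default "http" }}://{{ include "keystone_api_endpoint_host_internal" . }}:{{ .Values.global.keystone_api_port | default 5000 }}/{{ .Values.keystone.api_version | default "v3" }}
```
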
Author: Alan Meadows
Date: 2016-11-25 16:27:18 -08:00
parent 564f9757fc
commit 7929c94c21
59 changed files with 329 additions and 204 deletions

openstack-base/.gitignore (vendored, new file)

@@ -0,0 +1 @@
secrets/*


@@ -0,0 +1,27 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
bin/
etc/
patches/
*.py
Makefile

openstack-base/Chart.yaml (new executable file)

@@ -0,0 +1,4 @@
apiVersion: v1
description: A base chart for all openstack charts
name: openstack-base
version: 0.1.0

openstack-base/Makefile (new file)

@@ -0,0 +1,7 @@
EXCLUDE := templates/* charts/* Chart.yaml requirement* values.yaml Makefile utils/* openstack-base/Chart.yaml
FILES := $(shell find * -type f $(foreach e,$(EXCLUDE), -not -path "$(e)") )
templates/_common.tpl: Makefile $(FILES)
	echo Generating $(CURDIR)/$@
	rm -f $@
	for i in $(FILES); do printf '{{ define "'$$i'" }}' >> $@; cat $$i >> $@; printf "{{ end }}\n" >> $@; done
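
The recipe above wraps every non-excluded file in a named define inside
templates/_common.tpl, so any chart that carries the generated file can
`include` shared snippets by their path. A hedged sketch of one generated
entry (the file name is made up for illustration):

```
{{ define "scripts/example.sh" }}#!/bin/bash
echo "shared helper"
{{ end }}
```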


@@ -0,0 +1,3 @@
dependencies: []
digest: sha256:81059fe6210ccee4e3349c0f34c12d180f995150128a913d63b65b7937c6b152
generated: 2016-11-25T16:25:49.376763578-08:00


@@ -0,0 +1 @@
dependencies: []


@@ -0,0 +1,15 @@
# fqdn
{{define "region"}}cluster{{end}}
{{define "tld"}}local{{end}}
# infrastructure services
{{define "rabbitmq_host"}}rabbitmq.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{end}}
{{define "memcached_host"}}memcached.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{end}}
{{define "mariadb_host"}}mariadb.{{.Release.Namespace}}.svc.kubernetes.{{ include "region" . }}.{{ include "tld" . }}{{end}}
# keystone
{{define "keystone_db_host"}} {{ include "mariadb_host" . }}{{end}}
{{define "keystone_api_endpoint_host_admin"}}keystone-api.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{end}}
{{define "keystone_api_endpoint_host_internal"}}keystone-api.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{end}}
{{define "keystone_api_endpoint_host_public"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{end}}
{{define "keystone_api_endpoint_host_admin_ext"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{end}}


@@ -0,0 +1,65 @@
# Ceph Kubernetes Secret Generation
This script generates ceph keyrings and configs as Kubernetes secrets.
Sigil is required for template handling and must be installed in the system PATH. Instructions can be found here: <https://github.com/gliderlabs/sigil>
The following functions are provided:
## Generate raw FSID (can be used for other functions)
```bash
./generate_secrets.sh fsid
```
## Generate raw ceph.conf (For verification)
```bash
./generate_secrets.sh ceph-conf-raw <fsid> "overridekey=value"
```
Take a look at `ceph/ceph.conf.tmpl` for the default values
## Generate encoded ceph.conf secret
```bash
./generate_secrets.sh ceph-conf <fsid> "overridekey=value"
```
## Generate encoded admin keyring secret
```bash
./generate_secrets.sh admin-keyring
```
## Generate encoded mon keyring secret
```bash
./generate_secrets.sh mon-keyring
```
## Generate a combined secret
Contains ceph.conf, admin keyring and mon keyring. Useful for generating the `/etc/ceph` directory
```bash
./generate_secrets.sh combined-conf
```
## Generate encoded bootstrap keyring secret
```bash
./generate_secrets.sh bootstrap-keyring <osd|mds|rgw>
```
# Kubernetes workflow
```bash
./generate_secrets.sh all `./generate_secrets.sh fsid`
kubectl create secret generic ceph-conf-combined --from-file=ceph.conf --from-file=ceph.client.admin.keyring --from-file=ceph.mon.keyring --namespace=ceph
kubectl create secret generic ceph-bootstrap-rgw-keyring --from-file=ceph.keyring=ceph.rgw.keyring --namespace=ceph
kubectl create secret generic ceph-bootstrap-mds-keyring --from-file=ceph.keyring=ceph.mds.keyring --namespace=ceph
kubectl create secret generic ceph-bootstrap-osd-keyring --from-file=ceph.keyring=ceph.osd.keyring --namespace=ceph
kubectl create secret generic ceph-client-key --from-file=ceph-client-key --namespace=ceph
```
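
The combined secret can then be mounted into a Ceph client pod to populate
`/etc/ceph`; a minimal sketch, assuming the `ceph-conf-combined` secret
created above (the container image is a placeholder):

```yaml
# Sketch: expose the combined ceph.conf + keyrings under /etc/ceph.
spec:
  containers:
    - name: ceph-client
      image: ceph/base   # hypothetical image
      volumeMounts:
        - name: ceph-conf
          mountPath: /etc/ceph
  volumes:
    - name: ceph-conf
      secret:
        secretName: ceph-conf-combined
```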


@@ -0,0 +1,15 @@
#!/usr/bin/env python
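# Generate a cephx-style secret: a small binary header (crypto type,
# creation time, key length) followed by 16 random bytes, base64-encoded.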
import os
import struct
import time
import base64
key = os.urandom(16)
header = struct.pack(
'<hiih',
1, # le16 type: CEPH_CRYPTO_AES
int(time.time()), # le32 created: seconds
0, # le32 created: nanoseconds,
len(key), # le16: len(key)
)
print(base64.b64encode(header + key).decode('ascii'))


@@ -0,0 +1,82 @@
#!/bin/bash
gen-fsid() {
echo "$(uuidgen)"
}
gen-ceph-conf-raw() {
fsid=${1:?}
shift
conf=$(sigil -p -f templates/ceph/ceph.conf.tmpl "fsid=${fsid}" $@)
echo "${conf}"
}
gen-ceph-conf() {
fsid=${1:?}
shift
conf=$(sigil -p -f templates/ceph/ceph.conf.tmpl "fsid=${fsid}" $@)
echo "${conf}"
}
gen-admin-keyring() {
key=$(python ceph-key.py)
keyring=$(sigil -f templates/ceph/admin.keyring.tmpl "key=${key}")
echo "${keyring}"
}
gen-mon-keyring() {
key=$(python ceph-key.py)
keyring=$(sigil -f templates/ceph/mon.keyring.tmpl "key=${key}")
echo "${keyring}"
}
gen-combined-conf() {
fsid=${1:?}
shift
conf=$(sigil -p -f templates/ceph/ceph.conf.tmpl "fsid=${fsid}" $@)
echo "${conf}" > ../../secrets/ceph.conf
key=$(python ceph-key.py)
keyring=$(sigil -f templates/ceph/admin.keyring.tmpl "key=${key}")
echo "${key}" > ../../secrets/ceph-client-key
echo "${keyring}" > ../../secrets/ceph.client.admin.keyring
key=$(python ceph-key.py)
keyring=$(sigil -f templates/ceph/mon.keyring.tmpl "key=${key}")
echo "${keyring}" > ../../secrets/ceph.mon.keyring
}
gen-bootstrap-keyring() {
service="${1:-osd}"
key=$(python ceph-key.py)
bootstrap=$(sigil -f templates/ceph/bootstrap.keyring.tmpl "key=${key}" "service=${service}")
echo "${bootstrap}"
}
gen-all-bootstrap-keyrings() {
gen-bootstrap-keyring osd > ../../secrets/ceph.osd.keyring
gen-bootstrap-keyring mds > ../../secrets/ceph.mds.keyring
gen-bootstrap-keyring rgw > ../../secrets/ceph.rgw.keyring
}
gen-all() {
gen-combined-conf $@
gen-all-bootstrap-keyrings
}
main() {
set -eo pipefail
case "$1" in
fsid) shift; gen-fsid $@;;
ceph-conf-raw) shift; gen-ceph-conf-raw $@;;
ceph-conf) shift; gen-ceph-conf $@;;
admin-keyring) shift; gen-admin-keyring $@;;
mon-keyring) shift; gen-mon-keyring $@;;
bootstrap-keyring) shift; gen-bootstrap-keyring $@;;
combined-conf) shift; gen-combined-conf $@;;
all) shift; gen-all $@;;
esac
}
main "$@"


@@ -0,0 +1,6 @@
[client.admin]
key = {{ $key }}
auid = 0
caps mds = "allow"
caps mon = "allow *"
caps osd = "allow *"


@@ -0,0 +1,3 @@
[client.bootstrap-{{ $service }}]
key = {{ $key }}
caps mon = "allow profile bootstrap-{{ $service }}"


@@ -0,0 +1,71 @@
[global]
fsid = ${fsid:?}
cephx = ${auth_cephx:-"true"}
cephx_require_signatures = ${auth_cephx_require_signatures:-"false"}
cephx_cluster_require_signatures = ${auth_cephx_cluster_require_signatures:-"true"}
cephx_service_require_signatures = ${auth_cephx_service_require_signatures:-"false"}
# auth
max_open_files = ${global_max_open_files:-"131072"}
osd_pool_default_pg_num = ${global_osd_pool_default_pg_num:-"128"}
osd_pool_default_pgp_num = ${global_osd_pool_default_pgp_num:-"128"}
osd_pool_default_size = ${global_osd_pool_default_size:-"3"}
osd_pool_default_min_size = ${global_osd_pool_default_min_size:-"1"}
mon_osd_full_ratio = ${global_mon_osd_full_ratio:-".95"}
mon_osd_nearfull_ratio = ${global_mon_osd_nearfull_ratio:-".85"}
mon_host = ${global_mon_host:-'ceph-mon'}
[mon]
mon_osd_down_out_interval = ${mon_mon_osd_down_out_interval:-"600"}
mon_osd_min_down_reporters = ${mon_mon_osd_min_down_reporters:-"4"}
mon_clock_drift_allowed = ${mon_mon_clock_drift_allowed:-".15"}
mon_clock_drift_warn_backoff = ${mon_mon_clock_drift_warn_backoff:-"30"}
mon_osd_report_timeout = ${mon_mon_osd_report_timeout:-"300"}
[osd]
journal_size = ${osd_journal_size:-"100"}
cluster_network = ${osd_cluster_network:-'10.244.0.0/16'}
public_network = ${osd_public_network:-'10.244.0.0/16'}
osd_mkfs_type = ${osd_osd_mkfs_type:-"xfs"}
osd_mkfs_options_xfs = ${osd_osd_mkfs_options_xfs:-"-f -i size=2048"}
osd_mon_heartbeat_interval = ${osd_osd_mon_heartbeat_interval:-"30"}
osd_max_object_name_len = ${osd_max_object_name_len:-"256"}
#crush
osd_pool_default_crush_rule = ${osd_pool_default_crush_rule:-"0"}
osd_crush_update_on_start = ${osd_osd_crush_update_on_start:-"true"}
#backend
osd_objectstore = ${osd_osd_objectstore:-"filestore"}
#performance tuning
filestore_merge_threshold = ${osd_filestore_merge_threshold:-"40"}
filestore_split_multiple = ${osd_filestore_split_multiple:-"8"}
osd_op_threads = ${osd_osd_op_threads:-"8"}
filestore_op_threads = ${osd_filestore_op_threads:-"8"}
filestore_max_sync_interval = ${osd_filestore_max_sync_interval:-"5"}
osd_max_scrubs = ${osd_osd_max_scrubs:-"1"}
#recovery tuning
osd_recovery_max_active = ${osd_osd_recovery_max_active:-"5"}
osd_max_backfills = ${osd_osd_max_backfills:-"2"}
osd_recovery_op_priority = ${osd_osd_recovery_op_priority:-"2"}
osd_client_op_priority = ${osd_osd_client_op_priority:-"63"}
osd_recovery_max_chunk = ${osd_osd_recovery_max_chunk:-"1048576"}
osd_recovery_threads = ${osd_osd_recovery_threads:-"1"}
#ports
ms_bind_port_min = ${osd_ms_bind_port_min:-"6800"}
ms_bind_port_max = ${osd_ms_bind_port_max:-"7100"}
[client]
rbd_cache_enabled = ${client_rbd_cache_enabled:-"true"}
rbd_cache_writethrough_until_flush = ${client_rbd_cache_writethrough_until_flush:-"true"}
rbd_default_features = ${client_rbd_default_features:-"1"}
[mds]
mds_cache_size = ${mds_mds_cache_size:-"100000"}


@@ -0,0 +1,3 @@
[mon.]
key = {{ $key }}
caps mon = "allow *"


@@ -0,0 +1,26 @@
apiVersion: v1
kind: Pod
metadata:
  name: ceph-rbd-test
spec:
  containers:
    - name: cephrbd-rw
      image: busybox
      command:
        - sh
        - -c
        - while true; do sleep 1; done
      volumeMounts:
        - mountPath: "/mnt/cephrbd"
          name: cephrbd
  volumes:
    - name: cephrbd
      rbd:
        monitors:
          # This only works if you have skyDNS resolvable from the kubernetes node. Otherwise you must manually put in one or more mon pod IPs.
          - ceph-mon.ceph:6789
        user: admin
        image: ceph-rbd-test
        pool: rbd
        secretRef:
          name: ceph-client-key


@@ -0,0 +1,8 @@
# Default values for utils.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
global:
  region: cluster
  tld: local