diff --git a/Makefile b/Makefile index 1dfa20f5..2697615c 100644 --- a/Makefile +++ b/Makefile @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -.PHONY: ceph bootstrap mariadb etcd postgresql keystone memcached rabbitmq helm-toolkit openstack neutron nova cinder heat maas all clean +.PHONY: ceph bootstrap mariadb etcd postgresql keystone memcached rabbitmq helm-toolkit neutron nova cinder heat maas all clean B64_DIRS := helm-toolkit/secrets B64_EXCLUDE := $(wildcard helm-toolkit/secrets/*.b64) -CHARTS := ceph mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack +CHARTS := ceph mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat maas TOOLKIT_TPL := helm-toolkit/templates/_globals.tpl -all: helm-toolkit ceph bootstrap mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack +all: helm-toolkit ceph bootstrap mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat maas helm-toolkit: build-helm-toolkit @@ -55,8 +55,6 @@ maas: build-maas memcached: build-memcached -openstack: build-openstack - clean: $(shell rm -rf helm-toolkit/secrets/*.b64) $(shell rm -rf */templates/_partials.tpl) diff --git a/cinder/values.yaml b/cinder/values.yaml index 22e65603..9cbb4a81 100644 --- a/cinder/values.yaml +++ b/cinder/values.yaml @@ -27,7 +27,7 @@ labels: node_selector_value: enabled images: - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton diff --git a/docs/developer/minikube.md b/docs/developer/minikube.md index e62852d7..f1e4fb96 100644 --- a/docs/developer/minikube.md +++ b/docs/developer/minikube.md @@ -16,9 +16,9 @@ Install a recent version of [Kubernetes/Helm](https://github.com/kubernetes/helm Helm Installation Quickstart: ``` -$ curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh -$ chmod 700 get_helm.sh -$ ./get_helm.sh +curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh +chmod 700 get_helm.sh +./get_helm.sh ``` # TLDR; @@ -32,8 +32,9 @@ git clone https://github.com/att-comdev/openstack-helm.git && cd openstack-helm # Get a list of the current tags: git tag -l -# Checkout the tag you want to work with (if desired, or use master for development): -git checkout 0.1.0 +# Checkout the tag you want to work with (use master for development): +# For stability and testing, checkout the latest stable branch. 
+git checkout 0.2.0 # Start a local Helm Server: helm serve & @@ -64,6 +65,7 @@ kubectl label nodes openstack-control-plane=enabled --all --namespace=openstack # Deploy each chart: helm install --name mariadb --set development.enabled=true local/mariadb --namespace=openstack helm install --name=memcached local/memcached --namespace=openstack +helm install --name=etcd-rabbitmq local/etcd --namespace=openstack helm install --name=rabbitmq local/rabbitmq --namespace=openstack helm install --name=keystone local/keystone --namespace=openstack helm install --name=cinder local/cinder --namespace=openstack @@ -79,7 +81,7 @@ helm install --name=horizon local/horizon --namespace=openstack After installation, start Minikube with the flags listed below. Ensure that you have supplied enough disk, memory, and the current version flag for Kubernetes during `minikube start`. More information can be found [HERE](https://github.com/kubernetes/minikube/blob/master/docs/minikube_start.md). ``` -$ minikube start \ +minikube start \ --network-plugin=cni \ --kubernetes-version v1.5.1 \ --disk-size 40g \ @@ -89,71 +91,80 @@ $ minikube start \ Next, deploy the [Calico](http://docs.projectcalico.org/master/getting-started/kubernetes/installation/hosted/hosted) manifest. This is not a requirement in cases where you want to use your own CNI-enabled SDN, however you are doing so at your own risk. Note which versions of Calico are recommended for the project in our [Installation Guide](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md#overview). ``` -$ kubectl create -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/calico.yaml +kubectl create -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/calico.yaml ``` Wait for the environment to come up without error (as shown below). ``` -$ kubectl get pods -o wide --all-namespaces -w -NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE -kube-system calico-node-r9b9s 2/2 Running 0 3m 192.168.99.100 minikube -kube-system calico-policy-controller-2974666449-hm0zr 1/1 Running 0 3m 192.168.99.100 minikube -kube-system configure-calico-r6lnw 0/1 Completed 0 3m 192.168.99.100 minikube -kube-system kube-addon-manager-minikube 1/1 Running 0 7m 192.168.99.100 minikube -kube-system kube-dns-v20-sh5gp 3/3 Running 0 7m 192.168.120.64 minikube -kube-system kubernetes-dashboard-m24s8 1/1 Running 0 7m 192.168.120.65 minikube +kubectl get pods -o wide --all-namespaces -w + +# NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE +# kube-system calico-node-r9b9s 2/2 Running 0 3m 192.168.99.100 minikube +# kube-system calico-policy-controller-2974666449-hm0zr 1/1 Running 0 3m 192.168.99.100 minikube +# kube-system configure-calico-r6lnw 0/1 Completed 0 3m 192.168.99.100 minikube +# kube-system kube-addon-manager-minikube 1/1 Running 0 7m 192.168.99.100 minikube +# kube-system kube-dns-v20-sh5gp 3/3 Running 0 7m 192.168.120.64 minikube +# kube-system kubernetes-dashboard-m24s8 1/1 Running 0 7m 192.168.120.65 minikube ``` Next, initialize [Helm](https://github.com/kubernetes/helm/blob/master/docs/install.md#easy-in-cluster-installation) (which includes deploying tiller).
``` -$ helm init -Creating /Users/admin/.helm -Creating /Users/admin/.helm/repository -Creating /Users/admin/.helm/repository/cache -Creating /Users/admin/.helm/repository/local -Creating /Users/admin/.helm/plugins -Creating /Users/admin/.helm/starters -Creating /Users/admin/.helm/repository/repositories.yaml -Creating /Users/admin/.helm/repository/local/index.yaml -$HELM_HOME has been configured at $HOME/.helm. +helm init -Tiller (the helm server side component) has been installed into your Kubernetes Cluster. -Happy Helming! +# Creating /Users/admin/.helm +# Creating /Users/admin/.helm/repository +# Creating /Users/admin/.helm/repository/cache +# Creating /Users/admin/.helm/repository/local +# Creating /Users/admin/.helm/plugins +# Creating /Users/admin/.helm/starters +# Creating /Users/admin/.helm/repository/repositories.yaml +# Creating /Users/admin/.helm/repository/local/index.yaml +# $HELM_HOME has been configured at $HOME/.helm. -$ kubectl get pods -o wide --all-namespaces | grep tiller -kube-system tiller-deploy-3299276078-n98ct 1/1 Running 0 39s 192.168.120.66 minikube +# Tiller (the helm server side component) has been installed into your Kubernetes Cluster. +# Happy Helming! +``` + +Ensure that Tiller is deployed successfully: + +``` +kubectl get pods -o wide --all-namespaces | grep tiller + +# kube-system tiller-deploy-3299276078-n98ct 1/1 Running 0 39s 192.168.120.66 minikube ``` With Helm installed, you will need to start a local [Helm server](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_serve.md#helm-serve) (in the background), and point to a locally configured Helm [repository](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_repo_index.md#helm-repo-index): ``` -$ helm serve & -$ helm repo add local http://localhost:8879/charts -"local" has been added to your repositories +helm serve & +helm repo add local http://localhost:8879/charts + +# "local" has been added to your repositories ``` Verify that the local repository is configured correctly: ``` -$ helm repo list -NAME URL -stable https://kubernetes-charts.storage.googleapis.com/ -local http://localhost:8879/charts +helm repo list + +# NAME URL +# stable https://kubernetes-charts.storage.googleapis.com/ +# local http://localhost:8879/charts ``` Download the latest release of the project, preferably from `master` since you are following the "developer" instructions. ``` -$ git clone https://github.com/att-comdev/openstack-helm.git +git clone https://github.com/att-comdev/openstack-helm.git ``` Run `make` against the newly cloned project, which will automatically build secrets for the deployment and push the charts to your new local Helm repository: ``` -$ cd openstack-helm -$ make +cd openstack-helm +make ``` Perfect! You’re ready to install, develop, deploy, destroy, and repeat (when necessary)! @@ -181,7 +192,7 @@ To deploy Openstack-Helm in development mode, ensure you've created a minikube-a As a result of this guidance, we recommend creating the following directory for MariaDB, as shown below. ``` -$ sudo mkdir -p /data/openstack-helm/mariadb +sudo mkdir -p /data/openstack-helm/mariadb ``` ### Label Minikube Node @@ -189,7 +200,7 @@ $ sudo mkdir -p /data/openstack-helm/mariadb Be sure to label your minikube node according to the documentation in our installation guide (this remains exactly the same).
``` -$ kubectl label nodes openstack-control-plane=enabled --all --namespace=openstack +kubectl label nodes openstack-control-plane=enabled --all --namespace=openstack ``` ***NOTE:*** *You do not need to label your minikube cluster for `ceph-storage`, since development mode uses hostPath.* @@ -200,7 +211,7 @@ $ kubectl label nodes openstack-control-plane=enabled --all --namespac Now you can deploy the MariaDB chart, which is required by all other child charts. ``` -$ helm install --name mariadb --set development.enabled=true local/mariadb --namespace=openstack +helm install --name mariadb --set development.enabled=true local/mariadb --namespace=openstack ``` ***IMPORTANT:*** *MariaDB seeding tasks run for quite a while. This is expected behavior, as several checks are performed before the seeding completes. Please wait for a few minutes for these jobs to finish.* @@ -210,15 +221,16 @@ $ helm install --name mariadb --set development.enabled=true local/mariadb --nam Once the MariaDB deployment is complete, deploy the other charts as needed. ``` -$ helm install --name=memcached local/memcached --namespace=openstack -$ helm install --name=rabbitmq local/rabbitmq --namespace=openstack -$ helm install --name=keystone local/keystone --namespace=openstack -$ helm install --name=horizon local/horizon --namespace=openstack -$ helm install --name=cinder local/cinder --namespace=openstack -$ helm install --name=glance local/glance --namespace=openstack -$ helm install --name=nova local/nova --namespace=openstack -$ helm install --name=neutron local/neutron --namespace=openstack -$ helm install --name=heat local/heat --namespace=openstack +helm install --name=memcached local/memcached --namespace=openstack +helm install --name=etcd-rabbitmq local/etcd --namespace=openstack +helm install --name=rabbitmq local/rabbitmq --namespace=openstack +helm install --name=keystone local/keystone --namespace=openstack +helm install --name=horizon local/horizon --namespace=openstack +helm install --name=cinder local/cinder --namespace=openstack +helm install --name=glance local/glance --namespace=openstack +helm install --name=nova local/nova --namespace=openstack +helm install --name=neutron local/neutron --namespace=openstack +helm install --name=heat local/heat --namespace=openstack ``` # Horizon Management @@ -226,7 +238,7 @@ $ helm install --name=heat local/heat --namespace=openstack After each chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually. ``` -$ sudo kubectl edit svc horizon -n openstack +sudo kubectl edit svc horizon -n openstack ``` With the deployed manifest in edit mode, you can enable `nodePort` by replicating some of the fields below (specifically, the `nodePort` lines).
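If you would rather not hand-edit the manifest, a quick alternative is to patch the service type and let Kubernetes assign the port for you (a minimal sketch, not part of the chart itself; it assumes the service is named `horizon` in the `openstack` namespace as deployed above):

```
# Switch the horizon service from ClusterIP to NodePort; Kubernetes will
# allocate a port from the default NodePort range (30000-32767).
kubectl patch svc horizon -n openstack -p '{"spec": {"type": "NodePort"}}'

# Confirm the assigned nodePort before browsing to it.
kubectl get svc horizon -n openstack
```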
@@ -267,5 +279,3 @@ If you have any questions, comments, or find any bugs, please submit an issue so # Troubleshooting * [Openstack-Helm Minikube Troubleshooting](../troubleshooting/ts-minikube.md) - - diff --git a/docs/helm_overrides.md b/docs/helm_overrides.md index 1e8eb8b9..81cfcb62 100644 --- a/docs/helm_overrides.md +++ b/docs/helm_overrides.md @@ -139,7 +139,7 @@ An illustrative example of an `images:` section taken from the heat chart: ``` images: - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton db_sync: docker.io/kolla/ubuntu-source-heat-api:3.0.1 ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton diff --git a/docs/installation/getting-started.md b/docs/installation/getting-started.md index 36fa7b5c..fc175a2c 100644 --- a/docs/installation/getting-started.md +++ b/docs/installation/getting-started.md @@ -6,10 +6,10 @@ In order to drive towards a production-ready Openstack solution, our goal is to | | Version | Notes | |--- |--- |--- | -| **Kubernetes** | [v1.5.1](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v151) | [Custom Controller for RDB tools](https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags) | -| **Helm** | [v2.1.3](https://github.com/kubernetes/helm/wiki/Roadmap#210-decided) | Planning for [v2.2.0](https://github.com/kubernetes/helm/wiki/Roadmap#220-open-for-small-additions) | +| **Kubernetes** | [v1.5.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v153) | [Custom Controller for RBD tools](https://quay.io/repository/attcomdev/kube-controller-manager?tab=tags) | +| **Helm** | [v2.2.1](https://github.com/kubernetes/helm/releases/tag/v2.2.1) | Planning for [v2.3.0](https://github.com/kubernetes/helm/milestone/30) | | **Calico** | [v2.0](http://docs.projectcalico.org/v2.0/releases/) | [`calicoctl` v1.0](https://github.com/projectcalico/calicoctl/releases) | -| **Docker** | [v1.12.1](https://github.com/docker/docker/releases/tag/v1.12.1) | [Per kubeadm Instructions](http://kubernetes.io/docs/getting-started-guides/kubeadm/) | | +| **Docker** | [v1.12.6](https://github.com/docker/docker/releases/tag/v1.12.6) | [Per kubeadm Instructions](http://kubernetes.io/docs/getting-started-guides/kubeadm/) | | Other versions and considerations (such as other CNI SDN providers), config map data, and value overrides will be included in other documentation as we explore these options further. @@ -42,7 +42,7 @@ admin@kubenode01:~$ After an initial `kubeadm` deployment has been scheduled, it is time to deploy a CNI-enabled SDN. We have selected **Calico**, but have also confirmed that this works for Weave and Romana. For Calico version v2.0, you can apply the provided [Kubeadm Hosted Install](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/) manifest: ``` -admin@kubenode01:~$ kubectl apply -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml +kubectl apply -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml ``` **PLEASE NOTE:** If you are using a 192.168.0.0/16 CIDR for your Kubernetes hosts, you will need to modify [line 42](https://gist.github.com/v1k0d3n/a152b1f5b8db5a8ae9c8c7da575a9694#file-calico-kubeadm-hosted-yml-L42) for the `cidr` declaration within the `ippool`.
This must be a `/16` range or more, as the `kube-controller` will hand out `/24` ranges to each node. We have included a sample comparison of the changes [here](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/kubeadm/calico.yaml) and [here](https://gist.githubusercontent.com/v1k0d3n/a152b1f5b8db5a8ae9c8c7da575a9694/raw/c950eef1123a7dcc4b0dedca1a202e0c06248e9e/calico-kubeadm-hosted.yml). @@ -76,7 +76,7 @@ Persistent storage is improving. Please check our current and/or resolved [issue At some future point, we want to ensure that our solution is cloud-native, allowing installation on any host system without a package manager and only a container runtime (i.e. CoreOS). Until this happens, we will need to ensure that `ceph-common` is installed on each of our hosts. Using our Ubuntu example: ``` -admin@kubenode01:~$ sudo apt-get install ceph-common -y +sudo apt-get install ceph-common -y ``` We will always attempt to keep host-specific requirements to a minimum, and we are working with the Ceph team (Sébastien Han) to quickly address this Ceph requirement. @@ -85,7 +85,7 @@ We will always attempt to keep host-specific requirements to a minimum, and we a Another thing of interest is that our deployment assumes that you can generate secrets at the time of the container deployment. We require the [`sigil`](https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz) binary on your deployment host in order to perform this action. ``` -admin@kubenode01:~$ curl -L https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz | tar -zxC /usr/local/bin +curl -L https://github.com/gliderlabs/sigil/releases/download/v0.4.0/sigil_0.4.0_Linux_x86_64.tgz | tar -zxC /usr/local/bin ``` ### Kubernetes Controller Manager @@ -94,8 +94,8 @@ Before deploying Ceph, you will need to re-deploy a custom Kubernetes Controller To make these changes, export your Kubernetes version, and edit the `image` line of your `kube-controller-manager` json manifest on your Kubernetes Master: ``` -admin@kubenode01:~$ export kube_version=v1.5.1 -admin@kubenode01:~$ sed -i "s|gcr.io/google_containers/kube-controller-manager-amd64:'$kube_version'|quay.io/attcomdev/kube-controller-manager:'$kube_version'|g" /etc/kubernetes/manifests/kube-controller-manager.json +export kube_version=v1.5.3 +sed -i "s|gcr.io/google_containers/kube-controller-manager-amd64:'$kube_version'|quay.io/attcomdev/kube-controller-manager:'$kube_version'|g" /etc/kubernetes/manifests/kube-controller-manager.json ``` Now you will want to `restart` your Kubernetes master server to continue. @@ -142,42 +142,42 @@ nameserver 192.168.1.70 nameserver 8.8.8.8 search svc.cluster.local jinkit.com EOF -root@kubenode01:/# +root@kubenode01:/# ``` Now you can test your changes by deploying a service to your cluster, and resolving this from the controller. 
As an example, lets deploy something useful, like [Kubernetes dashboard](https://github.com/kubernetes/dashboard): ``` -admin@kubenode01:~$ kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml +kubectl create -f https://rawgit.com/kubernetes/dashboard/master/src/deploy/kubernetes-dashboard.yaml ``` Note the `IP` field: ``` admin@kubenode01:~$ kubectl describe svc kubernetes-dashboard -n kube-system -Name: kubernetes-dashboard -Namespace: kube-system -Labels: app=kubernetes-dashboard -Selector: app=kubernetes-dashboard -Type: NodePort -IP: 10.110.207.144 -Port: 80/TCP -NodePort: 32739/TCP -Endpoints: 10.25.178.65:9090 -Session Affinity: None +Name: kubernetes-dashboard +Namespace: kube-system +Labels: app=kubernetes-dashboard +Selector: app=kubernetes-dashboard +Type: NodePort +IP: 10.110.207.144 +Port: 80/TCP +NodePort: 32739/TCP +Endpoints: 10.25.178.65:9090 +Session Affinity: None No events. -admin@kubenode01:~$ +admin@kubenode01:~$ ``` Now you should be able to resolve the host `kubernetes-dashboard.kube-system.svc.cluster.local`: + ``` admin@kubenode01:~$ kubectl exec kube-controller-manager-kubenode01 -it -n kube-system -- ping kubernetes-dashboard.kube-system.svc.cluster.local PING kubernetes-dashboard.kube-system.svc.cluster.local (10.110.207.144) 56(84) bytes of data. -... -... -admin@kubenode01:~$ ``` + (Note: This host example above has `iputils-ping` installed) ### Kubernetes Node DNS Resolution + For each of the nodes to know exactly how to communicate with Ceph (and thus MariaDB) endpoints, each host much also have an entry for `kube-dns`. Since we are using Ubuntu for our example, place these changes in `/etc/network/interfaces` to ensure they remain after reboot. Now we are ready to continue with the Openstack-Helm installation. @@ -190,6 +190,7 @@ Please ensure that you have verified and completed the steps above to prevent is Although Ceph is mentioned throughout this guide, our deployment is flexible to allow you the option of bringing any type of persistent storage. Although most of these verification steps are the same, if not very similar, we will use Ceph as our example throughout this guide. ## Node Labels + First, we must label our nodes according to their role. Although we are labeling `all` nodes, you are free to label only the nodes you wish. You must have at least one, although a minimum of three are recommended. Nodes are labeled according to their Openstack roles: **Storage Nodes:** `ceph-storage` @@ -197,259 +198,168 @@ First, we must label our nodes according to their role. 
Although we are labeling **Compute Nodes:** `openvswitch`, `openstack-compute-node` ``` -admin@kubenode01:~$ kubectl label nodes openstack-control-plane=enabled --all -admin@kubenode01:~$ kubectl label nodes ceph-storage=enabled --all -admin@kubenode01:~$ kubectl label nodes openvswitch=enabled --all -admin@kubenode01:~$ kubectl label nodes openstack-compute-node=enabled --all +kubectl label nodes openstack-control-plane=enabled --all +kubectl label nodes ceph-storage=enabled --all +kubectl label nodes openvswitch=enabled --all +kubectl label nodes openstack-compute-node=enabled --all ``` ## Obtaining the Project + Download the latest copy of Openstack-Helm: + ``` -admin@kubenode01:~$ git clone https://github.com/att-comdev/openstack-helm.git -admin@kubenode01:~$ cd openstack-helm +git clone https://github.com/att-comdev/openstack-helm.git +cd openstack-helm ``` ## Ceph Preparation and Installation + Ceph must be aware of the OSD cluster and public networks. These CIDR ranges are the exact same ranges you used earlier in your Calico deployment yaml (our example was 10.25.0.0/16 due to our 192.168.0.0/16 overlap). Export these variables to your deployment environment by issuing the following commands: + ``` -admin@kubenode01:~$ export osd_cluster_network=10.25.0.0/16 -admin@kubenode01:~$ export osd_public_network=10.25.0.0/16 +export osd_cluster_network=10.25.0.0/16 +export osd_public_network=10.25.0.0/16 ``` ## Ceph Storage Volumes + Ceph must also have volumes to mount on each host labeled for `ceph-storage`. On each host that you labeled, create the following directory (can be overridden): + ``` -admin@kubenode01:~$ mkdir -p /var/lib/openstack-helm/ceph +mkdir -p /var/lib/openstack-helm/ceph ``` + *Repeat this step for each node labeled: `ceph-storage`* ## Ceph Secrets Generation + Although you can bring your own secrets, we have conveniently created a secret generation tool for you (for greenfield deployments). You can create secrets for your project by issuing the following: + ``` -admin@kubenode01:~$ cd helm-toolkit/utils/secret-generator -admin@kubenode01:~$ ./generate_secrets.sh all `./generate_secrets.sh fsid` -admin@kubenode01:~$ cd ../../.. +cd helm-toolkit/utils/secret-generator +./generate_secrets.sh all `./generate_secrets.sh fsid` +cd ../../.. ``` ## Nova Compute Instance Storage + Nova Compute requires a place to store instances locally. Each node labeled `openstack-compute-node` needs to have the following directory: + ``` -admin@kubenode01:~$ mkdir -p /var/lib/nova/instances +mkdir -p /var/lib/nova/instances ``` + *Repeat this step for each node labeled: `openstack-compute-node`* ## Helm Preparation + Now we need to install and prepare Helm, the core of our project. Please use the installation guide from the [Kubernetes/Helm](https://github.com/kubernetes/helm/blob/master/docs/install.md#from-the-binary-releases) repository. Please take note of our required versions above. Once installed, and initiated (`helm init`), you will need your local environment to serve helm charts for use. You can do this by: + ``` -admin@kubenode01:~$ helm serve . & -admin@kubenode01:~$ helm repo add local http://localhost:8879/charts +helm serve & +helm repo add local http://localhost:8879/charts ``` # Openstack-Helm Installation Now we are ready to deploy and verify our Openstack-Helm installation. The first step is to build out the deployment secrets and to lint and package each of the charts for the project.
Do this my running `make` in the `openstack-helm` directory: + ``` -admin@kubenode01:~$ make +make ``` **Helpful Note:** If you need to make any changes to the deployment, you may run `make` again, delete your helm-deployed chart, and redeploy the chart (update). If you need to delete a chart for any reason, do the following: + ``` -admin@kubenode01:~$ helm list -NAME REVISION UPDATED STATUS CHART -bootstrap 1 Fri Dec 23 13:37:35 2016 DEPLOYED bootstrap-0.1.0 -bootstrap-ceph 1 Fri Dec 23 14:27:51 2016 DEPLOYED bootstrap-0.1.0 -ceph 3 Fri Dec 23 14:18:49 2016 DEPLOYED ceph-0.1.0 -keystone 1 Fri Dec 23 16:40:56 2016 DEPLOYED keystone-0.1.0 -mariadb 1 Fri Dec 23 16:15:29 2016 DEPLOYED mariadb-0.1.0 -memcached 1 Fri Dec 23 16:39:15 2016 DEPLOYED memcached-0.1.0 -rabbitmq 1 Fri Dec 23 16:40:34 2016 DEPLOYED rabbitmq-0.1.0 -admin@kubenode01:~$ -admin@kubenode01:~$ -admin@kubenode01:~$ helm delete --purge keystone +helm list + +# NAME REVISION UPDATED STATUS CHART +# bootstrap 1 Fri Dec 23 13:37:35 2016 DEPLOYED bootstrap-0.2.0 +# bootstrap-ceph 1 Fri Dec 23 14:27:51 2016 DEPLOYED bootstrap-0.2.0 +# ceph 3 Fri Dec 23 14:18:49 2016 DEPLOYED ceph-0.2.0 +# keystone 1 Fri Dec 23 16:40:56 2016 DEPLOYED keystone-0.2.0 +# mariadb 1 Fri Dec 23 16:15:29 2016 DEPLOYED mariadb-0.2.0 +# memcached 1 Fri Dec 23 16:39:15 2016 DEPLOYED memcached-0.2.0 +# rabbitmq 1 Fri Dec 23 16:40:34 2016 DEPLOYED rabbitmq-0.2.0 + +helm delete --purge keystone ``` Please ensure that you use ``--purge`` whenever deleting a project. ## Ceph Installation and Verification Install the first service, which is Ceph. If all instructions have been followed as mentioned above, this installation should go smoothly. Use the following command to install Ceph: ``` -admin@kubenode01:~$ helm install --set network.public=$osd_public_network --name=ceph local/ceph --namespace=ceph +helm install --set network.public=$osd_public_network --name=ceph local/ceph --namespace=ceph ``` ## Bootstrap Installation At this time (and before verification of Ceph) you'll need to install the `bootstrap` chart. The `bootstrap` chart will install secrets for both the `ceph` and `openstack` namespaces for the general StorageClass: ``` -admin@kubenode01:~$ helm install --name=bootstrap-ceph local/bootstrap --namespace=ceph -admin@kubenode01:~$ helm install --name=bootstrap-openstack local/bootstrap --namespace=openstack +helm install --name=bootstrap-ceph local/bootstrap --namespace=ceph +helm install --name=bootstrap-openstack local/bootstrap --namespace=openstack ``` -You may want to validate that Ceph is deployed successfully. Here are a couple of recommendations for this. +You may want to validate that Ceph is deployed successfully. For more information on this, please see the section entitled [Ceph Troubleshooting](../troubleshooting/ts-persistent-storage.md). -### Ceph Validating PVC -To validate persistent volume claim (PVC) creation, we've placed a test manifest in the `./test/` directory. Deploy this pvc and explore the deployment: -``` -admin@kubenode01:~$ kubectl get pvc -o wide --all-namespaces -w -NAMESPACE NAME STATUS VOLUME CAPACITY ACCESSMODES AGE -ceph pvc-test Bound pvc-bc768dea-c93e-11e6-817f-001fc69c26d1 1Gi RWO 9h -admin@kubenode01:~$ -``` -The output above indicates that the PVC is 'bound' correctly. 
Now digging deeper: -``` -admin@kubenode01:~/projects/openstack-helm$ kubectl describe pvc pvc-test -n ceph -Name: pvc-test -Namespace: ceph -StorageClass: general -Status: Bound -Volume: pvc-bc768dea-c93e-11e6-817f-001fc69c26d1 -Labels: -Capacity: 1Gi -Access Modes: RWO -No events. -admin@kubenode01:~/projects/openstack-helm$ -``` -We can see that we have a VolumeID, and the 'capacity' is 1GB. It is a 'general' storage class. It is just a simple test. You can safely delete this test by issuing the following: -``` -admin@kubenode01:~/projects/openstack-helm$ kubectl delete pvc pvc-test -n ceph -persistentvolumeclaim "pvc-test" deleted -admin@kubenode01:~/projects/openstack-helm$ -``` - -### Ceph Validating StorageClass -Next we can look at the storage class, to make sure that it was created correctly: -``` -admin@kubenode01:~$ kubectl describe storageclass/general -Name: general -IsDefaultClass: No -Annotations: -Provisioner: kubernetes.io/rbd -Parameters: adminId=admin,adminSecretName=pvc-ceph-conf-combined-storageclass,adminSecretNamespace=ceph,monitors=ceph-mon.ceph:6789,pool=rbd,userId=admin,userSecretName=pvc-ceph-client-key -No events. -admin@kubenode01:~$ -``` -The parameters is what we're looking for here. If we see parameters passed to the StorageClass correctly, we will see the `ceph-mon.ceph:6789` hostname/port, things like `userid`, and appropriate secrets used for volume claims. This all looks great, and it time to Ceph itself. - -### Ceph Validation -Most commonly, we want to validate that Ceph is working correctly. This can be done with the following ceph command: -``` -admin@kubenode01:~$ kubectl exec -t -i ceph-mon-0 -n ceph -- ceph status - cluster 046de582-f8ee-4352-9ed4-19de673deba0 - health HEALTH_OK - monmap e3: 3 mons at {ceph-mon-392438295-6q04c=10.25.65.131:6789/0,ceph-mon-392438295-ksrb2=10.25.49.196:6789/0,ceph-mon-392438295-l0pzj=10.25.79.193:6789/0} - election epoch 6, quorum 0,1,2 ceph-mon-392438295-ksrb2,ceph-mon-392438295-6q04c,ceph-mon-392438295-l0pzj - fsmap e5: 1/1/1 up {0=mds-ceph-mds-2810413505-gtjgv=up:active} - osdmap e23: 5 osds: 5 up, 5 in - flags sortbitwise - pgmap v22012: 80 pgs, 3 pools, 12712 MB data, 3314 objects - 101 GB used, 1973 GB / 2186 GB avail - 80 active+clean -admin@kubenode01:~$ -``` -Use one of your Ceph Monitors to check the status of the cluster. A couple of things to note above; our health is 'HEALTH_OK', we have 3 mons, we've established a quorum, and we can see that our active mds is 'ceph-mds-2810413505-gtjgv'. We have a healthy environment. - -For Glance and Cinder to operate, you will need to create some storage pools for these systems. Additionally, Nova can be configured to use a pool as well, but this is off by default. - -``` -kubectl exec -n ceph -it ceph-mon-0 ceph osd pool create volumes 128 -kubectl exec -n ceph -it ceph-mon-0 ceph osd pool create images 128 -``` - -Nova storage would be added like this: -``` -kubectl exec -n ceph -it ceph-mon-0 ceph osd pool create vms 128 -``` - -The choosing the amount of storage is up to you and can be changed by replacing the 128 to meet your needs. - -We are now ready to install our next chart, MariaDB. ## MariaDB Installation and Verification We are using Galera to cluster MariaDB and establish a quorum. 
To install the MariaDB, issue the following command: ``` -admin@kubenode01:~$ helm install --name=mariadb local/mariadb --namespace=openstack +helm install --name=mariadb local/mariadb --namespace=openstack ``` -MariaDB is a StatefulSet (PetSets have been retired in Kubernetes v1.5.0). As such, it initiates a 'seed' which is used to deploy MariaDB members via [affinity/anti-affinity](http://kubernetes.io/docs/user-guide/node-selection/) features. Ceph uses this as well. So what you will notice is the following behavior: -``` -openstack mariadb-0 0/1 Running 0 28s 10.25.49.199 kubenode05 -openstack mariadb-seed-0ckf4 1/1 Running 0 48s 10.25.162.197 kubenode01 - - -NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE -openstack mariadb-0 1/1 Running 0 1m 10.25.49.199 kubenode05 -openstack mariadb-1 0/1 Pending 0 0s -openstack mariadb-1 0/1 Pending 0 0s kubenode04 -openstack mariadb-1 0/1 ContainerCreating 0 0s kubenode04 -openstack mariadb-1 0/1 Running 0 3s 10.25.178.74 kubenode04 -``` -What you're seeing is the output of `kubectl get pods -o wide --all-namespaces`, which is used to monitor the seed host preparing each of the MariaDB/Galera members in order: mariadb-0, then mariadb-1, then mariadb-2. This process can take up to a few minutes, so be patient. - -To test MariaDB, do the following: -``` -admin@kubenode01:~/projects/openstack-helm$ kubectl exec mariadb-0 -it -n openstack -- mysql -h mariadb.openstack -uroot -ppassword -e 'show databases;' -+--------------------+ -| Database | -+--------------------+ -| information_schema | -| keystone | -| mysql | -| performance_schema | -+--------------------+ -admin@kubenode01:~/projects/openstack-helm$ -``` -Now you can see that MariaDB is loaded, with databases intact! If you're at this point, the rest of the installation is easy. You can run the following to check on Galera: -``` -admin@kubenode01:~/projects/openstack-helm$ kubectl describe po/mariadb-0 -n openstack -Name: mariadb-0 -Namespace: openstack -Node: kubenode05/192.168.3.25 -Start Time: Fri, 23 Dec 2016 16:15:49 -0500 -Labels: app=mariadb - galera=enabled -Status: Running -IP: 10.25.49.199 -Controllers: StatefulSet/mariadb -... -... -... - FirstSeen LastSeen Count From SubObjectPath Type Reason Message - --------- -------- ----- ---- ------------- -------- ------ ------- - 5s 5s 1 {default-scheduler } Normal Scheduled Successfully assigned mariadb-0 to kubenode05 - 3s 3s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Pulling pulling image "quay.io/stackanetes/stackanetes-mariadb:newton" - 2s 2s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Pulled Successfully pulled image "quay.io/stackanetes/stackanetes-mariadb:newton" - 2s 2s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Created Created container with docker id f702bd7c11ef; Security:[seccomp=unconfined] - 2s 2s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Started Started container with docker id f702bd7c11ef -``` -So you can see that galera is enabled. 
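If you also want to confirm that the Galera members actually formed a quorum, you can query Galera's own status counters (a quick sketch reusing the same root credentials as the example above; `wsrep_cluster_size` should match the number of MariaDB pods):

```
# Ask any member for the current cluster size and state.
kubectl exec mariadb-0 -it -n openstack -- mysql -h mariadb.openstack -uroot -ppassword \
  -e "SHOW STATUS LIKE 'wsrep_cluster_%';"
```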
## Installation of Other Services Now you can easily install the other services by going in order: -**Install Memcached/RabbitMQ:** +**Install Memcached/Etcd/RabbitMQ:** ``` -admin@kubenode01:~$ helm install --name=memcached local/memcached --namespace=openstack -admin@kubenode01:~$ helm install --name=rabbitmq local/rabbitmq --namespace=openstack +helm install --name=memcached local/memcached --namespace=openstack +helm install --name=etcd-rabbitmq local/etcd --namespace=openstack +helm install --name=rabbitmq local/rabbitmq --namespace=openstack ``` **Install Keystone:** ``` -admin@kubenode01:~$ helm install --name=keystone local/keystone --namespace=openstack +helm install --name=keystone local/keystone --set replicas=2 --namespace=openstack ``` **Install Horizon:** ``` -admin@kubenode01:~$ helm install --name=horizon local/horizon --namespace=openstack +helm install --name=horizon local/horizon --set network.enable_node_port=true --namespace=openstack ``` **Install Glance:** ``` -admin@kubenode01:~$ helm install --name=glance local/glance --namespace=openstack +helm install --name=glance local/glance --set replicas.api=2,replicas.registry=2 --namespace=openstack +``` + +**Install Heat:** +``` +helm install --name=heat local/heat --namespace=openstack +``` + +**Install Neutron:** +``` +helm install --name=neutron local/neutron --set replicas.server=2 --namespace=openstack +``` + +**Install Nova:** +``` +helm install --name=nova local/nova --set control_replicas=2 --namespace=openstack +``` + +**Install Cinder:** +``` +helm install --name=cinder local/cinder --set replicas.api=2 --namespace=openstack ``` ## Final Checks -Now you can run through your final checks. Wait for all services to come up: +Now you can run through your final checks. Wait for all services to come up: ``` -admin@kubenode01:~$ watch kubectl get all --namespace=openstack +watch kubectl get all --namespace=openstack ``` Finally, you should now be able to access horizon at http:// using admin/password diff --git a/docs/troubleshooting/README.md b/docs/troubleshooting/README.md index 726bbf5a..cbd94e14 100644 --- a/docs/troubleshooting/README.md +++ b/docs/troubleshooting/README.md @@ -1,10 +1,11 @@ # Troubleshooting -Sometimes things go wrong. These guides will help you solve many common issues. +Sometimes things go wrong. These guides will help you solve many common issues with the following: -* [Minikube issues](ts-minikube.md) -* [Networking issues](ts-networking.md) -* [Persistent Storage issues](ts-persistent-storage.md) +* [Database: Galera](ts-database.md#galera-cluster) +* [Development: Minikube](ts-minikube.md) +* [Networking: General](ts-networking.md) +* [Persistent Storage: Ceph](ts-persistent-storage.md#ceph) ## Getting Help diff --git a/docs/troubleshooting/ts-database.md b/docs/troubleshooting/ts-database.md new file mode 100644 index 00000000..66843284 --- /dev/null +++ b/docs/troubleshooting/ts-database.md @@ -0,0 +1,66 @@ +# Troubleshooting - Database Deployments + +This guide is to help users debug any general database issues when deploying Charts in this repository. + +# Galera Cluster + +**CHART:** openstack-helm/mariadb (when `developer-mode: false`) + +MariaDB is a `StatefulSet` (`PetSets` have been retired in Kubernetes v1.5.0). As such, it initiates a 'seed' which is used to deploy MariaDB members via [affinity/anti-affinity](http://kubernetes.io/docs/user-guide/node-selection/) features. Ceph uses this as well.
So what you will notice is the following behavior: + +``` +openstack mariadb-0 0/1 Running 0 28s 10.25.49.199 kubenode05 +openstack mariadb-seed-0ckf4 1/1 Running 0 48s 10.25.162.197 kubenode01 + + +NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE +openstack mariadb-0 1/1 Running 0 1m 10.25.49.199 kubenode05 +openstack mariadb-1 0/1 Pending 0 0s +openstack mariadb-1 0/1 Pending 0 0s kubenode04 +openstack mariadb-1 0/1 ContainerCreating 0 0s kubenode04 +openstack mariadb-1 0/1 Running 0 3s 10.25.178.74 kubenode04 +``` + +What you're seeing is the output of `kubectl get pods -o wide --all-namespaces`, which is used to monitor the seed host preparing each of the MariaDB/Galera members in order: mariadb-0, then mariadb-1, then mariadb-2. This process can take up to a few minutes, so be patient. + +To test MariaDB, do the following: + +``` +admin@kubenode01:~/projects/openstack-helm$ kubectl exec mariadb-0 -it -n openstack -- mysql -h mariadb.openstack -uroot -ppassword -e 'show databases;' ++--------------------+ +| Database | ++--------------------+ +| information_schema | +| keystone | +| mysql | +| performance_schema | ++--------------------+ +admin@kubenode01:~/projects/openstack-helm$ +``` + +Now you can see that MariaDB is loaded, with databases intact! If you're at this point, the rest of the installation is easy. You can run the following to check on Galera: + +``` +admin@kubenode01:~/projects/openstack-helm$ kubectl describe po/mariadb-0 -n openstack +Name: mariadb-0 +Namespace: openstack +Node: kubenode05/192.168.3.25 +Start Time: Fri, 23 Dec 2016 16:15:49 -0500 +Labels: app=mariadb + galera=enabled +Status: Running +IP: 10.25.49.199 +Controllers: StatefulSet/mariadb +... +... +... + FirstSeen LastSeen Count From SubObjectPath Type Reason Message + --------- -------- ----- ---- ------------- -------- ------ ------- + 5s 5s 1 {default-scheduler } Normal Scheduled Successfully assigned mariadb-0 to kubenode05 + 3s 3s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Pulling pulling image "quay.io/stackanetes/stackanetes-mariadb:newton" + 2s 2s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Pulled Successfully pulled image "quay.io/stackanetes/stackanetes-mariadb:newton" + 2s 2s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Created Created container with docker id f702bd7c11ef; Security:[seccomp=unconfined] + 2s 2s 1 {kubelet kubenode05} spec.containers{mariadb} Normal Started Started container with docker id f702bd7c11ef +``` + +So you can see that galera is enabled. diff --git a/docs/troubleshooting/ts-persistent-storage.md b/docs/troubleshooting/ts-persistent-storage.md index f9c7a99e..524f6cc6 100644 --- a/docs/troubleshooting/ts-persistent-storage.md +++ b/docs/troubleshooting/ts-persistent-storage.md @@ -2,4 +2,83 @@ This guide is to help users debug any general storage issues when deploying Charts in this repository. -# Diagnosing the problem +# Ceph + +**CHART:** openstack-helm/ceph + +### Ceph Validating PVC +To validate persistent volume claim (PVC) creation, we've placed a test manifest in the `./test/` directory. Deploy this pvc and explore the deployment: +``` +admin@kubenode01:~$ kubectl get pvc -o wide --all-namespaces -w +NAMESPACE NAME STATUS VOLUME CAPACITY ACCESSMODES AGE +ceph pvc-test Bound pvc-bc768dea-c93e-11e6-817f-001fc69c26d1 1Gi RWO 9h +admin@kubenode01:~$ +``` +The output above indicates that the PVC is 'bound' correctly. 
Now digging deeper: +``` +admin@kubenode01:~/projects/openstack-helm$ kubectl describe pvc pvc-test -n ceph +Name: pvc-test +Namespace: ceph +StorageClass: general +Status: Bound +Volume: pvc-bc768dea-c93e-11e6-817f-001fc69c26d1 +Labels: +Capacity: 1Gi +Access Modes: RWO +No events. +admin@kubenode01:~/projects/openstack-helm$ +``` +We can see that we have a VolumeID, and the 'capacity' is 1GB. It is a 'general' storage class. It is just a simple test. You can safely delete this test by issuing the following: +``` +admin@kubenode01:~/projects/openstack-helm$ kubectl delete pvc pvc-test -n ceph +persistentvolumeclaim "pvc-test" deleted +admin@kubenode01:~/projects/openstack-helm$ +``` + +### Ceph Validating StorageClass +Next we can look at the storage class, to make sure that it was created correctly: +``` +admin@kubenode01:~$ kubectl describe storageclass/general +Name: general +IsDefaultClass: No +Annotations: +Provisioner: kubernetes.io/rbd +Parameters: adminId=admin,adminSecretName=pvc-ceph-conf-combined-storageclass,adminSecretNamespace=ceph,monitors=ceph-mon.ceph:6789,pool=rbd,userId=admin,userSecretName=pvc-ceph-client-key +No events. +admin@kubenode01:~$ +``` +The parameters are what we're looking for here. If we see parameters passed to the StorageClass correctly, we will see the `ceph-mon.ceph:6789` hostname/port, things like `userid`, and appropriate secrets used for volume claims. This all looks great, and it is time to validate Ceph itself. + +### Ceph Validation +Most commonly, we want to validate that Ceph is working correctly. This can be done with the following ceph command: +``` +admin@kubenode01:~$ kubectl exec -t -i ceph-mon-0 -n ceph -- ceph status + cluster 046de582-f8ee-4352-9ed4-19de673deba0 + health HEALTH_OK + monmap e3: 3 mons at {ceph-mon-392438295-6q04c=10.25.65.131:6789/0,ceph-mon-392438295-ksrb2=10.25.49.196:6789/0,ceph-mon-392438295-l0pzj=10.25.79.193:6789/0} + election epoch 6, quorum 0,1,2 ceph-mon-392438295-ksrb2,ceph-mon-392438295-6q04c,ceph-mon-392438295-l0pzj + fsmap e5: 1/1/1 up {0=mds-ceph-mds-2810413505-gtjgv=up:active} + osdmap e23: 5 osds: 5 up, 5 in + flags sortbitwise + pgmap v22012: 80 pgs, 3 pools, 12712 MB data, 3314 objects + 101 GB used, 1973 GB / 2186 GB avail + 80 active+clean +admin@kubenode01:~$ +``` +Use one of your Ceph Monitors to check the status of the cluster. A couple of things to note above: our health is 'HEALTH_OK', we have 3 mons, we've established a quorum, and we can see that our active mds is 'ceph-mds-2810413505-gtjgv'. We have a healthy environment. + +For Glance and Cinder to operate, you will need to create some storage pools for these systems. Additionally, Nova can be configured to use a pool as well, but this is off by default. + +``` +kubectl exec -n ceph -it ceph-mon-0 ceph osd pool create volumes 128 +kubectl exec -n ceph -it ceph-mon-0 ceph osd pool create images 128 +``` + +Nova storage would be added like this: +``` +kubectl exec -n ceph -it ceph-mon-0 ceph osd pool create vms 128 +``` + +Choosing the amount of storage is up to you, and it can be changed by replacing the 128 to meet your needs. + +We are now ready to install our next chart, MariaDB. diff --git a/glance/templates/bin/_post.sh.tpl b/glance/templates/bin/_post.sh.tpl deleted file mode 100644 index 8aa44495..00000000 --- a/glance/templates/bin/_post.sh.tpl +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Openstack-Helm Authors.
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -ex -export HOME=/tmp - -ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \ -service_type=image \ -description='Openstack Image' \ -endpoint_region='{{ .Values.keystone.glance_region_name }}' \ -url='{{ tuple "image" "admin" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }}' \ -interface=admin \ -region_name='{{ .Values.keystone.admin_region_name }}' \ -auth='{{ include "helm-toolkit.keystone_auth" . }}'" \ --e "{'openstack_glance_auth': {{ include "helm-toolkit.keystone_auth" . }}}" - -ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \ -service_type=image \ -description='Openstack Image' \ -endpoint_region='{{ .Values.keystone.glance_region_name }}' \ -url='{{ tuple "image" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }}' \ -interface=internal \ -region_name='{{ .Values.keystone.admin_region_name }}' \ -auth='{{ include "helm-toolkit.keystone_auth" . }}'" \ --e "{ 'openstack_glance_auth': {{ include "helm-toolkit.keystone_auth" . }} }" - -ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \ -service_type=image \ -description='Openstack Image' \ -endpoint_region='{{ .Values.keystone.glance_region_name }}' \ -url='{{ tuple "image" "public" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }}' \ -interface=public \ -region_name='{{ .Values.keystone.admin_region_name }}' \ -auth='{{ include "helm-toolkit.keystone_auth" . }}'" \ --e "{ 'openstack_glance_auth': {{ include "helm-toolkit.keystone_auth" . }} }" - -ansible localhost -vvv -m kolla_keystone_user -a "project=service \ -user={{ .Values.keystone.glance_user }} \ -password={{ .Values.keystone.glance_password }} \ -role=admin \ -region_name={{ .Values.keystone.admin_region_name }} \ -auth='{{ include "helm-toolkit.keystone_auth" . }}'" \ --e "{ 'openstack_glance_auth': {{ include "helm-toolkit.keystone_auth" . }} }" diff --git a/glance/templates/configmap-bin.yaml b/glance/templates/configmap-bin.yaml index 8e2f2840..1cd58c1c 100644 --- a/glance/templates/configmap-bin.yaml +++ b/glance/templates/configmap-bin.yaml @@ -19,5 +19,9 @@ metadata: data: init.sh: |+ {{ tuple "bin/_init.sh.tpl" . | include "helm-toolkit.template" | indent 4 }} - post.sh: |+ -{{ tuple "bin/_post.sh.tpl" . | include "helm-toolkit.template" | indent 4 }} + ks-service.sh: |+ +{{- include "helm-toolkit.keystone_service" . | indent 4 }} + ks-endpoints.sh: |+ +{{- include "helm-toolkit.keystone_endpoints" . | indent 4 }} + ks-user.sh: |+ +{{- include "helm-toolkit.keystone_user" . | indent 4 }} diff --git a/glance/templates/job-db-init.yaml b/glance/templates/job-db-init.yaml index 7b2abe73..2e5029b3 100644 --- a/glance/templates/job-db-init.yaml +++ b/glance/templates/job-db-init.yaml @@ -13,7 +13,7 @@ # limitations under the License. {{- $envAll := . 
}} -{{- $dependencies := .Values.dependencies.init }} +{{- $dependencies := .Values.dependencies.db_init }} apiVersion: batch/v1 kind: Job metadata: diff --git a/glance/templates/job-ks-endpoints.yaml b/glance/templates/job-ks-endpoints.yaml new file mode 100644 index 00000000..f2b4b671 --- /dev/null +++ b/glance/templates/job-ks-endpoints.yaml @@ -0,0 +1,73 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . }} +{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "glance-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_endpoints }} +apiVersion: batch/v1 +kind: Job +metadata: + name: glance-ks-endpoints +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "image" }} +{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.glance_ks_endpoints.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.glance_ks_endpoints.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.glance_ks_endpoints.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.glance_ks_endpoints.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "helm-toolkit.keystone_endpoint_uri_lookup" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: glance-bin diff --git a/glance/templates/job-ks-service.yaml b/glance/templates/job-ks-service.yaml new file mode 100644 index 00000000..e49b0280 --- /dev/null +++ b/glance/templates/job-ks-service.yaml @@ -0,0 +1,67 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . }} +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "glance-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_service }} +apiVersion: batch/v1 +kind: Job +metadata: + name: glance-ks-service +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "image" }} + - name: {{ $osServiceType }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.glance_ks_service.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.glance_ks_service.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.glance_ks_service.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.glance_ks_service.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: glance-bin diff --git a/glance/templates/job-post.yaml b/glance/templates/job-ks-user.yaml similarity index 51% rename from glance/templates/job-post.yaml rename to glance/templates/job-ks-user.yaml index 54680a90..828f8930 100644 --- a/glance/templates/job-post.yaml +++ b/glance/templates/job-ks-user.yaml @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "glance-env-keystone-admin" }} +{{- $ksUserSecret := .Values.keystone.user_secret | default "glance-env-keystone-user" }} {{- $envAll := . 
}} -{{- $dependencies := .Values.dependencies.post }} +{{- $dependencies := .Values.dependencies.ks_user }} apiVersion: batch/v1 kind: Job metadata: - name: glance-post + name: glance-ks-user spec: template: metadata: @@ -26,33 +28,42 @@ spec: {{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} ]' spec: + restartPolicy: OnFailure nodeSelector: {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} - restartPolicy: OnFailure containers: - - name: glance-post - image: {{ .Values.images.post }} + - name: glance-ks-user + image: {{ .Values.images.ks_user }} imagePullPolicy: {{ .Values.images.pull_policy }} {{- if .Values.resources.enabled }} resources: - limits: - cpu: {{ .Values.resources.jobs.post.limits.cpu | quote }} - memory: {{ .Values.resources.jobs.post.limits.memory | quote }} requests: - cpu: {{ .Values.resources.jobs.post.requests.cpu | quote }} - memory: {{ .Values.resources.jobs.post.requests.memory | quote }} + memory: {{ .Values.resources.glance_ks_user.requests.memory | quote }} + cpu: {{ .Values.resources.glance_ks_user.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.glance_ks_user.limits.memory | quote }} + cpu: {{ .Values.resources.glance_ks_user.limits.cpu | quote }} {{- end }} command: - bash - - /tmp/post.sh + - /tmp/ks-user.sh volumeMounts: - - name: postsh - mountPath: /tmp/post.sh - subPath: post.sh + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true env: - - name: ANSIBLE_LIBRARY - value: /usr/share/ansible/ +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "glance" +{{- with $env := dict "ksUserSecret" $ksUserSecret }} +{{- include "helm-toolkit.keystone_user_create_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.glance_user_role | quote }} volumes: - - name: postsh + - name: ks-user-sh configMap: name: glance-bin diff --git a/glance/templates/secret-keystone-admin.env.yaml b/glance/templates/secret-keystone-admin.env.yaml new file mode 100644 index 00000000..3f563c6b --- /dev/null +++ b/glance/templates/secret-keystone-admin.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: glance-env-keystone-admin +type: Opaque +data: + OS_AUTH_URL: | +{{ tuple "identity" "admin" "admin" . 
| include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.admin_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.admin_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.admin_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.admin_password | b64enc | indent 4 }} diff --git a/glance/templates/secret-keystone-user.env.yaml b/glance/templates/secret-keystone-user.env.yaml new file mode 100644 index 00000000..c446688a --- /dev/null +++ b/glance/templates/secret-keystone-user.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: glance-env-keystone-user +type: Opaque +data: + OS_AUTH_URL: | +{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.glance_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.glance_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.glance_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.glance_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.glance_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.glance_password | b64enc | indent 4 }} diff --git a/glance/values.yaml b/glance/values.yaml index 4d09b50c..9e936ee7 100644 --- a/glance/values.yaml +++ b/glance/values.yaml @@ -32,10 +32,12 @@ labels: images: db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton db_sync: quay.io/stackanetes/stackanetes-glance-api:newton + ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton api: quay.io/stackanetes/stackanetes-glance-api:newton registry: quay.io/stackanetes/stackanetes-glance-registry:newton - post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 pull_policy: "IfNotPresent" upgrades: @@ -129,48 +131,44 @@ resources: cpu: "500m" dependencies: - api: + db_init: + jobs: + - mariadb-seed + service: + - mariadb + db_sync: jobs: - glance-db-init + service: + - mariadb + ks_user: + service: + - keystone-api + ks_service: + service: + - keystone-api + ks_endpoints: + jobs: + - glance-ks-service + service: + - keystone-api + api: + jobs: - glance-db-sync - - keystone-db-sync + - glance-ks-user + - glance-ks-endpoints service: - keystone-api - mariadb registry: jobs: - - glance-db-init - glance-db-sync - - keystone-db-sync + - glance-ks-user + - 
glance-ks-endpoints service: - keystone-api - mariadb - glance-api - db_sync: - jobs: - - keystone-db-init - - keystone-db-sync - - glance-db-init - - mariadb-seed - service: - - mariadb - init: - jobs: - - mariadb-seed - service: - - mariadb - post: - jobs: - - glance-db-init - - glance-db-sync - - keystone-db-sync - - keystone-db-init - - mariadb-seed - service: - - mariadb - - keystone-api - - glance-api - - glance-registry # typically overriden by environmental # values, but should include all endpoints diff --git a/heat/templates/deployment-api.yaml b/heat/templates/deployment-api.yaml index 66e0343a..c0901356 100755 --- a/heat/templates/deployment-api.yaml +++ b/heat/templates/deployment-api.yaml @@ -49,10 +49,10 @@ spec: - --config-dir - /etc/heat/conf ports: - - containerPort: {{ .Values.service.api.port }} + - containerPort: {{ .Values.network.api.port }} readinessProbe: tcpSocket: - port: {{ .Values.service.api.port }} + port: {{ .Values.network.api.port }} volumeMounts: - name: pod-etc-heat mountPath: /etc/heat diff --git a/heat/templates/deployment-cfn.yaml b/heat/templates/deployment-cfn.yaml index 8bd0fcbd..84cd91f7 100644 --- a/heat/templates/deployment-cfn.yaml +++ b/heat/templates/deployment-cfn.yaml @@ -49,10 +49,10 @@ spec: - --config-dir - /etc/heat/conf ports: - - containerPort: {{ .Values.service.cfn.port }} + - containerPort: {{ .Values.network.cfn.port }} readinessProbe: tcpSocket: - port: {{ .Values.service.cfn.port }} + port: {{ .Values.network.cfn.port }} volumeMounts: - name: pod-etc-heat mountPath: /etc/heat diff --git a/heat/templates/deployment-cloudwatch.yaml b/heat/templates/deployment-cloudwatch.yaml index 97ccddd4..d1b99af4 100644 --- a/heat/templates/deployment-cloudwatch.yaml +++ b/heat/templates/deployment-cloudwatch.yaml @@ -49,10 +49,10 @@ spec: - --config-dir - /etc/heat/conf ports: - - containerPort: {{ .Values.service.cloudwatch.port }} + - containerPort: {{ .Values.network.cloudwatch.port }} readinessProbe: tcpSocket: - port: {{ .Values.service.cloudwatch.port }} + port: {{ .Values.network.cloudwatch.port }} volumeMounts: - name: pod-etc-heat mountPath: /etc/heat diff --git a/heat/templates/etc/_heat.conf.tpl b/heat/templates/etc/_heat.conf.tpl index 897900ad..f6d60e64 100644 --- a/heat/templates/etc/_heat.conf.tpl +++ b/heat/templates/etc/_heat.conf.tpl @@ -22,9 +22,9 @@ deferred_auth_method = "trusts" enable_stack_adopt = "True" enable_stack_abandon = "True" -heat_metadata_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }} -heat_waitcondition_server_url = {{ .Values.service.cfn.proto }}://{{ .Values.service.cfn.name }}:{{ .Values.service.cfn.port }}/v1/waitcondition -heat_watch_server_url = {{ .Values.service.cloudwatch.proto }}://{{ .Values.service.cloudwatch.name }}:{{ .Values.service.cloudwatch.port }} +heat_metadata_server_url = {{ tuple "cloudformation" "public" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | trimSuffix .Values.endpoints.cloudformation.path }} +heat_waitcondition_server_url = {{ tuple "cloudformation" "public" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }}/waitcondition +heat_watch_server_url = {{ tuple "cloudwatch" "public" "api" . 
| include "helm-toolkit.keystone_endpoint_uri_lookup" | trimSuffix "/" }} num_engine_workers = {{ .Values.resources.engine.workers }} @@ -47,7 +47,7 @@ max_retries = -1 signing_dir = "/var/cache/heat" memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" auth_version = v3 -auth_url = {{ include "helm-toolkit.endpoint_keystone_internal" . }} +auth_url = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} auth_type = password region_name = {{ .Values.keystone.heat_region_name }} project_domain_name = {{ .Values.keystone.heat_project_domain }} @@ -57,17 +57,17 @@ username = {{ .Values.keystone.heat_user }} password = {{ .Values.keystone.heat_password }} [heat_api] -bind_port = {{ .Values.service.api.port }} +bind_port = {{ .Values.network.api.port }} bind_host = 0.0.0.0 workers = {{ .Values.resources.api.workers }} [heat_api_cloudwatch] -bind_port = {{ .Values.service.cloudwatch.port }} +bind_port = {{ .Values.network.cloudwatch.port }} bind_host = 0.0.0.0 workers = {{ .Values.resources.cloudwatch.workers }} [heat_api_cfn] -bind_port = {{ .Values.service.cfn.port }} +bind_port = {{ .Values.network.cfn.port }} bind_host = 0.0.0.0 workers = {{ .Values.resources.cfn.workers }} @@ -88,9 +88,11 @@ auth_section = "trustee_keystone" signing_dir = "/var/cache/heat" memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" auth_version = v3 -auth_url = {{ include "helm-toolkit.endpoint_keystone_internal" . }} +auth_url = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} auth_type = password region_name = {{ .Values.keystone.heat_trustee_region_name }} +project_domain_name = {{ .Values.keystone.heat_trustee_project_domain }} +project_name = {{ .Values.keystone.heat_trustee_project_name }} user_domain_name = {{ .Values.keystone.heat_trustee_user_domain }} username = {{ .Values.keystone.heat_trustee_user }} password = {{ .Values.keystone.heat_trustee_password }} @@ -101,4 +103,4 @@ endpoint_type = internalURL [clients_keystone] endpoint_type = internalURL -auth_uri = {{ include "helm-toolkit.endpoint_keystone_internal" . }} +auth_uri = {{ tuple "identity" "internal" "api" . 
| include "endpoint_type_lookup_addr" }} diff --git a/heat/templates/job-ks-endpoints.yaml.yaml b/heat/templates/job-ks-endpoints.yaml.yaml index 9efbbff1..cda77c03 100644 --- a/heat/templates/job-ks-endpoints.yaml.yaml +++ b/heat/templates/job-ks-endpoints.yaml.yaml @@ -31,9 +31,9 @@ spec: nodeSelector: {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: -{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }} +{{- range $key1, $osServiceType := tuple "orchestration" "cloudformation" }} {{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} - - name: {{ $osServiceName }}-ks-endpoints-{{ $osServiceEndPoint }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} image: {{ $envAll.Values.images.ks_endpoints }} imagePullPolicy: {{ $envAll.Values.images.pull_policy }} {{- if $envAll.Values.resources.enabled }} @@ -60,11 +60,11 @@ spec: - name: OS_SVC_ENDPOINT value: {{ $osServiceEndPoint }} - name: OS_SERVICE_NAME - value: {{ $osServiceName }} + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} - name: OS_SERVICE_TYPE - value: {{ tuple $osServiceName $envAll | include "helm-toolkit.endpoint_type_lookup" }} + value: {{ $osServiceType }} - name: OS_SERVICE_ENDPOINT - value: {{ tuple $osServiceName $osServiceEndPoint "api" $envAll | include "helm-toolkit.endpoint_uri_lookup" }} + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "helm-toolkit.keystone_endpoint_uri_lookup" }} {{- end }} {{- end }} volumes: diff --git a/heat/templates/job-ks-service.yaml b/heat/templates/job-ks-service.yaml index 4d432154..5da519c1 100644 --- a/heat/templates/job-ks-service.yaml +++ b/heat/templates/job-ks-service.yaml @@ -31,8 +31,8 @@ spec: nodeSelector: {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} containers: -{{- range $key1, $osServiceName := tuple "heat" "heat-cfn" }} - - name: {{ $osServiceName }}-ks-service-registration +{{- range $key1, $osServiceType := tuple "orchestration" "cloudformation" }} + - name: {{ $osServiceType }}-ks-service-registration image: {{ $envAll.Values.images.ks_service }} imagePullPolicy: {{ $envAll.Values.images.pull_policy }} {{- if $envAll.Values.resources.enabled }} @@ -57,11 +57,10 @@ spec: {{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} {{- end }} - name: OS_SERVICE_NAME - value: {{ $osServiceName }} + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} - name: OS_SERVICE_TYPE - value: {{ tuple $osServiceName $envAll | include "helm-toolkit.endpoint_type_lookup" }} + value: {{ $osServiceType }} {{- end }} - volumes: - name: ks-service-sh configMap: diff --git a/heat/templates/secret-keystone-admin.env.yaml b/heat/templates/secret-keystone-admin.env.yaml index 6e07f8b0..f8d07a4a 100644 --- a/heat/templates/secret-keystone-admin.env.yaml +++ b/heat/templates/secret-keystone-admin.env.yaml @@ -19,7 +19,7 @@ metadata: type: Opaque data: OS_AUTH_URL: | -{{ .Values.keystone.auth_url | b64enc | indent 4 }} +{{ tuple "identity" "admin" "admin" . 
| include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} OS_REGION_NAME: | {{ .Values.keystone.admin_region_name | b64enc | indent 4 }} OS_PROJECT_DOMAIN_NAME: | diff --git a/heat/templates/secret-keystone-stack-user.env.yaml b/heat/templates/secret-keystone-stack-user.env.yaml index 07747e6a..b4c61030 100644 --- a/heat/templates/secret-keystone-stack-user.env.yaml +++ b/heat/templates/secret-keystone-stack-user.env.yaml @@ -18,6 +18,8 @@ metadata: name: {{ .Values.keystone_secrets.stack }} type: Opaque data: + OS_AUTH_URL: | +{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} OS_REGION_NAME: | {{ .Values.keystone.heat_stack_region_name | b64enc | indent 4 }} OS_DOMAIN_NAME: | diff --git a/heat/templates/secret-keystone-trustee.env.yaml b/heat/templates/secret-keystone-trustee.env.yaml index 118d45f0..c020204d 100644 --- a/heat/templates/secret-keystone-trustee.env.yaml +++ b/heat/templates/secret-keystone-trustee.env.yaml @@ -19,7 +19,7 @@ metadata: type: Opaque data: OS_AUTH_URL: | -{{ .Values.keystone.auth_url | b64enc | indent 4 }} +{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} OS_REGION_NAME: | {{ .Values.keystone.heat_trustee_region_name | b64enc | indent 4 }} OS_PROJECT_DOMAIN_NAME: | diff --git a/heat/templates/secret-keystone-user.env.yaml b/heat/templates/secret-keystone-user.env.yaml index 1d5978d7..96dbea14 100644 --- a/heat/templates/secret-keystone-user.env.yaml +++ b/heat/templates/secret-keystone-user.env.yaml @@ -19,7 +19,7 @@ metadata: type: Opaque data: OS_AUTH_URL: | -{{ .Values.keystone.auth_url | b64enc | indent 4 }} +{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} OS_REGION_NAME: | {{ .Values.keystone.heat_region_name | b64enc | indent 4 }} OS_PROJECT_DOMAIN_NAME: | diff --git a/heat/templates/service-api.yaml b/heat/templates/service-api.yaml index 0529361d..efde9ca0 100644 --- a/heat/templates/service-api.yaml +++ b/heat/templates/service-api.yaml @@ -15,9 +15,16 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.service.api.name }} + name: heat-api spec: ports: - - port: {{ .Values.service.api.port }} + - port: {{ .Values.network.api.port }} + {{ if .Values.network.api.node_port.enabled }} + nodePort: {{ .Values.network.api.node_port.port }} + {{ end }} selector: app: heat-api + {{ if .Values.network.api.node_port.enabled }} + type: NodePort + {{ end }} + \ No newline at end of file diff --git a/heat/templates/service-cfn.yaml b/heat/templates/service-cfn.yaml index 37d722fc..2620d07a 100644 --- a/heat/templates/service-cfn.yaml +++ b/heat/templates/service-cfn.yaml @@ -15,9 +15,16 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.service.cfn.name }} + name: heat-cfn spec: ports: - - port: {{ .Values.service.cfn.port }} + - port: {{ .Values.network.cfn.port }} + {{ if .Values.network.cfn.node_port.enabled }} + nodePort: {{ .Values.network.cfn.node_port.port }} + {{ end }} selector: app: heat-cfn + {{ if .Values.network.cfn.node_port.enabled }} + type: NodePort + {{ end }} + \ No newline at end of file diff --git a/heat/templates/service-cloudwatch.yaml b/heat/templates/service-cloudwatch.yaml index 5f7912c7..8f2c75dd 100644 --- a/heat/templates/service-cloudwatch.yaml +++ b/heat/templates/service-cloudwatch.yaml @@ -15,9 +15,16 @@ apiVersion: v1 kind: Service metadata: - name: {{ .Values.service.cloudwatch.name }} + name: 
heat-cloudwatch spec: ports: - - port: {{ .Values.service.cloudwatch.port }} + - port: {{ .Values.network.cloudwatch.port }} + {{ if .Values.network.cloudwatch.node_port.enabled }} + nodePort: {{ .Values.network.cloudwatch.node_port.port }} + {{ end }} selector: app: heat-cloudwatch + {{ if .Values.network.cloudwatch.node_port.enabled }} + type: NodePort + {{ end }} + \ No newline at end of file diff --git a/heat/values.yaml b/heat/values.yaml index 662b719e..61ff920e 100644 --- a/heat/values.yaml +++ b/heat/values.yaml @@ -29,7 +29,7 @@ labels: node_selector_value: enabled images: - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton db_sync: docker.io/kolla/ubuntu-source-heat-api:3.0.1 ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton @@ -48,8 +48,6 @@ keystone_secrets: stack: "heat-env-keystone-stack-user" keystone: - auth_uri: "http://keystone-api:5000" - auth_url: "http://keystone-api:35357" admin_user: "admin" admin_user_domain: "default" admin_password: "password" @@ -79,19 +77,28 @@ keystone: heat_stack_password: "password" heat_stack_region_name: "RegionOne" -service: +network: api: name: "heat-api" port: 8004 proto: "http" + node_port: + enabled: false + port: 30004 cfn: name: "heat-cfn" port: 8000 proto: "http" + node_port: + enabled: false + port: 30800 cloudwatch: name: "heat-cloudwatch" port: 8003 proto: "http" + node_port: + enabled: false + port: 30003 database: address: mariadb @@ -176,33 +183,34 @@ dependencies: # values, but should include all endpoints # required by this chart endpoints: - keystone: + identity: + name: keystone hosts: default: keystone-api path: /v3 - type: identity scheme: 'http' port: - admin: 35357 - public: 5000 - heat: + admin: 35357 + api: 5000 + orchestration: + name: heat hosts: default: heat-api path: '/v1/%(project_id)s' - type: orchestration scheme: 'http' port: api: 8004 - heat_cfn: + cloudformation: + name: heat-cfn hosts: default: heat-cfn path: /v1 - type: cloudformation scheme: 'http' port: api: 8000 # Cloudwatch does not get an entry in the keystone service catalog - heat_cloudwatch: + cloudwatch: + name: heat-cloudwatch hosts: default: heat-cloudwatch path: null diff --git a/helm-toolkit/templates/_endpoints.tpl b/helm-toolkit/templates/_endpoints.tpl index 015670dd..d3bbf97f 100644 --- a/helm-toolkit/templates/_endpoints.tpl +++ b/helm-toolkit/templates/_endpoints.tpl @@ -16,116 +16,6 @@ # endpoints #----------------------------------------- -# this should be a generic function leveraging a tuple -# for input, e.g. { endpoint keystone internal . 
} -# however, constructing this appears to be a -# herculean effort in gotpl - -{{- define "helm-toolkit.endpoint_keystone_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.keystone -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.public}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_keystone_admin" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.keystone -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.admin}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_nova_api_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.nova -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_nova_metadata_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.nova -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.metadata}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_nova_novncproxy_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.nova -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.novncproxy}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_glance_api_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.glance -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_glance_registry_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.glance -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.registry}}{{.path}} -{{- end -}} -{{- end -}} - -{{- define "helm-toolkit.endpoint_neutron_api_internal" -}} -{{- $fqdn := .Release.Namespace -}} -{{- if .Values.endpoints.fqdn -}} -{{- $fqdn := .Values.endpoints.fqdn -}} -{{- end -}} -{{- with .Values.endpoints.neutron -}} - {{.scheme}}://{{.hosts.internal | default .hosts.default}}.{{ $fqdn }}:{{.port.api}}{{.path}} -{{- end -}} -{{- end -}} - -# this function returns the endpoint uri for a service, it takes an tuple -# input in the form: service-name, endpoint-class, port-name. eg: -# { tuple "heat" "public" "api" . | include "helm-toolkit.endpoint_uri_lookup" } -# will return the appropriate URI. Once merged this should phase out the above. - -{{- define "helm-toolkit.endpoint_uri_lookup" -}} -{{- $name := index . 0 -}} -{{- $endpoint := index . 1 -}} -{{- $port := index . 2 -}} -{{- $context := index . 
3 -}} -{{- $nameNorm := $name | replace "-" "_" }} -{{- $endpointMap := index $context.Values.endpoints $nameNorm }} -{{- $fqdn := $context.Release.Namespace -}} -{{- if $context.Values.endpoints.fqdn -}} -{{- $fqdn := $context.Values.endpoints.fqdn -}} -{{- end -}} -{{- with $endpointMap -}} -{{- $endpointScheme := .scheme }} -{{- $endpointHost := index .hosts $endpoint | default .hosts.default}} -{{- $endpointPort := index .port $port | default .port.default }} -{{- $endpointPath := .path }} -{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath | quote -}} -{{- end -}} -{{- end -}} - # this function returns the endpoint uri for a service, it takes an tuple # input in the form: service-type, endpoint-class, port-name. eg: # { tuple "orchestration" "public" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" } @@ -213,25 +103,6 @@ {{- end -}} {{- end -}} -#------------------------------- -# endpoint type lookup -#------------------------------- - -# this function is used in endpoint management templates -# it returns the service type for an openstack service eg: -# { tuple heat . | include "ks_endpoint_type" } -# will return "orchestration" - -{{- define "helm-toolkit.endpoint_type_lookup" -}} -{{- $name := index . 0 -}} -{{- $context := index . 1 -}} -{{- $nameNorm := $name | replace "-" "_" }} -{{- $endpointMap := index $context.Values.endpoints $nameNorm }} -{{- $endpointType := index $endpointMap "type" }} -{{- $endpointType | quote -}} -{{- end -}} - - #------------------------------- # endpoint name lookup #------------------------------- diff --git a/helm-toolkit/templates/_hosts.tpl b/helm-toolkit/templates/_hosts.tpl index b2a41cd4..502593ca 100644 --- a/helm-toolkit/templates/_hosts.tpl +++ b/helm-toolkit/templates/_hosts.tpl @@ -29,7 +29,7 @@ #----------------------------------------- # infrastructure services -{{- define "helm-toolkit.rabbitmq_host"}}memcached.{{.Release.Namespace}}.svc.{{ include "helm-toolkit.region" . }}.{{ include "helm-toolkit.tld" . }}{{- end}} +{{- define "helm-toolkit.rabbitmq_host"}}rabbitmq.{{.Release.Namespace}}.svc.{{ include "helm-toolkit.region" . }}.{{ include "helm-toolkit.tld" . }}{{- end}} {{- define "helm-toolkit.mariadb_host"}}mariadb.{{.Release.Namespace}}.svc.{{ include "helm-toolkit.region" . }}.{{ include "helm-toolkit.tld" . }}{{- end}} {{- define "helm-toolkit.postgresql_host"}}postgresql.{{.Release.Namespace}}.svc.{{ include "helm-toolkit.region" . }}.{{ include "helm-toolkit.tld" . }}{{- end}} diff --git a/horizon/templates/etc/_local_settings.tpl b/horizon/templates/etc/_local_settings.tpl index ead052ec..0becfebb 100644 --- a/horizon/templates/etc/_local_settings.tpl +++ b/horizon/templates/etc/_local_settings.tpl @@ -144,7 +144,7 @@ SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION': '{{ include "helm-toolkit.rabbitmq_host" . }}' + 'LOCATION': '{{ .Values.memcached.host }}:{{ .Values.memcached.port }}' } } @@ -165,7 +165,7 @@ EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # ('http://cluster2.example.com:5000/v2.0', 'cluster2'), #] -OPENSTACK_KEYSTONE_URL = "{{ include "helm-toolkit.endpoint_keystone_internal" . }}" +OPENSTACK_KEYSTONE_URL = "{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }}" OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_" # Enables keystone web single-sign-on if set to True. 
diff --git a/horizon/values.yaml b/horizon/values.yaml index af3ee684..9bf251ef 100644 --- a/horizon/values.yaml +++ b/horizon/values.yaml @@ -20,7 +20,7 @@ replicas: 1 images: - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 horizon: quay.io/stackanetes/stackanetes-horizon:newton pull_policy: "IfNotPresent" @@ -40,6 +40,10 @@ network: node_port: 30000 enable_node_port: false +memcached: + host: memcached + port: 11211 + local_settings: horizon_secret_key: 9aee62c0-5253-4a86-b189-e0fb71fa503c debug: "True" @@ -60,17 +64,16 @@ resources: memory: "128Mi" cpu: "500m" -# mits:typically overriden by environmental +# typically overriden by environmental # values, but should include all endpoints # required by this chart endpoints: - keystone: + identity: + name: keystone hosts: default: keystone-api - path: /v2.0 - type: identity + path: /v3 scheme: 'http' port: - admin: 35357 - public: 5000 - + admin: 35357 + api: 5000 diff --git a/keystone/templates/etc/_keystone.conf.tpl b/keystone/templates/etc/_keystone.conf.tpl index 499f3485..bac37db4 100644 --- a/keystone/templates/etc/_keystone.conf.tpl +++ b/keystone/templates/etc/_keystone.conf.tpl @@ -135,6 +135,7 @@ # From keystone # +<<<<<<< HEAD # Using this feature is *NOT* recommended. Instead, use the `keystone-manage # bootstrap` command. The value of this option is treated as a "shared secret" # that can be used to bootstrap Keystone through the API. This "token" does not @@ -715,9 +716,6 @@ # from .auth.keystone.oauth1 {{ if not .auth.keystone.oauth1 }}#{{ end }}oauth1 = {{ .auth.keystone.oauth1 | default "" }} - -[cache] - # # From oslo.cache # @@ -3680,3 +3678,4 @@ {{- end -}} + diff --git a/keystone/values.yaml b/keystone/values.yaml index 53f0c82a..a9a1edad 100644 --- a/keystone/values.yaml +++ b/keystone/values.yaml @@ -27,7 +27,7 @@ images: db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton db_sync: quay.io/stackanetes/stackanetes-keystone-api:newton api: quay.io/stackanetes/stackanetes-keystone-api:newton - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 pull_policy: "IfNotPresent" upgrades: diff --git a/maas/values.yaml b/maas/values.yaml index 5c7f8e92..35d76443 100644 --- a/maas/values.yaml +++ b/maas/values.yaml @@ -39,7 +39,7 @@ dependencies: images: maas_region: quay.io/attcomdev/maas-region:2.1.2-2 maas_rack: quay.io/attcomdev/maas-rack:2.1.2-2 - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 pull_policy: Always jobs: diff --git a/neutron/templates/bin/_post.sh.tpl b/neutron/templates/bin/_post.sh.tpl deleted file mode 100644 index 2bf6eb6c..00000000 --- a/neutron/templates/bin/_post.sh.tpl +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-
-set -ex
-export HOME=/tmp
-
-ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \
-service_type=network \
-description='Openstack Networking' \
-endpoint_region={{ .Values.keystone.neutron_region_name }} \
-url='{{ include "helm-toolkit.endpoint_neutron_api_internal" . }}' \
-interface=admin \
-region_name={{ .Values.keystone.admin_region_name }} \
-auth='{{ include "helm-toolkit.keystone_auth" .}}'" \
--e "{'openstack_neutron_auth':{{ include "helm-toolkit.keystone_auth" .}}}"
-
-ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \
-service_type=network \
-description='Openstack Networking' \
-endpoint_region={{ .Values.keystone.neutron_region_name }} \
-url='{{ include "helm-toolkit.endpoint_neutron_api_internal" . }}' \
-interface=internal \
-region_name={{ .Values.keystone.admin_region_name }} \
-auth='{{ include "helm-toolkit.keystone_auth" .}}'" \
--e "{'openstack_neutron_auth':{{ include "helm-toolkit.keystone_auth" .}}}"
-
-ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \
-service_type=network \
-description='Openstack Networking' \
-endpoint_region={{ .Values.keystone.neutron_region_name }} \
-url='{{ include "helm-toolkit.endpoint_neutron_api_internal" . }}' \
-interface=public \
-region_name={{ .Values.keystone.admin_region_name }} \
-auth='{{ include "helm-toolkit.keystone_auth" .}}'" \
--e "{'openstack_neutron_auth':{{ include "helm-toolkit.keystone_auth" .}}}"
-
-ansible localhost -vvv -m kolla_keystone_user -a "project=service \
-user={{ .Values.keystone.neutron_user }} \
-password={{ .Values.keystone.neutron_password }} \
-role=admin \
-region_name={{ .Values.keystone.neutron_region_name }} \
-auth='{{ include "helm-toolkit.keystone_auth" .}}'" \
--e "{'openstack_neutron_auth':{{ include "helm-toolkit.keystone_auth" .}}}"
diff --git a/neutron/templates/configmap-bin.yaml b/neutron/templates/configmap-bin.yaml
index fe94baa3..5db74eb6 100644
--- a/neutron/templates/configmap-bin.yaml
+++ b/neutron/templates/configmap-bin.yaml
@@ -17,6 +17,12 @@ kind: ConfigMap
 metadata:
   name: neutron-bin
 data:
+  ks-service.sh: |+
+{{- include "helm-toolkit.keystone_service" . | indent 4 }}
+  ks-endpoints.sh: |+
+{{- include "helm-toolkit.keystone_endpoints" . | indent 4 }}
+  ks-user.sh: |+
+{{- include "helm-toolkit.keystone_user" . | indent 4 }}
   init.sh: |
 {{ tuple "bin/_init.sh.tpl" . | include "helm-toolkit.template" | indent 4 }}
   neutron-openvswitch-agent.sh: |
@@ -27,5 +33,3 @@ data:
 {{ tuple "bin/_openvswitch-ensure-configured.sh.tpl" . | include "helm-toolkit.template" | indent 4 }}
   openvswitch-vswitchd.sh: |
 {{ tuple "bin/_openvswitch-vswitchd.sh.tpl" . | include "helm-toolkit.template" | indent 4 }}
-  post.sh: |
-{{ tuple "bin/_post.sh.tpl" . | include "helm-toolkit.template" | indent 4 }}
diff --git a/neutron/templates/etc/_metadata-agent.ini.tpl b/neutron/templates/etc/_metadata-agent.ini.tpl
index 2ba41194..ef243c81 100644
--- a/neutron/templates/etc/_metadata-agent.ini.tpl
+++ b/neutron/templates/etc/_metadata-agent.ini.tpl
@@ -16,16 +16,14 @@ debug = {{ .Values.metadata_agent.default.debug }}
 
 # Neutron credentials for API access
+auth_url = {{ tuple "identity" "admin" "admin" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }}
 auth_plugin = password
-auth_url = {{ include "helm-toolkit.endpoint_keystone_admin" . }}
-auth_uri = {{ include "helm-toolkit.endpoint_keystone_internal" .
}} auth_region = {{ .Values.keystone.neutron_region_name }} -admin_tenant_name = service -project_domain_id = default -user_domain_id = default -project_name = service -username = {{ .Values.keystone.admin_user }} -password = {{ .Values.keystone.admin_password }} +project_domain_name = {{ .Values.keystone.neutron_project_domain }} +project_name = {{ .Values.keystone.neutron_project_name }} +user_domain_name = {{ .Values.keystone.neutron_user_domain }} +username = {{ .Values.keystone.neutron_user }} +password = {{ .Values.keystone.neutron_password }} endpoint_type = adminURL # Nova metadata service IP and port diff --git a/neutron/templates/etc/_neutron.conf.tpl b/neutron/templates/etc/_neutron.conf.tpl index 716705f0..4adcf328 100644 --- a/neutron/templates/etc/_neutron.conf.tpl +++ b/neutron/templates/etc/_neutron.conf.tpl @@ -47,13 +47,14 @@ router_auto_schedule = True transport_url = rabbit://{{ .Values.rabbitmq.admin_user }}:{{ .Values.rabbitmq.admin_password }}@{{ .Values.rabbitmq.address }}:{{ .Values.rabbitmq.port }} [nova] -auth_url = {{ include "helm-toolkit.endpoint_keystone_internal" . }} -auth_plugin = password -project_domain_id = default -user_domain_id = default -endpoint_type = internal +memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" +auth_version = v3 +auth_url = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} +auth_type = password region_name = {{ .Values.keystone.nova_region_name }} -project_name = service +project_domain_name = {{ .Values.keystone.nova_project_domain }} +project_name = {{ .Values.keystone.nova_project_name }} +user_domain_name = {{ .Values.keystone.nova_user_domain }} username = {{ .Values.keystone.nova_user }} password = {{ .Values.keystone.nova_password }} @@ -73,11 +74,14 @@ connection = mysql+pymysql://{{ .Values.database.neutron_user }}:{{ .Values.data max_retries = -1 [keystone_authtoken] -auth_url = {{ include "helm-toolkit.endpoint_keystone_internal" . }} +memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" +auth_version = v3 +auth_url = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service +region_name = {{ .Values.keystone.neutron_region_name }} +project_domain_name = {{ .Values.keystone.neutron_project_domain }} +project_name = {{ .Values.keystone.neutron_project_name }} +user_domain_name = {{ .Values.keystone.neutron_user_domain }} username = {{ .Values.keystone.neutron_user }} password = {{ .Values.keystone.neutron_password }} diff --git a/neutron/templates/job-ks-endpoints.yaml b/neutron/templates/job-ks-endpoints.yaml new file mode 100644 index 00000000..fae3ea3c --- /dev/null +++ b/neutron/templates/job-ks-endpoints.yaml @@ -0,0 +1,73 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}} +{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "neutron-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_endpoints }} +apiVersion: batch/v1 +kind: Job +metadata: + name: neutron-ks-endpoints +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "network" }} +{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.neutron_ks_endpoints.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.neutron_ks_endpoints.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.neutron_ks_endpoints.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.neutron_ks_endpoints.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "helm-toolkit.keystone_endpoint_uri_lookup" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: neutron-bin diff --git a/neutron/templates/job-ks-service.yaml b/neutron/templates/job-ks-service.yaml new file mode 100644 index 00000000..c013dce6 --- /dev/null +++ b/neutron/templates/job-ks-service.yaml @@ -0,0 +1,67 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}} +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "neutron-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_service }} +apiVersion: batch/v1 +kind: Job +metadata: + name: neutron-ks-service +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "network" }} + - name: {{ $osServiceType }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.neutron_ks_service.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.neutron_ks_service.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.neutron_ks_service.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.neutron_ks_service.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: neutron-bin diff --git a/neutron/templates/job-post.yaml b/neutron/templates/job-ks-user.yaml similarity index 51% rename from neutron/templates/job-post.yaml rename to neutron/templates/job-ks-user.yaml index a8c23e31..71983450 100644 --- a/neutron/templates/job-post.yaml +++ b/neutron/templates/job-ks-user.yaml @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "neutron-env-keystone-admin" }} +{{- $ksUserSecret := .Values.keystone.user_secret | default "neutron-env-keystone-user" }} {{- $envAll := . 
}} -{{- $dependencies := .Values.dependencies.db_sync }} +{{- $dependencies := .Values.dependencies.ks_user }} apiVersion: batch/v1 kind: Job metadata: - name: neutron-post + name: neutron-ks-user spec: template: metadata: @@ -30,29 +32,38 @@ spec: nodeSelector: {{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }} containers: - - name: neutron-post - image: {{ .Values.images.post }} + - name: neutron-ks-user + image: {{ .Values.images.ks_user }} imagePullPolicy: {{ .Values.images.pull_policy }} - command: - - bash - - /tmp/post.sh {{- if .Values.resources.enabled }} resources: - limits: - cpu: {{ .Values.resources.jobs.post.limits.cpu | quote }} - memory: {{ .Values.resources.jobs.post.limits.memory | quote }} requests: - cpu: {{ .Values.resources.jobs.post.requests.cpu | quote }} - memory: {{ .Values.resources.jobs.post.requests.memory | quote }} + memory: {{ .Values.resources.neutron_ks_user.requests.memory | quote }} + cpu: {{ .Values.resources.neutron_ks_user.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.neutron_ks_user.limits.memory | quote }} + cpu: {{ .Values.resources.neutron_ks_user.limits.cpu | quote }} {{- end }} - env: - - name: ANSIBLE_LIBRARY - value: /usr/share/ansible/ + command: + - bash + - /tmp/ks-user.sh volumeMounts: - - name: postsh - mountPath: /tmp/post.sh - subPath: post.sh + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "neutron" +{{- with $env := dict "ksUserSecret" $ksUserSecret }} +{{- include "helm-toolkit.keystone_user_create_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.neutron_user_role | quote }} volumes: - - name: postsh + - name: ks-user-sh configMap: name: neutron-bin diff --git a/neutron/templates/secret-keystone-admin.env.yaml b/neutron/templates/secret-keystone-admin.env.yaml new file mode 100644 index 00000000..4cee2c87 --- /dev/null +++ b/neutron/templates/secret-keystone-admin.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: neutron-env-keystone-admin +type: Opaque +data: + OS_AUTH_URL: | +{{ tuple "identity" "admin" "admin" . 
| include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.admin_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.admin_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.admin_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.admin_password | b64enc | indent 4 }} diff --git a/neutron/templates/secret-keystone-user.env.yaml b/neutron/templates/secret-keystone-user.env.yaml new file mode 100644 index 00000000..3faafe10 --- /dev/null +++ b/neutron/templates/secret-keystone-user.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: neutron-env-keystone-user +type: Opaque +data: + OS_AUTH_URL: | +{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.neutron_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.neutron_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.neutron_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.neutron_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.neutron_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.neutron_password | b64enc | indent 4 }} diff --git a/neutron/values.yaml b/neutron/values.yaml index 6dd479c7..b11edcdb 100644 --- a/neutron/values.yaml +++ b/neutron/values.yaml @@ -23,6 +23,9 @@ replicas: images: db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton db_sync: quay.io/stackanetes/stackanetes-neutron-server:newton + ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton server: quay.io/stackanetes/stackanetes-neutron-server:newton dhcp: quay.io/stackanetes/stackanetes-neutron-dhcp-agent:newton metadata: quay.io/stackanetes/stackanetes-neutron-metadata-agent:newton @@ -30,8 +33,7 @@ images: neutron_openvswitch_agent: quay.io/stackanetes/stackanetes-neutron-openvswitch-agent:newton openvswitch_db_server: quay.io/attcomdev/openvswitch-vswitchd:latest openvswitch_vswitchd: quay.io/attcomdev/openvswitch-vswitchd:latest - post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 pull_policy: "IfNotPresent" upgrades: @@ -81,7 +83,8 @@ network: metadata: 8775 memcached: - address: "memcached:11211" + host: memcached + port: 11211 rabbitmq: address: rabbitmq @@ -91,18 +94,26 @@ rabbitmq: keystone: admin_user: "admin" + admin_user_domain: 
"default" admin_password: "password" admin_project_name: "admin" + admin_project_domain: "default" admin_region_name: "RegionOne" - domain_name: "default" - tenant_name: "admin" neutron_user: "neutron" + neutron_user_domain: "default" + neutron_user_role: "admin" neutron_password: "password" + neutron_project_name: "service" + neutron_project_domain: "default" neutron_region_name: "RegionOne" nova_user: "nova" + nova_user_domain: "default" + nova_user_role: "admin" nova_password: "password" + nova_project_name: "service" + nova_project_domain: "default" nova_region_name: "RegionOne" database: @@ -151,9 +162,32 @@ ml2: - "physnet1:br-physnet1" dependencies: + db_init: + jobs: + - mariadb-seed + service: + - mariadb + db_sync: + jobs: + - neutron-db-init + service: + - mariadb + ks_user: + service: + - keystone-api + ks_service: + service: + - keystone-api + ks_endpoints: + jobs: + - neutron-ks-service + service: + - keystone-api server: jobs: - neutron-db-sync + - neutron-ks-user + - neutron-ks-endpoints service: - rabbitmq - mariadb @@ -161,57 +195,29 @@ dependencies: - memcached dhcp: service: - - neutron-server - rabbitmq + - neutron-server - nova-api - jobs: - - neutron-db-init - - nova-post daemonset: - ovs-agent metadata: service: - rabbitmq + - neutron-server - nova-api - jobs: - - neutron-db-init - - nova-post daemonset: - ovs-agent ovs_agent: - jobs: - - neutron-post - - nova-post service: - - keystone-api - rabbitmq - neutron-server l3: service: - - neutron-server - rabbitmq + - neutron-server - nova-api - jobs: - - nova-db-init - - neutron-db-init - - nova-post daemonset: - ovs-agent - db_sync: - jobs: - - neutron-db-init - service: - - mariadb - db_init: - jobs: - - mariadb-seed - service: - - mariadb - post: - service: - - keystone-api - jobs: - - neutron-db-sync resources: enabled: false @@ -293,39 +299,30 @@ resources: # values, but should include all endpoints # required by this chart endpoints: - glance: - hosts: - default: glance-api - type: image - path: null - scheme: 'http' - port: - api: 9292 - registry: 9191 - nova: + compute: + name: nova hosts: default: nova-api path: "/v2/%(tenant_id)s" - type: compute scheme: 'http' port: api: 8774 metadata: 8775 novncproxy: 6080 - keystone: + identity: + name: keystone hosts: default: keystone-api path: /v3 - type: identity scheme: 'http' port: - admin: 35357 - public: 5000 - neutron: + admin: 35357 + api: 5000 + network: + name: neutron hosts: default: neutron-server path: null - type: network scheme: 'http' port: api: 9696 diff --git a/nova/templates/bin/_post.sh.tpl b/nova/templates/bin/_post.sh.tpl deleted file mode 100644 index e20aaf8b..00000000 --- a/nova/templates/bin/_post.sh.tpl +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -ex -export HOME=/tmp - -ansible localhost -vvv -m kolla_keystone_service -a "service_name=nova \ -service_type=compute \ -description='Openstack Compute' \ -endpoint_region={{ .Values.keystone.nova_region_name }} \ -url='{{ include "helm-toolkit.endpoint_nova_api_internal" . }}' \ -interface=admin \ -region_name={{ .Values.keystone.admin_region_name }} \ -auth='{{ include "helm-toolkit.keystone_auth" .}}'" \ --e "{'openstack_nova_auth':{{ include "helm-toolkit.keystone_auth" .}}}" - -ansible localhost -vvv -m kolla_keystone_service -a "service_name=nova \ -service_type=compute \ -description='Openstack Compute' \ -endpoint_region={{ .Values.keystone.nova_region_name }} \ -url='{{ include "helm-toolkit.endpoint_nova_api_internal" . }}' \ -interface=internal \ -region_name={{ .Values.keystone.admin_region_name }} \ -auth='{{ include "helm-toolkit.keystone_auth" .}}'" \ --e "{'openstack_nova_auth':{{ include "helm-toolkit.keystone_auth" .}}}" - -ansible localhost -vvv -m kolla_keystone_service -a "service_name=nova \ -service_type=compute \ -description='Openstack Compute' \ -endpoint_region={{ .Values.keystone.nova_region_name }} \ -url='{{ include "helm-toolkit.endpoint_nova_api_internal" . }}' \ -interface=public \ -region_name={{ .Values.keystone.admin_region_name }} \ -auth='{{ include "helm-toolkit.keystone_auth" .}}'" \ --e "{'openstack_nova_auth':{{ include "helm-toolkit.keystone_auth" .}}}" - -ansible localhost -vvv -m kolla_keystone_user -a "project=service \ -user={{ .Values.keystone.nova_user }} \ -password={{ .Values.keystone.nova_password }} \ -role=admin \ -region_name={{ .Values.keystone.nova_region_name }} \ -auth='{{ include "helm-toolkit.keystone_auth" .}}'" \ --e "{'openstack_nova_auth':{{ include "helm-toolkit.keystone_auth" .}}}" - -cat </tmp/openrc -export OS_USERNAME={{.Values.keystone.admin_user}} -export OS_PASSWORD={{.Values.keystone.admin_password}} -export OS_PROJECT_DOMAIN_NAME={{.Values.keystone.domain_name}} -export OS_USER_DOMAIN_NAME={{.Values.keystone.domain_name}} -export OS_PROJECT_NAME={{.Values.keystone.admin_project_name}} -export OS_AUTH_URL={{include "helm-toolkit.endpoint_keystone_internal" .}} -export OS_AUTH_STRATEGY=keystone -export OS_REGION_NAME={{.Values.keystone.admin_region_name}} -export OS_INSECURE=1 -EOF - -. /tmp/openrc -env -openstack --debug role create _member_ --or-show diff --git a/nova/templates/configmap-bin.yaml b/nova/templates/configmap-bin.yaml index aa55edae..8235102a 100644 --- a/nova/templates/configmap-bin.yaml +++ b/nova/templates/configmap-bin.yaml @@ -19,9 +19,13 @@ metadata: data: db-sync.sh: | {{ tuple "bin/_db-sync.sh.tpl" . | include "helm-toolkit.template" | indent 4 }} + ks-service.sh: |+ +{{- include "helm-toolkit.keystone_service" . | indent 4 }} + ks-endpoints.sh: |+ +{{- include "helm-toolkit.keystone_endpoints" . | indent 4 }} + ks-user.sh: |+ +{{- include "helm-toolkit.keystone_user" . | indent 4 }} init.sh: | {{ tuple "bin/_init.sh.tpl" . | include "helm-toolkit.template" | indent 4 }} - post.sh: | -{{ tuple "bin/_post.sh.tpl" . | include "helm-toolkit.template" | indent 4 }} libvirt.sh: | {{ tuple "bin/_libvirt.sh.tpl" . 
| include "helm-toolkit.template" | indent 4 }} diff --git a/nova/templates/etc/_nova.conf.tpl b/nova/templates/etc/_nova.conf.tpl index 845a39db..68c4dcf8 100644 --- a/nova/templates/etc/_nova.conf.tpl +++ b/nova/templates/etc/_nova.conf.tpl @@ -57,23 +57,26 @@ lock_path = /var/lib/nova/tmp workers = {{ .Values.nova.default.conductor_workers }} [glance] -api_servers = {{ include "helm-toolkit.endpoint_glance_api_internal" . }} +api_servers = {{ tuple "image" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} num_retries = 3 [cinder] catalog_info = volume:cinder:internalURL [neutron] -url = {{ include "helm-toolkit.endpoint_neutron_api_internal" . }} +url = {{ tuple "network" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} metadata_proxy_shared_secret = {{ .Values.neutron.metadata_secret }} service_metadata_proxy = True -auth_url = {{ include "helm-toolkit.endpoint_keystone_admin" . }} +memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" +auth_version = v3 +auth_url = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} auth_type = password -project_domain_name = default -user_domain_id = default -project_name = service +region_name = {{ .Values.keystone.neutron_region_name }} +project_domain_name = {{ .Values.keystone.neutron_project_domain }} +project_name = {{ .Values.keystone.neutron_project_name }} +user_domain_name = {{ .Values.keystone.neutron_user_domain }} username = {{ .Values.keystone.neutron_user }} password = {{ .Values.keystone.neutron_password }} @@ -86,12 +89,14 @@ connection = mysql+pymysql://{{ .Values.database.nova_user }}:{{ .Values.databas max_retries = -1 [keystone_authtoken] -auth_uri = {{ include "helm-toolkit.endpoint_keystone_internal" . }} -auth_url = {{ include "helm-toolkit.endpoint_keystone_admin" . }} +memcached_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" +auth_version = v3 +auth_url = {{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" }} auth_type = password -project_domain_id = default -user_domain_id = default -project_name = service +region_name = {{ .Values.keystone.nova_region_name }} +project_domain_name = {{ .Values.keystone.nova_project_domain }} +project_name = {{ .Values.keystone.nova_project_name }} +user_domain_name = {{ .Values.keystone.nova_user_domain }} username = {{ .Values.keystone.nova_user }} password = {{ .Values.keystone.nova_password }} @@ -116,7 +121,7 @@ compute = auto [cache] enabled = True backend = oslo_cache.memcache_pool -memcache_servers = {{ .Values.memcached.address }} +memcache_servers = "{{ .Values.memcached.host }}:{{ .Values.memcached.port }}" [wsgi] api_paste_config = /etc/nova/api-paste.ini diff --git a/nova/templates/job-db-init.yaml b/nova/templates/job-db-init.yaml index 94009c4d..7107b975 100644 --- a/nova/templates/job-db-init.yaml +++ b/nova/templates/job-db-init.yaml @@ -13,7 +13,7 @@ # limitations under the License. {{- $envAll := . }} -{{- $dependencies := .Values.dependencies.init }} +{{- $dependencies := .Values.dependencies.db_init }} apiVersion: batch/v1 kind: Job metadata: diff --git a/nova/templates/job-ks-endpoints.yaml b/nova/templates/job-ks-endpoints.yaml new file mode 100644 index 00000000..3852a0e9 --- /dev/null +++ b/nova/templates/job-ks-endpoints.yaml @@ -0,0 +1,73 @@ +# Copyright 2017 The Openstack-Helm Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . }} +{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "nova-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_endpoints }} +apiVersion: batch/v1 +kind: Job +metadata: + name: nova-ks-endpoints +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "compute" }} +{{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.nova_ks_endpoints.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.nova_ks_endpoints.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.nova_ks_endpoints.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.nova_ks_endpoints.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "helm-toolkit.keystone_endpoint_uri_lookup" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: nova-bin diff --git a/nova/templates/job-ks-service.yaml b/nova/templates/job-ks-service.yaml new file mode 100644 index 00000000..a30cdc16 --- /dev/null +++ b/nova/templates/job-ks-service.yaml @@ -0,0 +1,67 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}} +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "nova-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_service }} +apiVersion: batch/v1 +kind: Job +metadata: + name: nova-ks-service +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "compute" }} + - name: {{ $osServiceType }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.nova_ks_service.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.nova_ks_service.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.nova_ks_service.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.nova_ks_service.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.keystone_endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: nova-bin diff --git a/nova/templates/job-post.yaml b/nova/templates/job-ks-user.yaml similarity index 51% rename from nova/templates/job-post.yaml rename to nova/templates/job-ks-user.yaml index ab160ca7..43c62851 100644 --- a/nova/templates/job-post.yaml +++ b/nova/templates/job-ks-user.yaml @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "nova-env-keystone-admin" }} +{{- $ksUserSecret := .Values.keystone.user_secret | default "nova-env-keystone-user" }} {{- $envAll := . 
}} -{{- $dependencies := .Values.dependencies.post }} +{{- $dependencies := .Values.dependencies.ks_user }} apiVersion: batch/v1 kind: Job metadata: - name: nova-post + name: nova-ks-user spec: template: metadata: @@ -30,35 +32,38 @@ spec: nodeSelector: {{ .Values.labels.control_node_selector_key }}: {{ .Values.labels.control_node_selector_value }} containers: - - name: nova-post - image: {{ .Values.images.post }} + - name: nova-ks-user + image: {{ .Values.images.ks_user }} imagePullPolicy: {{ .Values.images.pull_policy }} {{- if .Values.resources.enabled }} resources: requests: - memory: {{ .Values.resources.nova_post.requests.memory | quote }} - cpu: {{ .Values.resources.nova_post.requests.cpu | quote }} + memory: {{ .Values.resources.nova_ks_user.requests.memory | quote }} + cpu: {{ .Values.resources.nova_ks_user.requests.cpu | quote }} limits: - memory: {{ .Values.resources.nova_post.limits.memory | quote }} - cpu: {{ .Values.resources.nova_post.limits.cpu | quote }} + memory: {{ .Values.resources.nova_ks_user.limits.memory | quote }} + cpu: {{ .Values.resources.nova_ks_user.limits.cpu | quote }} {{- end }} command: - bash - - /tmp/post.sh - env: - - name: ANSIBLE_LIBRARY - value: /usr/share/ansible/ + - /tmp/ks-user.sh volumeMounts: - - name: novaconf - mountPath: /etc/nova/nova.conf - subPath: nova.conf - - name: nova-bin - mountPath: /tmp/post.sh - subPath: post.sh + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "nova" +{{- with $env := dict "ksUserSecret" $ksUserSecret }} +{{- include "helm-toolkit.keystone_user_create_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.nova_user_role | quote }} volumes: - - name: novaconf - configMap: - name: nova-etc - - name: nova-bin + - name: ks-user-sh configMap: name: nova-bin diff --git a/nova/templates/secret-keystone-admin.env.yaml b/nova/templates/secret-keystone-admin.env.yaml new file mode 100644 index 00000000..fc978bc7 --- /dev/null +++ b/nova/templates/secret-keystone-admin.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: nova-env-keystone-admin +type: Opaque +data: + OS_AUTH_URL: | +{{ tuple "identity" "admin" "admin" . 
| include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.admin_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.admin_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.admin_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.admin_password | b64enc | indent 4 }} diff --git a/nova/templates/secret-keystone-user.env.yaml b/nova/templates/secret-keystone-user.env.yaml new file mode 100644 index 00000000..6bb58661 --- /dev/null +++ b/nova/templates/secret-keystone-user.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: nova-env-keystone-user +type: Opaque +data: + OS_AUTH_URL: | +{{ tuple "identity" "internal" "api" . | include "helm-toolkit.keystone_endpoint_uri_lookup" | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.nova_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.nova_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.nova_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.nova_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.nova_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.nova_password | b64enc | indent 4 }} diff --git a/nova/values.yaml b/nova/values.yaml index 981f114f..808ef434 100644 --- a/nova/values.yaml +++ b/nova/values.yaml @@ -29,6 +29,9 @@ compute_replicas: 1 images: db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton db_sync: quay.io/stackanetes/stackanetes-nova-api:newton + ks_user: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_service: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + ks_endpoints: quay.io/stackanetes/stackanetes-kolla-toolbox:newton api: quay.io/stackanetes/stackanetes-nova-api:newton conductor: quay.io/stackanetes/stackanetes-nova-conductor:newton scheduler: quay.io/stackanetes/stackanetes-nova-scheduler:newton @@ -36,8 +39,7 @@ images: consoleauth: quay.io/stackanetes/stackanetes-nova-consoleauth:newton compute: quay.io/stackanetes/stackanetes-nova-compute:newton libvirt: quay.io/stackanetes/stackanetes-nova-libvirt:newton - post: quay.io/stackanetes/stackanetes-kolla-toolbox:newton - dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 pull_policy: "IfNotPresent" upgrades: @@ -86,18 +88,26 @@ database: keystone: admin_user: "admin" + admin_user_domain: "default" admin_password: "password" admin_project_name: "admin" + admin_project_domain: "default" admin_region_name: "RegionOne" - domain_name: "default" - tenant_name: "admin" neutron_user: "neutron" + neutron_user_domain: "default" + 
neutron_user_role: "admin" neutron_password: "password" + neutron_project_name: "service" + neutron_project_domain: "default" neutron_region_name: "RegionOne" nova_user: "nova" + nova_user_domain: "default" + nova_user_role: "admin" nova_password: "password" + nova_project_name: "service" + nova_project_domain: "default" nova_region_name: "RegionOne" rabbitmq: @@ -118,47 +128,41 @@ neutron: metadata_secret: "password" memcached: - address: "memcached:11211" + host: memcached + port: 11211 dependencies: api: jobs: - - keystone-db-sync - - nova-db-init - nova-db-sync + - nova-ks-user + - nova-ks-endpoints service: - mariadb - db_sync: + db_init: jobs: - - nova-db-init - - keystone-db-init - mariadb-seed service: - mariadb db_sync: jobs: - nova-db-init - - keystone-db-init - - mariadb-seed - - keystone-db-sync service: - mariadb - post: - jobs: - - nova-db-init - - keystone-db-init - - mariadb-seed + ks_user: service: - - mariadb - keystone-api - init: - jobs: - - mariadb-seed + ks_service: service: - - mariadb + - keystone-api + ks_endpoints: + jobs: + - nova-ks-service + service: + - keystone-api compute: jobs: - - nova-post + - nova-db-sync service: - keystone-api - nova-api @@ -166,17 +170,12 @@ dependencies: - ovs-agent libvirt: jobs: - - nova-db-init - - nova-post - nova-db-sync service: - keystone-api - nova-api consoleauth: jobs: - - mariadb-seed - - keystone-db-sync - - nova-db-init - nova-db-sync service: - mariadb @@ -195,39 +194,39 @@ dependencies: # values, but should include all endpoints # required by this chart endpoints: - glance: - hosts: - default: glance-api - type: image - path: null - scheme: 'http' - port: - api: 9292 - registry: 9191 - nova: + compute: + name: nova hosts: default: nova-api path: "/v2/%(tenant_id)s" - type: compute scheme: 'http' port: api: 8774 metadata: 8775 novncproxy: 6080 - keystone: + identity: + name: keystone hosts: default: keystone-api path: /v3 - type: identity scheme: 'http' port: - admin: 35357 - public: 5000 - neutron: + api: 5000 + admin: 35357 + image: + name: glance + hosts: + default: glance-api + path: null + scheme: 'http' + port: + api: 9292 + registry: 9191 + network: + name: neutron hosts: default: neutron-server path: null - type: network scheme: 'http' port: api: 9696 diff --git a/openstack/.helmignore b/openstack/.helmignore deleted file mode 100644 index 302b9211..00000000 --- a/openstack/.helmignore +++ /dev/null @@ -1,27 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj - -bin/ -etc/ -patches/ -*.py -Makefile \ No newline at end of file diff --git a/openstack/Chart.yaml b/openstack/Chart.yaml deleted file mode 100755 index 19e9e7eb..00000000 --- a/openstack/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -apiVersion: v1 -description: A Helm chart for Kubernetes -name: openstack -version: 0.1.0 diff --git a/openstack/requirements.yaml b/openstack/requirements.yaml deleted file mode 100644 index 0e3319d6..00000000 --- a/openstack/requirements.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -dependencies: - - name: helm-toolkit - version: 0.1.0 - repository: http://localhost:8879/charts - - name: memcached - repository: http://localhost:8879/charts - version: 0.1.0 - - name: rabbitmq - repository: http://localhost:8879/charts - version: 0.1.0 - - name: mariadb - repository: http://localhost:8879/charts - version: 0.1.0 - - name: keystone - repository: http://localhost:8879/charts - version: 0.1.0 diff --git a/openstack/values.yaml b/openstack/values.yaml deleted file mode 100644 index d9aba7c6..00000000 --- a/openstack/values.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2017 The Openstack-Helm Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default values for openstack. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. -# name: value - -mariadb: - images: - mariadb: quay.io/stackanetes/stackanetes-mariadb:newton - volume: - size: 20Gi - labels: - control_node_label: openstack-control-plane - - images: - ceph_rbd_job: quay.io/attcomdev/ceph-daemon:latest - - labels: - control_node_label: openstack-control-plane diff --git a/rabbitmq/values.yaml b/rabbitmq/values.yaml index c353b815..adb02dbc 100644 --- a/rabbitmq/values.yaml +++ b/rabbitmq/values.yaml @@ -32,7 +32,7 @@ resources: labels: node_selector_key: openstack-control-plane node_selector_value: enabled - + upgrades: revision_history: 3 pod_replacement_strategy: RollingUpdate @@ -40,9 +40,9 @@ upgrades: max_unavailable: 1 max_surge: 3 auth: - default_user: openstack + default_user: rabbitmq default_pass: password - admin_user: rabbitmq + admin_user: admin admin_pass: password network:
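As an illustration of the values introduced above: the nova chart now takes its Keystone admin credentials from a Secret (defaulting to `nova-env-keystone-admin`), splits the old `memcached.address` into `memcached.host`/`memcached.port`, and registers the service, endpoints, and user through the new `nova-ks-*` jobs instead of `nova-post`. A minimal sketch of overriding a few of these values at install time follows; the release name, the `local` repository alias, and the `openstack` namespace are assumptions for illustration only and are not part of this change.

```
# Sketch only: override a handful of the new nova values at deploy time.
# Assumes a local chart repo alias "local" and the "openstack" namespace.
helm install --name=nova local/nova --namespace=openstack \
  --set memcached.host=memcached \
  --set memcached.port=11211 \
  --set keystone.nova_user_role=admin \
  --set keystone.admin_secret=nova-env-keystone-admin
```

Once the `nova-ks-service` and `nova-ks-endpoints` jobs complete, the registration can be verified with the standard client, e.g. `openstack endpoint list --service compute`, using the credentials carried in the new Secrets.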