From 285127daefa3b2c855196c29a1378dd659f311c8 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Tue, 14 Jun 2016 01:35:02 -0700 Subject: [PATCH] examples/bootkube: Update self-hosted Kubernetes deployment * scp kubeconfig to hosts rather than insecurely distributing credentials within Ignition configs. This is also easier than copy-pasting k8s secrets into machine metadata (slow). * Self-hosted Kubernetes machine configurations can be versioned without containing Kubernetes credentials * Use path-based activation for the host kubelet * Update from Kubernetes v1.2.2 to v1.3.0-alpha.5_coreos.0. * Update host kubelet flags accordingly --- Documentation/bootkube.md | 17 +++++---- examples/groups/bootkube-install/node1.json | 3 -- examples/groups/bootkube-install/node2.json | 8 ++--- examples/groups/bootkube-install/node3.json | 8 ++--- examples/groups/bootkube/node1.json | 3 -- examples/groups/bootkube/node2.json | 8 ++--- examples/groups/bootkube/node3.json | 8 ++--- examples/ignition/bootkube-master.yaml | 38 +++++++++----------- examples/ignition/bootkube-worker.yaml | 39 +++++++++------------ 9 files changed, 58 insertions(+), 74 deletions(-) diff --git a/Documentation/bootkube.md b/Documentation/bootkube.md index 2a9d1e51..576ce3f0 100644 --- a/Documentation/bootkube.md +++ b/Documentation/bootkube.md @@ -5,7 +5,7 @@ The self-hosted Kubernetes examples provision a 3 node cluster with etcd, flanne ## Experimental -Self-hosted Kubernetes is under very active development by CoreOS. We're working on upstreaming the required Hyperkube patches. Be aware that a deployment with a single apiserver cannot tolerate its failure without intervention. We'll be improving this to allow CoreOS auto-updates. +Self-hosted Kubernetes is under very active development by CoreOS. We're working on upstreaming the required Hyperkube patches. Be aware that a deployment with a single apiserver cannot tolerate its failure. We'll be improving this to allow CoreOS auto-updates. 
## Requirements @@ -34,14 +34,11 @@ Use the `bootkube` tool to render Kubernetes manifests and credentials into an ` bootkube render --asset-dir=assets --api-servers=https://172.15.0.21:443 --etcd-servers=http://172.15.0.21:2379 --api-server-alt-names=IP=172.15.0.21 -Copy the `certificate-authority-data`, `client-certificate-data`, and `client-key-data` from the generated `assets/auth/kubeconfig` file into each master group definition (e.g. under `examples/groups/bootkube` or `examples/groups/bootkube-install`). Also, add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys). +Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys). { "profile": "bootkube-worker", "metadata": { - "k8s_certificate_authority": "...", - "k8s_client_certificate": "...", - "k8s_client_key": "...", "ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"] } } @@ -58,12 +55,18 @@ Create a network boot environment with `coreos/dnsmasq` and create VMs with `scr We're ready to use [bootkube](https://github.com/coreos/bootkube) to create a temporary control plane and bootstrap self-hosted Kubernetes cluster. This is a **one-time** procedure. -Copy the `bootkube` generated assets to any one of the master nodes. +Secure copy the `bootkube` generated assets to any one of the master nodes. scp -r assets core@172.15.0.21:/home/core/assets scp $(which bootkube) core@172.15.0.21:/home/core -Connect to that Kubernetes master node, +Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node (repeat for 172.15.0.22, 172.15.0.23). 
+ + scp assets/auth/kubeconfig core@172.15.0.21:/home/core/kubeconfig + ssh core@172.15.0.21 + sudo mv kubeconfig /etc/kubernetes/kubeconfig + +Connect to the Kubernetes master node, ssh core@172.15.0.21 diff --git a/examples/groups/bootkube-install/node1.json b/examples/groups/bootkube-install/node1.json index dcbdd958..157e55b6 100644 --- a/examples/groups/bootkube-install/node1.json +++ b/examples/groups/bootkube-install/node1.json @@ -14,9 +14,6 @@ "k8s_master_endpoint": "https://172.15.0.21:443", "k8s_pod_network": "10.2.0.0/16", "k8s_service_ip_range": "10.3.0.0/24", - "k8s_certificate_authority": "ADD ME", - "k8s_client_certificate": "ADD ME", - "k8s_client_key": "ADD ME", "k8s_etcd_endpoints": "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379", "networkd_address": "172.15.0.21/16", "networkd_dns": "172.15.0.3", diff --git a/examples/groups/bootkube-install/node2.json b/examples/groups/bootkube-install/node2.json index 36c766cc..0602dea0 100644 --- a/examples/groups/bootkube-install/node2.json +++ b/examples/groups/bootkube-install/node2.json @@ -14,11 +14,11 @@ "k8s_master_endpoint": "https://172.15.0.21:443", "k8s_pod_network": "10.2.0.0/16", "k8s_service_ip_range": "10.3.0.0/24", - "k8s_certificate_authority": "ADD ME", - "k8s_client_certificate": "ADD ME", - "k8s_client_key": "ADD ME", "networkd_address": "172.15.0.22/16", "networkd_dns": "172.15.0.3", - "networkd_gateway": "172.15.0.1" + "networkd_gateway": "172.15.0.1", + "ssh_authorized_keys": [ + "ADD ME" + ] } } diff --git a/examples/groups/bootkube-install/node3.json b/examples/groups/bootkube-install/node3.json index 4a552a5e..f2a9cc4a 100644 --- a/examples/groups/bootkube-install/node3.json +++ b/examples/groups/bootkube-install/node3.json @@ -14,11 +14,11 @@ "k8s_master_endpoint": "https://172.15.0.21:443", "k8s_pod_network": "10.2.0.0/16", "k8s_service_ip_range": "10.3.0.0/24", - "k8s_certificate_authority": "ADD ME", - "k8s_client_certificate": "ADD ME", - 
"k8s_client_key": "ADD ME", "networkd_address": "172.15.0.23/16", "networkd_dns": "172.15.0.3", - "networkd_gateway": "172.15.0.1" + "networkd_gateway": "172.15.0.1", + "ssh_authorized_keys": [ + "ADD ME" + ] } } diff --git a/examples/groups/bootkube/node1.json b/examples/groups/bootkube/node1.json index d6fc5438..e3815997 100644 --- a/examples/groups/bootkube/node1.json +++ b/examples/groups/bootkube/node1.json @@ -14,9 +14,6 @@ "k8s_master_endpoint": "https://172.15.0.21:443", "k8s_pod_network": "10.2.0.0/16", "k8s_service_ip_range": "10.3.0.0/24", - "k8s_certificate_authority": "ADD ME", - "k8s_client_certificate": "ADD ME", - "k8s_client_key": "ADD ME", "networkd_address": "172.15.0.21/16", "networkd_dns": "172.15.0.3", "networkd_gateway": "172.15.0.1", diff --git a/examples/groups/bootkube/node2.json b/examples/groups/bootkube/node2.json index 4f15b241..0acbd84f 100644 --- a/examples/groups/bootkube/node2.json +++ b/examples/groups/bootkube/node2.json @@ -13,12 +13,12 @@ "k8s_master_endpoint": "https://172.15.0.21:443", "k8s_pod_network": "10.2.0.0/16", "k8s_service_ip_range": "10.3.0.0/24", - "k8s_certificate_authority": "ADD ME", - "k8s_client_certificate": "ADD ME", - "k8s_client_key": "ADD ME", "networkd_address": "172.15.0.22/16", "networkd_dns": "172.15.0.3", "networkd_gateway": "172.15.0.1", - "pxe": "true" + "pxe": "true", + "ssh_authorized_keys": [ + "ADD ME" + ] } } diff --git a/examples/groups/bootkube/node3.json b/examples/groups/bootkube/node3.json index 723b6aea..105a5bd0 100644 --- a/examples/groups/bootkube/node3.json +++ b/examples/groups/bootkube/node3.json @@ -13,12 +13,12 @@ "k8s_master_endpoint": "https://172.15.0.21:443", "k8s_pod_network": "10.2.0.0/16", "k8s_service_ip_range": "10.3.0.0/24", - "k8s_certificate_authority": "ADD ME", - "k8s_client_certificate": "ADD ME", - "k8s_client_key": "ADD ME", "networkd_address": "172.15.0.23/16", "networkd_dns": "172.15.0.3", "networkd_gateway": "172.15.0.1", - "pxe": "true" + "pxe": "true", + 
"ssh_authorized_keys": [ + "ADD ME" + ] } } diff --git a/examples/ignition/bootkube-master.yaml b/examples/ignition/bootkube-master.yaml index 21e3506e..d656c6ed 100644 --- a/examples/ignition/bootkube-master.yaml +++ b/examples/ignition/bootkube-master.yaml @@ -30,22 +30,29 @@ systemd: [Unit] Requires=flanneld.service After=flanneld.service - - name: kubelet.service + - name: kubelet.path enable: true + contents: | + [Unit] + Description=Watch for kubeconfig + [Path] + PathExists=/etc/kubernetes/kubeconfig + [Install] + WantedBy=multi-user.target + - name: kubelet.service contents: | [Unit] Description=Kubelet via Hyperkube ACI Requires=flanneld.service After=flanneld.service [Service] - Environment=KUBELET_ACI=quay.io/aaron_levy/hyperkube - Environment=KUBELET_VERSION=v1.2.2_runonce.0 + Environment=KUBELET_ACI=quay.io/coreos/hyperkube + Environment=KUBELET_VERSION=v1.3.0-alpha.5_coreos.0 ExecStart=/usr/lib/coreos/kubelet-wrapper \ - --runonce \ - --runonce-timeout=60s \ --api-servers={{.k8s_master_endpoint}} \ --kubeconfig=/etc/kubernetes/kubeconfig \ --lock-file=/var/run/lock/kubelet.lock \ + --exit-on-lock-contention \ --allow-privileged \ --hostname-override={{.ipv4_address}} \ --node-labels=master=true \ @@ -77,25 +84,9 @@ storage: format: "ext4" {{end}} files: - - path: /etc/kubernetes/kubeconfig + - path: /etc/kubernetes/empty mode: 0644 contents: | - apiVersion: v1 - kind: Config - clusters: - - name: local - cluster: - server: {{.k8s_master_endpoint}} - certificate-authority-data: {{.k8s_certificate_authority}} - users: - - name: kubelet - user: - client-certificate-data: {{.k8s_client_certificate}} - client-key-data: {{.k8s_client_key}} - contexts: - - context: - cluster: local - user: kubelet - path: /opt/init-flannel mode: 0544 contents: | @@ -123,6 +114,8 @@ storage: fi } init_flannel + +{{ if not (index . 
"skip_networkd") }} networkd: units: - name: 10-static.network @@ -133,6 +126,7 @@ networkd: Gateway={{.networkd_gateway}} DNS={{.networkd_dns}} Address={{.networkd_address}} +{{end}} {{ if index . "ssh_authorized_keys" }} passwd: diff --git a/examples/ignition/bootkube-worker.yaml b/examples/ignition/bootkube-worker.yaml index fc1ad431..abc49ebf 100644 --- a/examples/ignition/bootkube-worker.yaml +++ b/examples/ignition/bootkube-worker.yaml @@ -25,29 +25,36 @@ systemd: [Unit] Requires=flanneld.service After=flanneld.service - - name: kubelet.service + - name: kubelet.path enable: true + contents: | + [Unit] + Description=Watch for kubeconfig + [Path] + PathExists=/etc/kubernetes/kubeconfig + [Install] + WantedBy=multi-user.target + - name: kubelet.service contents: | [Unit] Description=Kubelet via Hyperkube ACI Requires=flanneld.service After=flanneld.service [Service] - Environment=KUBELET_ACI=quay.io/aaron_levy/hyperkube - Environment=KUBELET_VERSION=v1.2.2_runonce.0 + Environment=KUBELET_ACI=quay.io/coreos/hyperkube + Environment=KUBELET_VERSION=v1.3.0-alpha.5_coreos.0 ExecStart=/usr/lib/coreos/kubelet-wrapper \ - --runonce \ - --runonce-timeout=60s \ --api-servers={{.k8s_master_endpoint}} \ --kubeconfig=/etc/kubernetes/kubeconfig \ --lock-file=/var/run/lock/kubelet.lock \ + --exit-on-lock-contention \ --allow-privileged \ --hostname-override={{.ipv4_address}} \ --minimum-container-ttl-duration=3m0s \ --cluster_dns={{.k8s_dns_service_ip}} \ --cluster_domain=cluster.local Restart=always - RestartSec=10 + RestartSec=5 [Install] WantedBy=multi-user.target @@ -71,26 +78,11 @@ storage: format: "ext4" {{end}} files: - - path: /etc/kubernetes/kubeconfig + - path: /etc/kubernetes/empty mode: 0644 contents: | - apiVersion: v1 - kind: Config - clusters: - - name: local - cluster: - server: {{.k8s_master_endpoint}} - certificate-authority-data: {{.k8s_certificate_authority}} - users: - - name: kubelet - user: - client-certificate-data: {{.k8s_client_certificate}} - 
client-key-data: {{.k8s_client_key}} - contexts: - - context: - cluster: local - user: kubelet +{{ if not (index . "skip_networkd") }} networkd: units: - name: 10-static.network @@ -101,6 +93,7 @@ networkd: Gateway={{.networkd_gateway}} DNS={{.networkd_dns}} Address={{.networkd_address}} +{{end}} {{ if index . "ssh_authorized_keys" }} passwd: