From 6692148b875764f607c50ba27d1bfaf524472422 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Wed, 12 Oct 2016 02:30:51 -0700 Subject: [PATCH] examples: Update self-hosted Kubernetes to v1.4.0 * Render with dghubble/bootkube fork which supports DNS * Start with bootkube v0.2.0 with rkt * Update on-host hyperkube to v1.4.0_coreos.0 --- CHANGES.md | 2 +- Documentation/bootkube.md | 52 +++++++++---------- Documentation/kubernetes.md | 6 +-- examples/README.md | 4 +- examples/groups/bootkube-install/install.json | 4 +- examples/groups/rktnetes-install/install.json | 2 +- examples/ignition/bootkube-controller.yaml | 4 +- examples/ignition/bootkube-worker.yaml | 2 +- examples/profiles/bootkube-controller.json | 4 +- examples/profiles/bootkube-worker.json | 4 +- 10 files changed, 42 insertions(+), 42 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index ae064398..c0be9f54 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -11,7 +11,7 @@ * Add Kubernetes example with rkt container runtime (i.e. rktnetes) * Upgrade Kubernetes v1.4.0 (static manifest) example clusters * Upgrade Kubernetes v1.4.0 (rktnetes) example clusters -* Upgrade Kubernetes v1.3.4 (self-hosted) example cluster +* Upgrade Kubernetes v1.4.0 (self-hosted) example cluster * Add etcd3 example cluster (PXE in-RAM or install to disk) * Use DNS names (instead of IPs) in example clusters (except bootkube) diff --git a/Documentation/bootkube.md b/Documentation/bootkube.md index 201770d6..52854c0c 100644 --- a/Documentation/bootkube.md +++ b/Documentation/bootkube.md @@ -1,11 +1,7 @@ # Self-Hosted Kubernetes -The self-hosted Kubernetes example provisions a 3 node Kubernetes v1.3.4 cluster with etcd, flannel, and a special "runonce" host Kublet. The CoreOS [bootkube](https://github.com/coreos/bootkube) tool is used to bootstrap kubelet, apiserver, scheduler, and controller-manager as pods, which can be managed via kubectl. `bootkube start` is run on any controller (i.e. master) to create a temporary control-plane and start Kubernetes components initially. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs). - -## Experimental - -Self-hosted Kubernetes is under very active development by CoreOS. +The self-hosted Kubernetes example provisions a 3 node "self-hosted" Kubernetes v1.4.0 cluster. On-host kubelets wait for an apiserver to become reachable, then yield to kubelet pods scheduled via daemonset. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run on any controller to bootstrap a temporary apiserver which schedules control plane components as pods before exiting. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs). ## Requirements @@ -15,7 +11,7 @@ Ensure that you've gone through the [bootcfg with rkt](getting-started-rkt.md) o * Create a network boot environment with `coreos/dnsmasq` * Create the example libvirt client VMs -Build and install the [fork of bootkube](https://github.com/dghubble/bootkube), which supports DNS names (needed until Kubernetes 1.4). +Build and install the [fork of bootkube](https://github.com/dghubble/bootkube), which supports DNS names. ## Examples @@ -28,7 +24,7 @@ The [examples](../examples) statically assign IP addresses to libvirt client VMs Download the CoreOS image assets referenced in the target [profile](../examples/profiles). 
- ./scripts/get-coreos alpha 1153.0.0 ./examples/assets + ./scripts/get-coreos beta 1185.1.0 ./examples/assets Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys). @@ -52,7 +48,7 @@ Client machines should boot and provision themselves. Local client VMs should ne ## bootkube -We're ready to use [bootkube](https://github.com/coreos/bootkube) to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster. +We're ready to use [bootkube](https://github.com/kubernetes-incubator/bootkube) to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster. Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node (i.e. 172.15.0.21-23 for metal0 or 172.17.0.21-23 for docker0). @@ -68,34 +64,38 @@ Secure copy the `bootkube` generated assets to any controller node and run `boot Watch the temporary control plane logs until the scheduled kubelet takes over in place of the runonce host kubelet. - I0425 12:38:23.746330 29538 status.go:87] Pod status kubelet: Running - I0425 12:38:23.746361 29538 status.go:87] Pod status kube-apiserver: Running - I0425 12:38:23.746370 29538 status.go:87] Pod status kube-scheduler: Running - I0425 12:38:23.746378 29538 status.go:87] Pod status kube-controller-manager: Running + [ 299.241291] bootkube[5]: Pod Status: kube-api-checkpoint Running + [ 299.241618] bootkube[5]: Pod Status: kube-apiserver Running + [ 299.241804] bootkube[5]: Pod Status: kube-scheduler Running + [ 299.241993] bootkube[5]: Pod Status: kube-controller-manager Running + [ 299.311743] bootkube[5]: All self-hosted control plane components successfully started -You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. They contain a `kubeconfig` and may need to be re-used if the last apiserver were to fail and bootstrapping were needed. +You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` and may need to be re-used if the last apiserver were to fail and bootstrapping were needed. ## Verify [Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the kubelet, apiserver, scheduler, and controller-manager are running as pods. 
- $ kubectl --kubeconfig=assets/auth/kubeconfig get nodes
+ $ export KUBECONFIG=assets/auth/kubeconfig
+ $ kubectl get nodes
NAME STATUS AGE
node1.example.com Ready 3m
node2.example.com Ready 3m
node3.example.com Ready 3m
- $ kubectl --kubeconfig=assets/auth/kubeconfig get pods --all-namespaces
- kube-system kube-api-checkpoint-172.15.0.21 1/1 Running 0 2m
- kube-system kube-apiserver-wq4mh 2/2 Running 0 2m
- kube-system kube-controller-manager-2834499578-y9cnl 1/1 Running 0 2m
- kube-system kube-dns-v11-2259792283-5tpld 4/4 Running 0 2m
- kube-system kube-proxy-8zr1b 1/1 Running 0 2m
- kube-system kube-proxy-i9cgw 1/1 Running 0 2m
- kube-system kube-proxy-n6qg3 1/1 Running 0 2m
- kube-system kube-scheduler-4136156790-v9892 1/1 Running 0 2m
- kube-system kubelet-9wilx 1/1 Running 0 2m
- kube-system kubelet-a6mmj 1/1 Running 0 2m
- kube-system kubelet-eomnb 1/1 Running 0 2m
+ $ kubectl get pods --all-namespaces
+ NAMESPACE NAME READY STATUS RESTARTS AGE
+ kube-system kube-api-checkpoint-node1.example.com 1/1 Running 0 4m
+ kube-system kube-apiserver-iffsz 2/2 Running 0 5m
+ kube-system kube-controller-manager-1148212084-1zx9g 1/1 Running 0 6m
+ kube-system kube-dns-v19-1003772375-evndl 3/3 Running 0 6m
+ kube-system kube-proxy-36jj8 1/1 Running 0 5m
+ kube-system kube-proxy-fdt2t 1/1 Running 0 6m
+ kube-system kube-proxy-sttgn 1/1 Running 0 5m
+ kube-system kube-scheduler-1921762579-z6jn6 1/1 Running 0 6m
+ kube-system kubelet-1ibsf 1/1 Running 0 6m
+ kube-system kubelet-65h6j 1/1 Running 0 5m
+ kube-system kubelet-d1qql 1/1 Running 0 5m

 Try deleting pods to see that the cluster is resilient to failures and machine restarts (CoreOS auto-updates).
+
diff --git a/Documentation/kubernetes.md b/Documentation/kubernetes.md
index 2e1bdec0..b57dd59b 100644
--- a/Documentation/kubernetes.md
+++ b/Documentation/kubernetes.md
@@ -47,8 +47,8 @@ Client machines should boot and provision themselves. Local client VMs should ne
 [Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`.
- $ cd /path/to/coreos-baremetal
- $ kubectl --kubeconfig=examples/assets/tls/kubeconfig get nodes
+ $ export KUBECONFIG=examples/assets/tls/kubeconfig
+ $ kubectl get nodes
 NAME STATUS AGE
 node1.example.com Ready 3m
 node2.example.com Ready 3m
@@ -56,7 +56,7 @@ Client machines should boot and provision themselves. Local client VMs should ne
 Get all pods.
- $ kubectl --kubeconfig=examples/assets/tls/kubeconfig get pods --all-namespaces + $ kubectl get pods --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE kube-system heapster-v1.2.0-4088228293-k3yn8 2/2 Running 0 3m kube-system kube-apiserver-node1.example.com 1/1 Running 0 4m diff --git a/examples/README.md b/examples/README.md index d7730f5a..51909585 100644 --- a/examples/README.md +++ b/examples/README.md @@ -16,8 +16,8 @@ These examples network boot and provision machines into CoreOS clusters using `b | k8s-install | Kubernetes cluster, installed to disk | alpha/1153.0.0 | Disk | [tutorial](../Documentation/kubernetes.md) | | rktnetes | Kubernetes cluster with rkt container runtime, 1 master, workers, TLS auth (experimental) | beta/1185.0.0 | Disk | [tutorial](../Documentation/rktnetes.md) | | rktnetes-install | Kubernetes cluster with rkt container runtime, installed to disk (experimental) | beta/1185.0.0 | Disk | [tutorial](../Documentation/rktnetes.md) | -| bootkube | iPXE boot a self-hosted Kubernetes cluster (with bootkube) | alpha/1153.0.0 | Disk | [tutorial](../Documentation/bootkube.md) | -| bootkube-install | Install a self-hosted Kubernetes cluster (with bootkube) | alpha/1153.0.0 | Disk | [tutorial](../Documentation/bootkube.md) | +| bootkube | iPXE boot a self-hosted Kubernetes cluster (with bootkube) | beta/1185.1.0 | Disk | [tutorial](../Documentation/bootkube.md) | +| bootkube-install | Install a self-hosted Kubernetes cluster (with bootkube) | beta/1185.1.0 | Disk | [tutorial](../Documentation/bootkube.md) | | torus | Torus distributed storage | alpha/1153.0.0 | Disk | [tutorial](../Documentation/torus.md) | ## Tutorials diff --git a/examples/groups/bootkube-install/install.json b/examples/groups/bootkube-install/install.json index e9fb08bb..8aaacd15 100644 --- a/examples/groups/bootkube-install/install.json +++ b/examples/groups/bootkube-install/install.json @@ -3,8 +3,8 @@ "name": "CoreOS Install", "profile": "install-reboot", "metadata": { - "coreos_channel": "alpha", - "coreos_version": "1153.0.0", + "coreos_channel": "beta", + "coreos_version": "1185.1.0", "ignition_endpoint": "http://bootcfg.foo:8080/ignition", "baseurl": "http://bootcfg.foo:8080/assets/coreos" } diff --git a/examples/groups/rktnetes-install/install.json b/examples/groups/rktnetes-install/install.json index 4e5dc307..8aaacd15 100644 --- a/examples/groups/rktnetes-install/install.json +++ b/examples/groups/rktnetes-install/install.json @@ -3,7 +3,7 @@ "name": "CoreOS Install", "profile": "install-reboot", "metadata": { - "coreos_channel": "alpha", + "coreos_channel": "beta", "coreos_version": "1185.1.0", "ignition_endpoint": "http://bootcfg.foo:8080/ignition", "baseurl": "http://bootcfg.foo:8080/assets/coreos" diff --git a/examples/ignition/bootkube-controller.yaml b/examples/ignition/bootkube-controller.yaml index 6df153c1..6c51c180 100644 --- a/examples/ignition/bootkube-controller.yaml +++ b/examples/ignition/bootkube-controller.yaml @@ -46,7 +46,7 @@ systemd: [Service] Environment="RKT_OPTS=--volume=resolv,kind=host,source=/etc/resolv.conf --mount volume=resolv,target=/etc/resolv.conf --volume var-log,kind=host,source=/var/log --mount volume=var-log,target=/var/log" Environment=KUBELET_ACI=quay.io/coreos/hyperkube - Environment=KUBELET_VERSION=v1.3.4_coreos.0 + Environment=KUBELET_VERSION=v1.4.0_coreos.0 ExecStartPre=/usr/bin/systemctl is-active flanneld.service ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests ExecStartPre=/bin/mkdir -p /srv/kubernetes/manifests @@ -111,7 +111,7 @@ 
storage: # Wrapper for bootkube start set -e BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}" - BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.1.4}" + BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.2.0}" BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/home/core/assets}" exec /usr/bin/rkt run \ --trust-keys-from-https \ diff --git a/examples/ignition/bootkube-worker.yaml b/examples/ignition/bootkube-worker.yaml index 32e53896..2a596895 100644 --- a/examples/ignition/bootkube-worker.yaml +++ b/examples/ignition/bootkube-worker.yaml @@ -37,7 +37,7 @@ systemd: [Service] Environment="RKT_OPTS=--volume=resolv,kind=host,source=/etc/resolv.conf --mount volume=resolv,target=/etc/resolv.conf --volume var-log,kind=host,source=/var/log --mount volume=var-log,target=/var/log" Environment=KUBELET_ACI=quay.io/coreos/hyperkube - Environment=KUBELET_VERSION=v1.3.4_coreos.0 + Environment=KUBELET_VERSION=v1.4.0_coreos.0 ExecStartPre=/usr/bin/systemctl is-active flanneld.service ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests ExecStartPre=/bin/mkdir -p /srv/kubernetes/manifests diff --git a/examples/profiles/bootkube-controller.json b/examples/profiles/bootkube-controller.json index ef9dcba4..2317c9e8 100644 --- a/examples/profiles/bootkube-controller.json +++ b/examples/profiles/bootkube-controller.json @@ -2,8 +2,8 @@ "id": "bootkube-controller", "name": "bootkube Ready Controller", "boot": { - "kernel": "/assets/coreos/1153.0.0/coreos_production_pxe.vmlinuz", - "initrd": ["/assets/coreos/1153.0.0/coreos_production_pxe_image.cpio.gz"], + "kernel": "/assets/coreos/1185.1.0/coreos_production_pxe.vmlinuz", + "initrd": ["/assets/coreos/1185.1.0/coreos_production_pxe_image.cpio.gz"], "cmdline": { "root": "/dev/sda1", "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}", diff --git a/examples/profiles/bootkube-worker.json b/examples/profiles/bootkube-worker.json index 50034510..c4375216 100644 --- a/examples/profiles/bootkube-worker.json +++ b/examples/profiles/bootkube-worker.json @@ -2,8 +2,8 @@ "id": "bootkube-worker", "name": "bootkube Ready Worker", "boot": { - "kernel": "/assets/coreos/1153.0.0/coreos_production_pxe.vmlinuz", - "initrd": ["/assets/coreos/1153.0.0/coreos_production_pxe_image.cpio.gz"], + "kernel": "/assets/coreos/1185.1.0/coreos_production_pxe.vmlinuz", + "initrd": ["/assets/coreos/1185.1.0/coreos_production_pxe_image.cpio.gz"], "cmdline": { "root": "/dev/sda1", "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
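
For reference, a minimal sketch of the secure-copy and `bootkube-start` steps that Documentation/bootkube.md walks through, using the metal0 node IPs (172.15.0.21-23), the `core` user, and the `assets` / `/home/core/assets` paths from this example. The install location of the Ignition-provisioned wrapper script is an assumption; adjust it to wherever your Ignition config writes it.

    # Copy the generated kubeconfig to every node (use 172.17.0.21-23 for docker0).
    for node in 172.15.0.21 172.15.0.22 172.15.0.23; do
        scp assets/auth/kubeconfig core@$node:/home/core/kubeconfig
        ssh core@$node 'sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig'
    done

    # Copy the rendered assets to any controller and start the temporary control plane.
    scp -r assets core@172.15.0.21:/home/core/assets
    ssh core@172.15.0.21 'sudo ./bootkube-start'   # wrapper path is an assumption

Once bootkube reports that all self-hosted control plane components started, the on-host kubelet yields to the daemonset-scheduled kubelet pods and the cluster can be verified with `kubectl` as shown in the documentation above.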