diff --git a/CHANGES.md b/CHANGES.md
index 6c7d43d4..bd41b057 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,12 +1,16 @@
 # coreos-baremetal bootcfg
 
+## Latest
+
 #### Examples
 
-* Upgrade Kubernetes v1.4.3 (static manifest) example clusters
-* Upgrade Kubernetes v1.4.3 (rktnetes) example clusters
-* Upgrade Kubernetes v1.4.3 (self-hosted) example cluster
+* Upgrade Kubernetes v1.4.6 (static manifest) example clusters
+* Upgrade Kubernetes v1.4.6 (rktnetes) example clusters
+* Upgrade Kubernetes v1.4.6 (self-hosted) example cluster
 
-## Latest
+## v0.4.1 (2016-10-17)
+
+#### Improvements
 
 * Add ARM and ARM64 release architectures (#309)
 * Add guide for installing bootcfg on CoreOS (#306)
diff --git a/Documentation/bootkube.md b/Documentation/bootkube.md
index e5f4205f..902a132b 100644
--- a/Documentation/bootkube.md
+++ b/Documentation/bootkube.md
@@ -1,7 +1,7 @@
 # Self-Hosted Kubernetes
 
-The self-hosted Kubernetes example provisions a 3 node "self-hosted" Kubernetes v1.4.3 cluster. On-host kubelets wait for an apiserver to become reachable, then yield to kubelet pods scheduled via daemonset. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run on any controller to bootstrap a temporary apiserver which schedules control plane components as pods before exiting. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
+The self-hosted Kubernetes example provisions a 3 node "self-hosted" Kubernetes v1.4.6 cluster. On-host kubelets wait for an apiserver to become reachable, then yield to kubelet pods scheduled via daemonset. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run on any controller to bootstrap a temporary apiserver which schedules control plane components as pods before exiting. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
 
 ## Requirements
 
@@ -13,6 +13,9 @@ Ensure that you've gone through the [bootcfg with rkt](getting-started-rkt.md) o
 
 Build and install the [fork of bootkube](https://github.com/dghubble/bootkube), which supports DNS names.
 
+    $ bootkube version
+    Version: bd5a87af28f84898272519894b09d16c5e5df441
+
 ## Examples
 
 The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
@@ -38,7 +41,7 @@ Add your SSH public key to each machine group definition [as shown](../examples/
 
 Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Later, `bootkube` will schedule these manifests during bootstrapping and the credentials will be used to access your cluster.
 
     # If running with docker, use 172.17.0.21 instead of 172.15.0.21
-    bootkube render --asset-dir=assets --api-servers=https://172.15.0.21:443 --etcd-servers=http://node1.example.com:2379 --api-server-alt-names=DNS=node1.example.com,IP=172.15.0.21
+    bootkube render --asset-dir=assets --api-servers=https://172.15.0.21:443 --api-server-alt-names=DNS=node1.example.com,IP=172.15.0.21
 
 ## Containers
 
@@ -48,7 +51,7 @@ Client machines should boot and provision themselves. Local client VMs should ne
 
 ## bootkube
 
-We're ready to use [bootkube](https://github.com/kubernetes-incubator/bootkube) to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
+We're ready to use bootkube to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
 
 Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node (i.e. 172.15.0.21-23 for metal0 or 172.17.0.21-23 for docker0).
 
@@ -70,7 +73,7 @@ Watch the temporary control plane logs until the scheduled kubelet takes over in
     [ 299.241993] bootkube[5]: Pod Status: kube-controller-manager Running
     [ 299.311743] bootkube[5]: All self-hosted control plane components successfully started
 
-You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` and may need to be re-used if the last apiserver were to fail and bootstrapping were needed.
+You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
 
 ## Verify
 
diff --git a/Documentation/img/kubernetes-dashboard.png b/Documentation/img/kubernetes-dashboard.png
index 27c1d55d..a9f9bebe 100644
Binary files a/Documentation/img/kubernetes-dashboard.png and b/Documentation/img/kubernetes-dashboard.png differ
diff --git a/Documentation/img/tectonic-console.png b/Documentation/img/tectonic-console.png
deleted file mode 100644
index ea90c664..00000000
Binary files a/Documentation/img/tectonic-console.png and /dev/null differ
diff --git a/Documentation/kubernetes.md b/Documentation/kubernetes.md
index 37ce5b04..c83d9405 100644
--- a/Documentation/kubernetes.md
+++ b/Documentation/kubernetes.md
@@ -1,7 +1,7 @@
 # Kubernetes
 
-The Kubernetes example provisions a 3 node Kubernetes v1.4.3 cluster with one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
+The Kubernetes example provisions a 3 node Kubernetes v1.4.6 cluster with one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
 
 ## Requirements
 
@@ -72,16 +72,9 @@ Get all pods.
 
 Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod.
 
-    $ kubectl --kubeconfig=examples/assets/tls/kubeconfig port-forward kubernetes-dashboard-v1.4.0-SOME-ID 9090 --namespace=kube-system
+    $ kubectl port-forward kubernetes-dashboard-v1.4.1-SOME-ID 9090 -n=kube-system
     Forwarding from 127.0.0.1:9090 -> 9090
 
 Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/).
 
 Kubernetes Dashboard
-
-## Tectonic
-
-Sign up for [Tectonic Starter](https://tectonic.com/starter/) for free and deploy the [Tectonic Console](https://tectonic.com/enterprise/docs/latest/deployer/tectonic_console.html) with a few `kubectl` commands!
-
-Tectonic Console
-
diff --git a/Documentation/rktnetes.md b/Documentation/rktnetes.md
index f1bab301..972d2dd6 100644
--- a/Documentation/rktnetes.md
+++ b/Documentation/rktnetes.md
@@ -1,6 +1,6 @@
 # Kubernetes (with rkt)
 
-The `rktnetes` example provisions a 3 node Kubernetes v1.4.3 cluster with [rkt](https://github.com/coreos/rkt) as the container runtime. The cluster has one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
+The `rktnetes` example provisions a 3 node Kubernetes v1.4.6 cluster with [rkt](https://github.com/coreos/rkt) as the container runtime. The cluster has one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
 
 ## Requirements
 
@@ -46,8 +46,8 @@ Client machines should boot and provision themselves. Local client VMs should ne
 
 [Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`.
 
-    $ cd /path/to/coreos-baremetal
-    $ kubectl --kubeconfig=examples/assets/tls/kubeconfig get nodes
+    $ export KUBECONFIG=examples/assets/tls/kubeconfig
+    $ kubectl get nodes
     NAME                STATUS    AGE
     node1.example.com   Ready     3m
     node2.example.com   Ready     3m
@@ -55,7 +55,7 @@ Client machines should boot and provision themselves. Local client VMs should ne
 
 Get all pods.
 
-    $ kubectl --kubeconfig=examples/assets/tls/kubeconfig get pods --all-namespaces
+    $ kubectl get pods --all-namespaces
     NAMESPACE     NAME                               READY     STATUS    RESTARTS   AGE
     kube-system   heapster-v1.2.0-4088228293-k3yn8   2/2       Running   0          3m
     kube-system   kube-apiserver-node1.example.com   1/1       Running   0          4m
@@ -71,7 +71,7 @@ Get all pods.
 
 Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod.
 
-    $ kubectl --kubeconfig=examples/assets/tls/kubeconfig port-forward kubernetes-dashboard-v1.4.0-SOME-ID 9090 --namespace=kube-system
+    $ kubectl port-forward kubernetes-dashboard-v1.4.1-SOME-ID 9090 -n=kube-system
     Forwarding from 127.0.0.1:9090 -> 9090
 
 Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/).
diff --git a/examples/ignition/bootkube-controller.yaml b/examples/ignition/bootkube-controller.yaml
index 32c7fa43..c560d6c5 100644
--- a/examples/ignition/bootkube-controller.yaml
+++ b/examples/ignition/bootkube-controller.yaml
@@ -97,7 +97,7 @@ storage:
       contents:
         inline: |
           KUBELET_ACI=quay.io/coreos/hyperkube
-          KUBELET_VERSION=v1.4.3_coreos.0
+          KUBELET_VERSION=v1.4.6_coreos.0
     - path: /etc/hostname
       filesystem: root
       mode: 0644
@@ -122,7 +122,7 @@ storage:
           # Wrapper for bootkube start
           set -e
           BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
-          BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.2.2}"
+          BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.2.5}"
          BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/home/core/assets}"
           exec /usr/bin/rkt run \
             --trust-keys-from-https \
diff --git a/examples/ignition/bootkube-worker.yaml b/examples/ignition/bootkube-worker.yaml
index 30e4c80a..c7dc2029 100644
--- a/examples/ignition/bootkube-worker.yaml
+++ b/examples/ignition/bootkube-worker.yaml
@@ -87,7 +87,7 @@ storage:
       contents:
         inline: |
           KUBELET_ACI=quay.io/coreos/hyperkube
-          KUBELET_VERSION=v1.4.3_coreos.0
+          KUBELET_VERSION=v1.4.6_coreos.0
     - path: /etc/hostname
       filesystem: root
      mode: 0644
diff --git a/examples/ignition/k8s-controller.yaml b/examples/ignition/k8s-controller.yaml
index d5c95831..0fe7e9be 100644
--- a/examples/ignition/k8s-controller.yaml
+++ b/examples/ignition/k8s-controller.yaml
@@ -64,7 +64,7 @@ systemd:
         Requires=k8s-assets.target
         After=k8s-assets.target
         [Service]
-        Environment=KUBELET_VERSION=v1.4.3_coreos.0
+        Environment=KUBELET_VERSION=v1.4.6_coreos.0
         Environment="RKT_OPTS=--volume dns,kind=host,source=/etc/resolv.conf \
           --mount volume=dns,target=/etc/resolv.conf \
           --volume var-log,kind=host,source=/var/log \
@@ -150,7 +150,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-proxy
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - proxy
@@ -178,7 +178,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-apiserver
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - apiserver
@@ -238,7 +238,7 @@ storage:
           spec:
             containers:
             - name: kube-controller-manager
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - controller-manager
@@ -284,7 +284,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-scheduler
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - scheduler
diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml
index bacaa997..fdfbc38d 100644
--- a/examples/ignition/k8s-worker.yaml
+++ b/examples/ignition/k8s-worker.yaml
@@ -58,7 +58,7 @@ systemd:
         Requires=k8s-assets.target
         After=k8s-assets.target
         [Service]
-        Environment=KUBELET_VERSION=v1.4.3_coreos.0
+        Environment=KUBELET_VERSION=v1.4.6_coreos.0
         Environment="RKT_OPTS=--volume dns,kind=host,source=/etc/resolv.conf \
           --mount volume=dns,target=/etc/resolv.conf \
           --volume var-log,kind=host,source=/var/log \
@@ -157,7 +157,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-proxy
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - proxy
diff --git a/examples/ignition/rktnetes-controller.yaml b/examples/ignition/rktnetes-controller.yaml
index 1fbb8b47..da193906 100644
--- a/examples/ignition/rktnetes-controller.yaml
+++ b/examples/ignition/rktnetes-controller.yaml
@@ -64,7 +64,7 @@ systemd:
         Requires=k8s-assets.target
         After=k8s-assets.target
         [Service]
-        Environment=KUBELET_VERSION=v1.4.3_coreos.0
+        Environment=KUBELET_VERSION=v1.4.6_coreos.0
         Environment="RKT_OPTS=--volume dns,kind=host,source=/etc/resolv.conf \
           --mount volume=dns,target=/etc/resolv.conf \
           --volume rkt,kind=host,source=/opt/bin/host-rkt \
@@ -187,7 +187,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-proxy
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - proxy
@@ -221,7 +221,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-apiserver
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - apiserver
@@ -281,7 +281,7 @@ storage:
           spec:
             containers:
             - name: kube-controller-manager
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - controller-manager
@@ -327,7 +327,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-scheduler
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - scheduler
diff --git a/examples/ignition/rktnetes-worker.yaml b/examples/ignition/rktnetes-worker.yaml
index e24dfece..689c982f 100644
--- a/examples/ignition/rktnetes-worker.yaml
+++ b/examples/ignition/rktnetes-worker.yaml
@@ -58,7 +58,7 @@ systemd:
         Requires=k8s-assets.target
         After=k8s-assets.target
         [Service]
-        Environment=KUBELET_VERSION=v1.4.3_coreos.0
+        Environment=KUBELET_VERSION=v1.4.6_coreos.0
         Environment="RKT_OPTS=--volume dns,kind=host,source=/etc/resolv.conf \
           --mount volume=dns,target=/etc/resolv.conf \
           --volume rkt,kind=host,source=/opt/bin/host-rkt \
@@ -193,7 +193,7 @@ storage:
             hostNetwork: true
             containers:
             - name: kube-proxy
-              image: quay.io/coreos/hyperkube:v1.4.3_coreos.0
+              image: quay.io/coreos/hyperkube:v1.4.6_coreos.0
              command:
              - /hyperkube
              - proxy