From dabba64850078bc6820f851c6dd52a04facf59bb Mon Sep 17 00:00:00 2001
From: Dalton Hubble
Date: Mon, 22 May 2017 16:07:02 -0700
Subject: [PATCH] examples: Remove static Kubernetes and rktnetes example
 clusters

* Static Kubernetes / rktnetes examples will no longer be maintained in
  this repo or upgraded to Kubernetes v1.6. This is not considered a
  deprecation because the reference clusters are examples.
* Remove the static Kubernetes cluster examples so users don't choose them
* Self-hosted Kubernetes (bootkube) is now the standard recommended
  Kubernetes cluster configuration
---
 CHANGES.md | 1 +
 Documentation/getting-started-docker.md | 2 +-
 Documentation/getting-started-rkt.md | 2 +-
 Documentation/kubernetes.md | 88 --
 Jenkinsfile | 12 -
 README.md | 2 -
 contrib/squid/README.md | 1 -
 examples/README.md | 10 +-
 examples/groups/k8s-install/install.json | 11 -
 examples/groups/k8s-install/node1.json | 20 -
 examples/groups/k8s-install/node2.json | 18 -
 examples/groups/k8s-install/node3.json | 18 -
 examples/groups/k8s/node1.json | 20 -
 examples/groups/k8s/node2.json | 18 -
 examples/groups/k8s/node3.json | 18 -
 examples/groups/rktnetes-install/install.json | 11 -
 examples/groups/rktnetes-install/node1.json | 20 -
 examples/groups/rktnetes-install/node2.json | 18 -
 examples/groups/rktnetes-install/node3.json | 18 -
 examples/groups/rktnetes/node1.json | 20 -
 examples/groups/rktnetes/node2.json | 18 -
 examples/groups/rktnetes/node3.json | 18 -
 examples/ignition/install-shutdown.yaml | 37 -
 examples/ignition/k8s-controller.yaml | 778 ------
 examples/ignition/k8s-worker.yaml | 268 ------
 examples/profiles/install-shutdown.json | 16 -
 examples/profiles/k8s-controller.json | 17 -
 examples/profiles/k8s-worker.json | 17 -
 28 files changed, 6 insertions(+), 1491 deletions(-)
 delete mode 100644 Documentation/kubernetes.md
 delete mode 100644 examples/groups/k8s-install/install.json
 delete mode 100644 examples/groups/k8s-install/node1.json
 delete mode 100644 examples/groups/k8s-install/node2.json
 delete mode 100644 examples/groups/k8s-install/node3.json
 delete mode 100644 examples/groups/k8s/node1.json
 delete mode 100644 examples/groups/k8s/node2.json
 delete mode 100644 examples/groups/k8s/node3.json
 delete mode 100644 examples/groups/rktnetes-install/install.json
 delete mode 100644 examples/groups/rktnetes-install/node1.json
 delete mode 100644 examples/groups/rktnetes-install/node2.json
 delete mode 100644 examples/groups/rktnetes-install/node3.json
 delete mode 100644 examples/groups/rktnetes/node1.json
 delete mode 100644 examples/groups/rktnetes/node2.json
 delete mode 100644 examples/groups/rktnetes/node3.json
 delete mode 100644 examples/ignition/install-shutdown.yaml
 delete mode 100644 examples/ignition/k8s-controller.yaml
 delete mode 100644 examples/ignition/k8s-worker.yaml
 delete mode 100644 examples/profiles/install-shutdown.json
 delete mode 100644 examples/profiles/k8s-controller.json
 delete mode 100644 examples/profiles/k8s-worker.json

diff --git a/CHANGES.md b/CHANGES.md
index 23c0112e..aff728e4 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -8,6 +8,7 @@ Notable changes between releases.
* Upgrade self-hosted Kubernetes cluster examples to v1.6.4 * Add NoSchedule taint to self-hosted Kubernetes controllers +* Remove static Kubernetes and rktnetes cluster examples ## v0.6.0 (2017-04-25) diff --git a/Documentation/getting-started-docker.md b/Documentation/getting-started-docker.md index d3a1097d..5b254a85 100644 --- a/Documentation/getting-started-docker.md +++ b/Documentation/getting-started-docker.md @@ -117,4 +117,4 @@ $ sudo ./scripts/libvirt destroy ## Going further -Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](kubernetes.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`. +Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`. diff --git a/Documentation/getting-started-rkt.md b/Documentation/getting-started-rkt.md index 0c706f15..a5b686a5 100644 --- a/Documentation/getting-started-rkt.md +++ b/Documentation/getting-started-rkt.md @@ -180,4 +180,4 @@ Press ^] three times to stop any rkt pod. ## Going further -Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](kubernetes.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`. +Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`. diff --git a/Documentation/kubernetes.md b/Documentation/kubernetes.md deleted file mode 100644 index ea5e434d..00000000 --- a/Documentation/kubernetes.md +++ /dev/null @@ -1,88 +0,0 @@ -# Kubernetes - -The Kubernetes example provisions a 3 node Kubernetes v1.5.5 cluster with one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs). - -## Requirements - -Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) or [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to: - -* Use rkt or Docker to start `matchbox` -* Create a network boot environment with `coreos/dnsmasq` -* Create the example libvirt client VMs -* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`) - -## Examples - -The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. VMs are setup on the `metal0` CNI bridge for rkt or the `docker0` bridge for Docker. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md). - -* [k8s](../examples/groups/k8s) - iPXE boot a Kubernetes cluster -* [k8s-install](../examples/groups/k8s-install) - Install a Kubernetes cluster to disk -* [Lab examples](https://github.com/dghubble/metal) - Lab hardware examples - -### Assets - -Download the CoreOS image assets referenced in the target [profile](../examples/profiles). - -```sh -$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets -``` - -Optionally, add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys). 
- -Generate a root CA and Kubernetes TLS assets for components (`admin`, `apiserver`, `worker`) with SANs for `node1.example.com`, etc. - -```sh -$ rm -rf examples/assets/tls -$ ./scripts/tls/k8s-certgen -``` - -**Note**: TLS assets are served to any machines which request them, which requires a trusted network. Alternately, provisioning may be tweaked to require TLS assets be securely copied to each host. - -## Containers - -Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help. - -Client machines should boot and provision themselves. Local client VMs should network boot CoreOS in about a 1 minute and the Kubernetes API should be available after 3-4 minutes (each node downloads a ~160MB Hyperkube). If you chose `k8s-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision Kubernetes clusters on physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.). - -## Verify - -[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`. - -```sh -$ KUBECONFIG=examples/assets/tls/kubeconfig -$ kubectl get nodes -NAME STATUS AGE -node1.example.com Ready 3m -node2.example.com Ready 3m -node3.example.com Ready 3m -``` - -Get all pods. - -```sh -$ kubectl get pods --all-namespaces -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system heapster-v1.2.0-4088228293-5xbgg 2/2 Running 0 41m -kube-system kube-apiserver-node1.example.com 1/1 Running 0 40m -kube-system kube-controller-manager-node1.example.com 1/1 Running 0 40m -kube-system kube-dns-782804071-326dd 4/4 Running 0 41m -kube-system kube-dns-autoscaler-2715466192-8bm78 1/1 Running 0 41m -kube-system kube-proxy-node1.example.com 1/1 Running 0 41m -kube-system kube-proxy-node2.example.com 1/1 Running 0 41m -kube-system kube-proxy-node3.example.com 1/1 Running 0 40m -kube-system kube-scheduler-node1.example.com 1/1 Running 0 40m -kube-system kubernetes-dashboard-3543765157-2nqgh 1/1 Running 0 41m -``` - -## Kubernetes Dashboard - -Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod. - -```sh -$ kubectl port-forward kubernetes-dashboard-SOME-ID 9090 -n=kube-system -Forwarding from 127.0.0.1:9090 -> 9090 -``` - -Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/). 
- -Kubernetes Dashboard diff --git a/Jenkinsfile b/Jenkinsfile index 9b49cf4c..956ba807 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -18,18 +18,6 @@ parallel ( } } }, - k8s: { - node('fedora && bare-metal') { - stage('k8s') { - timeout(time:12, unit:'MINUTES') { - checkout scm - sh '''#!/bin/bash -e - export ASSETS_DIR=~/assets; ./tests/smoke/k8s - ''' - } - } - } - }, bootkube: { node('fedora && bare-metal') { stage('bootkube') { diff --git a/README.md b/README.md index b4233ae5..4ac7b2a1 100644 --- a/README.md +++ b/README.md @@ -48,8 +48,6 @@ Create [example](examples) clusters on-premise or locally with [QEMU/KVM](script * [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster * [Kubernetes](Documentation/bootkube.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster -* Static [Kubernetes](Documentation/kubernetes.md) (discouraged, static systemd units) -* Static [rktnetes](Documentation/rktnetes.md) (discouraged, static systemd units) ## Contrib diff --git a/contrib/squid/README.md b/contrib/squid/README.md index 74609b46..71d79758 100644 --- a/contrib/squid/README.md +++ b/contrib/squid/README.md @@ -87,7 +87,6 @@ cat policy/etcd3.json ``` (Optional) Configure Matchbox to not serve static assets by providing an empty assets-path value. -**Note**: Asset serving is required for [Kubernetes TLS](https://github.com/coreos/matchbox/blob/master/Documentation/kubernetes.md#assets). ``` # /etc/systemd/system/matchbox.service.d/override.conf [Service] diff --git a/examples/README.md b/examples/README.md index 81efc7e6..8f571586 100644 --- a/examples/README.md +++ b/examples/README.md @@ -25,14 +25,10 @@ These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/mat | simple | CoreOS with autologin, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) | | simple-install | CoreOS Install, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) | | grub | CoreOS via GRUB2 Netboot | stable/1298.7.0 | RAM | NA | -| etcd3 | A 3 node etcd3 cluster with proxies | stable/1298.7.0 | RAM | None | +| etcd3 | PXE boot 3 node etcd3 cluster with proxies | stable/1298.7.0 | RAM | None | | etcd3-install | Install a 3 node etcd3 cluster to disk | stable/1298.7.0 | Disk | None | -| k8s | Kubernetes cluster with 1 master, 2 workers, and TLS-authentication | stable/1298.7.0 | Disk | [tutorial](../Documentation/kubernetes.md) | -| k8s-install | Kubernetes cluster, installed to disk | stable/1298.7.0 | Disk | [tutorial](../Documentation/kubernetes.md) | -| rktnetes | Kubernetes cluster with rkt container runtime, 1 master, workers, TLS auth (experimental) | stable/1298.7.0 | Disk | [tutorial](../Documentation/rktnetes.md) | -| rktnetes-install | Kubernetes cluster with rkt container runtime, installed to disk (experimental) | stable/1298.7.0 | Disk | [tutorial](../Documentation/rktnetes.md) | -| bootkube | iPXE boot a self-hosted Kubernetes cluster (with bootkube) | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) | -| bootkube-install | Install a self-hosted Kubernetes cluster (with bootkube) | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) | +| bootkube | PXE boot a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) | +| bootkube-install | Install a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) | ### 
Customization diff --git a/examples/groups/k8s-install/install.json b/examples/groups/k8s-install/install.json deleted file mode 100644 index e7ce3ce2..00000000 --- a/examples/groups/k8s-install/install.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "id": "coreos-install", - "name": "CoreOS Install", - "profile": "install-reboot", - "metadata": { - "coreos_channel": "stable", - "coreos_version": "1298.7.0", - "ignition_endpoint": "http://matchbox.foo:8080/ignition", - "baseurl": "http://matchbox.foo:8080/assets/coreos" - } -} diff --git a/examples/groups/k8s-install/node1.json b/examples/groups/k8s-install/node1.json deleted file mode 100644 index ee8a4399..00000000 --- a/examples/groups/k8s-install/node1.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "id": "node1", - "name": "k8s controller", - "profile": "k8s-controller", - "selector": { - "os": "installed", - "mac": "52:54:00:a1:9c:ae" - }, - "metadata": { - "container_runtime": "docker", - "domain_name": "node1.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "etcd_name": "node1", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "k8s_pod_network": "10.2.0.0/16", - "k8s_service_ip_range": "10.3.0.0/24" - } -} diff --git a/examples/groups/k8s-install/node2.json b/examples/groups/k8s-install/node2.json deleted file mode 100644 index ec2b8e19..00000000 --- a/examples/groups/k8s-install/node2.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node2", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "os": "installed", - "mac": "52:54:00:b2:2f:86" - }, - "metadata": { - "container_runtime": "docker", - "domain_name": "node2.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379" - } -} diff --git a/examples/groups/k8s-install/node3.json b/examples/groups/k8s-install/node3.json deleted file mode 100644 index 334f9029..00000000 --- a/examples/groups/k8s-install/node3.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node3", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "os": "installed", - "mac": "52:54:00:c3:61:77" - }, - "metadata": { - "container_runtime": "docker", - "domain_name": "node3.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379" - } -} diff --git a/examples/groups/k8s/node1.json b/examples/groups/k8s/node1.json deleted file mode 100644 index 2d3e1424..00000000 --- a/examples/groups/k8s/node1.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "id": "node1", - "name": "k8s controller", - "profile": "k8s-controller", - "selector": { - "mac": "52:54:00:a1:9c:ae" - }, - "metadata": { - "container_runtime": "docker", - "domain_name": "node1.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "etcd_name": "node1", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "k8s_pod_network": "10.2.0.0/16", - "k8s_service_ip_range": "10.3.0.0/24", - "pxe": "true" - } -} diff --git 
a/examples/groups/k8s/node2.json b/examples/groups/k8s/node2.json deleted file mode 100644 index 032ca8e2..00000000 --- a/examples/groups/k8s/node2.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node2", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "mac": "52:54:00:b2:2f:86" - }, - "metadata": { - "container_runtime": "docker", - "domain_name": "node2.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "pxe": "true" - } -} diff --git a/examples/groups/k8s/node3.json b/examples/groups/k8s/node3.json deleted file mode 100644 index 386f9380..00000000 --- a/examples/groups/k8s/node3.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node3", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "mac": "52:54:00:c3:61:77" - }, - "metadata": { - "container_runtime": "docker", - "domain_name": "node3.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "pxe": "true" - } -} diff --git a/examples/groups/rktnetes-install/install.json b/examples/groups/rktnetes-install/install.json deleted file mode 100644 index e7ce3ce2..00000000 --- a/examples/groups/rktnetes-install/install.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "id": "coreos-install", - "name": "CoreOS Install", - "profile": "install-reboot", - "metadata": { - "coreos_channel": "stable", - "coreos_version": "1298.7.0", - "ignition_endpoint": "http://matchbox.foo:8080/ignition", - "baseurl": "http://matchbox.foo:8080/assets/coreos" - } -} diff --git a/examples/groups/rktnetes-install/node1.json b/examples/groups/rktnetes-install/node1.json deleted file mode 100644 index 07ad5f10..00000000 --- a/examples/groups/rktnetes-install/node1.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "id": "node1", - "name": "k8s controller", - "profile": "k8s-controller", - "selector": { - "mac": "52:54:00:a1:9c:ae", - "os": "installed" - }, - "metadata": { - "container_runtime": "rkt", - "domain_name": "node1.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "etcd_name": "node1", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "k8s_pod_network": "10.2.0.0/16", - "k8s_service_ip_range": "10.3.0.0/24" - } -} diff --git a/examples/groups/rktnetes-install/node2.json b/examples/groups/rktnetes-install/node2.json deleted file mode 100644 index 9e64c681..00000000 --- a/examples/groups/rktnetes-install/node2.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node2", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "mac": "52:54:00:b2:2f:86", - "os": "installed" - }, - "metadata": { - "container_runtime": "rkt", - "domain_name": "node2.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379" - } -} diff --git a/examples/groups/rktnetes-install/node3.json b/examples/groups/rktnetes-install/node3.json 
deleted file mode 100644 index 278f690b..00000000 --- a/examples/groups/rktnetes-install/node3.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node3", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "mac": "52:54:00:c3:61:77", - "os": "installed" - }, - "metadata": { - "container_runtime": "rkt", - "domain_name": "node3.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379" - } -} diff --git a/examples/groups/rktnetes/node1.json b/examples/groups/rktnetes/node1.json deleted file mode 100644 index bec546f1..00000000 --- a/examples/groups/rktnetes/node1.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "id": "node1", - "name": "k8s controller", - "profile": "k8s-controller", - "selector": { - "mac": "52:54:00:a1:9c:ae" - }, - "metadata": { - "container_runtime": "rkt", - "domain_name": "node1.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "etcd_name": "node1", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "k8s_pod_network": "10.2.0.0/16", - "k8s_service_ip_range": "10.3.0.0/24", - "pxe": "true" - } -} diff --git a/examples/groups/rktnetes/node2.json b/examples/groups/rktnetes/node2.json deleted file mode 100644 index a18046bb..00000000 --- a/examples/groups/rktnetes/node2.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node2", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "mac": "52:54:00:b2:2f:86" - }, - "metadata": { - "container_runtime": "rkt", - "domain_name": "node2.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "pxe": "true" - } -} diff --git a/examples/groups/rktnetes/node3.json b/examples/groups/rktnetes/node3.json deleted file mode 100644 index 2f73e156..00000000 --- a/examples/groups/rktnetes/node3.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "id": "node3", - "name": "k8s worker", - "profile": "k8s-worker", - "selector": { - "mac": "52:54:00:c3:61:77" - }, - "metadata": { - "container_runtime": "rkt", - "domain_name": "node3.example.com", - "etcd_initial_cluster": "node1=http://node1.example.com:2380", - "k8s_cert_endpoint": "http://matchbox.foo:8080/assets", - "k8s_controller_endpoint": "https://node1.example.com", - "k8s_dns_service_ip": "10.3.0.10", - "k8s_etcd_endpoints": "http://node1.example.com:2379", - "pxe": "true" - } -} diff --git a/examples/ignition/install-shutdown.yaml b/examples/ignition/install-shutdown.yaml deleted file mode 100644 index c08149d8..00000000 --- a/examples/ignition/install-shutdown.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -systemd: - units: - - name: installer.service - enable: true - contents: | - [Unit] - Requires=network-online.target - After=network-online.target - [Service] - Type=simple - ExecStart=/opt/installer - [Install] - WantedBy=multi-user.target -storage: - files: - - path: /opt/installer - filesystem: root - mode: 0500 - contents: - inline: | - #!/bin/bash -ex - curl --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json - coreos-install -d /dev/sda -C 
{{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}} - udevadm settle - systemctl poweroff - -{{ if index . "ssh_authorized_keys" }} -passwd: - users: - - name: core - ssh_authorized_keys: - {{ range $element := .ssh_authorized_keys }} - - {{$element}} - {{end}} -{{end}} - diff --git a/examples/ignition/k8s-controller.yaml b/examples/ignition/k8s-controller.yaml deleted file mode 100644 index e52d79d6..00000000 --- a/examples/ignition/k8s-controller.yaml +++ /dev/null @@ -1,778 +0,0 @@ ---- -systemd: - units: - - name: etcd2.service - enable: true - dropins: - - name: 40-etcd-cluster.conf - contents: | - [Service] - Environment="ETCD_NAME={{.etcd_name}}" - Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379" - Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380" - Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379" - Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380" - Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}" - Environment="ETCD_STRICT_RECONFIG_CHECK=true" - - name: flanneld.service - dropins: - - name: 40-ExecStartPre-symlink.conf - contents: | - [Service] - EnvironmentFile=-/etc/flannel/options.env - ExecStartPre=/opt/init-flannel - - name: docker.service - dropins: - - name: 40-flannel.conf - contents: | - [Unit] - Requires=flanneld.service - After=flanneld.service - [Service] - EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env - - name: locksmithd.service - dropins: - - name: 40-etcd-lock.conf - contents: | - [Service] - Environment="REBOOT_STRATEGY=etcd-lock" - - name: k8s-certs@.service - contents: | - [Unit] - Description=Fetch Kubernetes certificate assets - Requires=network-online.target - After=network-online.target - [Service] - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl - ExecStart=/usr/bin/bash -c "[ -f /etc/kubernetes/ssl/%i ] || curl --fail {{.k8s_cert_endpoint}}/tls/%i -o /etc/kubernetes/ssl/%i" - - name: k8s-assets.target - contents: | - [Unit] - Description=Load Kubernetes Assets - Requires=k8s-certs@apiserver.pem.service - After=k8s-certs@apiserver.pem.service - Requires=k8s-certs@apiserver-key.pem.service - After=k8s-certs@apiserver-key.pem.service - Requires=k8s-certs@ca.pem.service - After=k8s-certs@ca.pem.service - - name: kubelet.service - enable: true - contents: | - [Unit] - Description=Kubelet via Hyperkube ACI - Wants=flanneld.service - Requires=k8s-assets.target - After=k8s-assets.target - [Service] - Environment=KUBELET_VERSION=v1.5.5_coreos.0 - Environment="RKT_OPTS=--uuid-file-save=/var/run/kubelet-pod.uuid \ - --volume dns,kind=host,source=/etc/resolv.conf \ - --mount volume=dns,target=/etc/resolv.conf \ - {{ if eq .container_runtime "rkt" -}} - --volume rkt,kind=host,source=/opt/bin/host-rkt \ - --mount volume=rkt,target=/usr/bin/rkt \ - --volume var-lib-rkt,kind=host,source=/var/lib/rkt \ - --mount volume=var-lib-rkt,target=/var/lib/rkt \ - --volume stage,kind=host,source=/tmp \ - --mount volume=stage,target=/tmp \ - {{ end -}} - --volume var-log,kind=host,source=/var/log \ - --mount volume=var-log,target=/var/log" - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests - ExecStartPre=/usr/bin/mkdir -p /var/log/containers - ExecStartPre=/usr/bin/systemctl is-active flanneld.service - ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid - ExecStart=/usr/lib/coreos/kubelet-wrapper \ - --api-servers=http://127.0.0.1:8080 \ - --register-schedulable=true \ - --cni-conf-dir=/etc/kubernetes/cni/net.d \ - 
--network-plugin=cni \ - --container-runtime={{.container_runtime}} \ - --rkt-path=/usr/bin/rkt \ - --rkt-stage1-image=coreos.com/rkt/stage1-coreos \ - --allow-privileged=true \ - --pod-manifest-path=/etc/kubernetes/manifests \ - --hostname-override={{.domain_name}} \ - --cluster_dns={{.k8s_dns_service_ip}} \ - --cluster_domain=cluster.local - ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid - Restart=always - RestartSec=10 - [Install] - WantedBy=multi-user.target - - name: k8s-addons.service - enable: true - contents: | - [Unit] - Description=Kubernetes Addons - [Service] - Type=oneshot - ExecStart=/opt/k8s-addons - [Install] - WantedBy=multi-user.target - {{ if eq .container_runtime "rkt" }} - - name: rkt-api.service - enable: true - contents: | - [Unit] - Before=kubelet.service - [Service] - ExecStart=/usr/bin/rkt api-service - Restart=always - RestartSec=10 - [Install] - RequiredBy=kubelet.service - - name: load-rkt-stage1.service - enable: true - contents: | - [Unit] - Description=Load rkt stage1 images - Documentation=http://github.com/coreos/rkt - Requires=network-online.target - After=network-online.target - Before=rkt-api.service - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image - [Install] - RequiredBy=rkt-api.service - {{ end }} - -storage: - {{ if index . "pxe" }} - disks: - - device: /dev/sda - wipe_table: true - partitions: - - label: ROOT - filesystems: - - name: root - mount: - device: "/dev/sda1" - format: "ext4" - create: - force: true - options: - - "-LROOT" - {{ end }} - files: - - path: /etc/kubernetes/cni/net.d/10-flannel.conf - filesystem: root - contents: - inline: | - { - "name": "podnet", - "type": "flannel", - "delegate": { - "isDefaultGateway": true - } - } - - path: /etc/kubernetes/cni/docker_opts_cni.env - filesystem: root - contents: - inline: | - DOCKER_OPT_BIP="" - DOCKER_OPT_IPMASQ="" - - path: /etc/sysctl.d/max-user-watches.conf - filesystem: root - contents: - inline: | - fs.inotify.max_user_watches=16184 - - path: /etc/kubernetes/manifests/kube-proxy.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-proxy - namespace: kube-system - annotations: - rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly - spec: - hostNetwork: true - containers: - - name: kube-proxy - image: quay.io/coreos/hyperkube:v1.5.5_coreos.0 - command: - - /hyperkube - - proxy - - --master=http://127.0.0.1:8080 - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/ssl/certs - name: ssl-certs-host - readOnly: true - - mountPath: /var/run/dbus - name: dbus - readOnly: false - volumes: - - hostPath: - path: /usr/share/ca-certificates - name: ssl-certs-host - - hostPath: - path: /var/run/dbus - name: dbus - - path: /etc/kubernetes/manifests/kube-apiserver.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-apiserver - namespace: kube-system - spec: - hostNetwork: true - containers: - - name: kube-apiserver - image: quay.io/coreos/hyperkube:v1.5.5_coreos.0 - command: - - /hyperkube - - apiserver - - --bind-address=0.0.0.0 - - --etcd-servers={{.k8s_etcd_endpoints}} - - --allow-privileged=true - - --service-cluster-ip-range={{.k8s_service_ip_range}} - - --secure-port=443 - - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota - - 
--tls-cert-file=/etc/kubernetes/ssl/apiserver.pem - - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem - - --client-ca-file=/etc/kubernetes/ssl/ca.pem - - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem - - --runtime-config=extensions/v1beta1/networkpolicies=true - - --anonymous-auth=false - livenessProbe: - httpGet: - host: 127.0.0.1 - port: 8080 - path: /healthz - initialDelaySeconds: 15 - timeoutSeconds: 15 - ports: - - containerPort: 443 - hostPort: 443 - name: https - - containerPort: 8080 - hostPort: 8080 - name: local - volumeMounts: - - mountPath: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - readOnly: true - - mountPath: /etc/ssl/certs - name: ssl-certs-host - readOnly: true - volumes: - - hostPath: - path: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - - hostPath: - path: /usr/share/ca-certificates - name: ssl-certs-host - - path: /etc/flannel/options.env - filesystem: root - contents: - inline: | - FLANNELD_ETCD_ENDPOINTS={{.k8s_etcd_endpoints}} - - path: /etc/kubernetes/manifests/kube-controller-manager.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-controller-manager - namespace: kube-system - spec: - containers: - - name: kube-controller-manager - image: quay.io/coreos/hyperkube:v1.5.5_coreos.0 - command: - - /hyperkube - - controller-manager - - --master=http://127.0.0.1:8080 - - --leader-elect=true - - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem - - --root-ca-file=/etc/kubernetes/ssl/ca.pem - resources: - requests: - cpu: 200m - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /healthz - port: 10252 - initialDelaySeconds: 15 - timeoutSeconds: 15 - volumeMounts: - - mountPath: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - readOnly: true - - mountPath: /etc/ssl/certs - name: ssl-certs-host - readOnly: true - hostNetwork: true - volumes: - - hostPath: - path: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - - hostPath: - path: /usr/share/ca-certificates - name: ssl-certs-host - - path: /etc/kubernetes/manifests/kube-scheduler.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-scheduler - namespace: kube-system - spec: - hostNetwork: true - containers: - - name: kube-scheduler - image: quay.io/coreos/hyperkube:v1.5.5_coreos.0 - command: - - /hyperkube - - scheduler - - --master=http://127.0.0.1:8080 - - --leader-elect=true - resources: - requests: - cpu: 100m - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /healthz - port: 10251 - initialDelaySeconds: 15 - timeoutSeconds: 15 - - path: /srv/kubernetes/manifests/kube-dns-autoscaler-deployment.yaml - filesystem: root - contents: - inline: | - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-app: kube-dns-autoscaler - kubernetes.io/cluster-service: "true" - spec: - template: - metadata: - labels: - k8s-app: kube-dns-autoscaler - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - name: autoscaler - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0 - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=kube-dns-autoscaler - - --mode=linear - - --target=Deployment/kube-dns - - 
--default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}} - - --logtostderr=true - - --v=2 - - path: /srv/kubernetes/manifests/kube-dns-deployment.yaml - filesystem: root - contents: - inline: | - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - spec: - strategy: - rollingUpdate: - maxSurge: 10% - maxUnavailable: 0 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - name: kubedns - image: gcr.io/google_containers/kubedns-amd64:1.9 - resources: - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - livenessProbe: - httpGet: - path: /healthz-kubedns - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /readiness - port: 8081 - scheme: HTTP - initialDelaySeconds: 3 - timeoutSeconds: 5 - args: - - --domain=cluster.local. - - --dns-port=10053 - - --config-map=kube-dns - - --v=2 - env: - - name: PROMETHEUS_PORT - value: "10055" - ports: - - containerPort: 10053 - name: dns-local - protocol: UDP - - containerPort: 10053 - name: dns-tcp-local - protocol: TCP - - containerPort: 10055 - name: metrics - protocol: TCP - - name: dnsmasq - image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4 - livenessProbe: - httpGet: - path: /healthz-dnsmasq - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - --cache-size=1000 - - --no-resolv - - --server=127.0.0.1#10053 - - --log-facility=- - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - resources: - requests: - cpu: 150m - memory: 10Mi - - name: dnsmasq-metrics - image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0 - livenessProbe: - httpGet: - path: /metrics - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - --v=2 - - --logtostderr - ports: - - containerPort: 10054 - name: metrics - protocol: TCP - resources: - requests: - memory: 10Mi - - name: healthz - image: gcr.io/google_containers/exechealthz-amd64:1.2 - resources: - limits: - memory: 50Mi - requests: - cpu: 10m - memory: 50Mi - args: - - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null - - --url=/healthz-dnsmasq - - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null - - --url=/healthz-kubedns - - --port=8080 - - --quiet - ports: - - containerPort: 8080 - protocol: TCP - dnsPolicy: Default - - path: /srv/kubernetes/manifests/kube-dns-svc.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Service - metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeDNS" - spec: - selector: - k8s-app: kube-dns - clusterIP: {{.k8s_dns_service_ip}} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - - path: /srv/kubernetes/manifests/heapster-deployment.yaml - filesystem: root - contents: - inline: | - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: heapster-v1.2.0 - 
namespace: kube-system - labels: - k8s-app: heapster - kubernetes.io/cluster-service: "true" - version: v1.2.0 - spec: - replicas: 1 - selector: - matchLabels: - k8s-app: heapster - version: v1.2.0 - template: - metadata: - labels: - k8s-app: heapster - version: v1.2.0 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - image: gcr.io/google_containers/heapster:v1.2.0 - name: heapster - livenessProbe: - httpGet: - path: /healthz - port: 8082 - scheme: HTTP - initialDelaySeconds: 180 - timeoutSeconds: 5 - command: - - /heapster - - --source=kubernetes.summary_api:'' - - image: gcr.io/google_containers/addon-resizer:1.6 - name: heapster-nanny - resources: - limits: - cpu: 50m - memory: 90Mi - requests: - cpu: 50m - memory: 90Mi - env: - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: MY_POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - command: - - /pod_nanny - - --cpu=80m - - --extra-cpu=4m - - --memory=200Mi - - --extra-memory=4Mi - - --threshold=5 - - --deployment=heapster-v1.2.0 - - --container=heapster - - --poll-period=300000 - - --estimator=exponential - - path: /srv/kubernetes/manifests/heapster-svc.yaml - filesystem: root - contents: - inline: | - kind: Service - apiVersion: v1 - metadata: - name: heapster - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "Heapster" - spec: - ports: - - port: 80 - targetPort: 8082 - selector: - k8s-app: heapster - - path: /srv/kubernetes/manifests/kube-dashboard-deployment.yaml - filesystem: root - contents: - inline: | - apiVersion: extensions/v1beta1 - kind: Deployment - metadata: - name: kubernetes-dashboard - namespace: kube-system - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - spec: - selector: - matchLabels: - k8s-app: kubernetes-dashboard - template: - metadata: - labels: - k8s-app: kubernetes-dashboard - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - name: kubernetes-dashboard - image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 50Mi - requests: - cpu: 100m - memory: 50Mi - ports: - - containerPort: 9090 - livenessProbe: - httpGet: - path: / - port: 9090 - initialDelaySeconds: 30 - timeoutSeconds: 30 - - path: /srv/kubernetes/manifests/kube-dashboard-svc.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Service - metadata: - name: kubernetes-dashboard - namespace: kube-system - labels: - k8s-app: kubernetes-dashboard - kubernetes.io/cluster-service: "true" - spec: - selector: - k8s-app: kubernetes-dashboard - ports: - - port: 80 - targetPort: 9090 - - path: /opt/init-flannel - filesystem: root - mode: 0544 - contents: - inline: | - #!/bin/bash -ex - function init_flannel { - echo "Waiting for etcd..." 
- while true - do - IFS=',' read -ra ES <<< "{{.k8s_etcd_endpoints}}" - for ETCD in "${ES[@]}"; do - echo "Trying: $ETCD" - if [ -n "$(curl --fail --silent "$ETCD/v2/machines")" ]; then - local ACTIVE_ETCD=$ETCD - break - fi - sleep 1 - done - if [ -n "$ACTIVE_ETCD" ]; then - break - fi - done - RES=$(curl --silent -X PUT -d "value={\"Network\":\"{{.k8s_pod_network}}\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false") - if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then - echo "Unexpected error configuring flannel pod network: $RES" - fi - } - init_flannel - {{ if eq .container_runtime "rkt" }} - - path: /opt/bin/host-rkt - filesystem: root - mode: 0544 - contents: - inline: | - #!/bin/sh - # This is bind mounted into the kubelet rootfs and all rkt shell-outs go - # through this rkt wrapper. It essentially enters the host mount namespace - # (which it is already in) only for the purpose of breaking out of the chroot - # before calling rkt. It makes things like rkt gc work and avoids bind mounting - # in certain rkt filesystem dependancies into the kubelet rootfs. This can - # eventually be obviated when the write-api stuff gets upstream and rkt gc is - # through the api-server. Related issue: - # https://github.com/coreos/rkt/issues/2878 - exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "$@" - {{ end }} - - path: /opt/k8s-addons - filesystem: root - mode: 0544 - contents: - inline: | - #!/bin/bash -ex - echo "Waiting for Kubernetes API..." - until curl --fail --silent "http://127.0.0.1:8080/version" - do - sleep 5 - done - echo "K8S: DNS addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-autoscaler-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" - echo "K8S: Heapster addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/heapster-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" - echo "K8S: Dashboard addon" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-deployment.yaml)" "http://127.0.0.1:8080/apis/extensions/v1beta1/namespaces/kube-system/deployments" - curl --silent -H "Content-Type: application/yaml" -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dashboard-svc.yaml)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" - -{{ if index . 
"ssh_authorized_keys" }} -passwd: - users: - - name: core - ssh_authorized_keys: - {{ range $element := .ssh_authorized_keys }} - - {{$element}} - {{end}} -{{end}} diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml deleted file mode 100644 index 6f616d35..00000000 --- a/examples/ignition/k8s-worker.yaml +++ /dev/null @@ -1,268 +0,0 @@ ---- -systemd: - units: - - name: etcd2.service - enable: true - dropins: - - name: 40-etcd-cluster.conf - contents: | - [Service] - Environment="ETCD_PROXY=on" - Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379" - Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}" - - name: flanneld.service - dropins: - - name: 40-add-options.conf - contents: | - [Service] - EnvironmentFile=-/etc/flannel/options.env - - name: docker.service - dropins: - - name: 40-flannel.conf - contents: | - [Unit] - Requires=flanneld.service - After=flanneld.service - [Service] - EnvironmentFile=/etc/kubernetes/cni/docker_opts_cni.env - - name: locksmithd.service - dropins: - - name: 40-etcd-lock.conf - contents: | - [Service] - Environment="REBOOT_STRATEGY=etcd-lock" - - name: k8s-certs@.service - contents: | - [Unit] - Description=Fetch Kubernetes certificate assets - Requires=network-online.target - After=network-online.target - [Service] - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl - ExecStart=/usr/bin/bash -c "[ -f /etc/kubernetes/ssl/%i ] || curl --fail {{.k8s_cert_endpoint}}/tls/%i -o /etc/kubernetes/ssl/%i" - - name: k8s-assets.target - contents: | - [Unit] - Description=Load Kubernetes Assets - Requires=k8s-certs@worker.pem.service - After=k8s-certs@worker.pem.service - Requires=k8s-certs@worker-key.pem.service - After=k8s-certs@worker-key.pem.service - Requires=k8s-certs@ca.pem.service - After=k8s-certs@ca.pem.service - - name: kubelet.service - enable: true - contents: | - [Unit] - Description=Kubelet via Hyperkube ACI - Requires=k8s-assets.target - After=k8s-assets.target - [Service] - Environment=KUBELET_VERSION=v1.5.5_coreos.0 - Environment="RKT_OPTS=--uuid-file-save=/var/run/kubelet-pod.uuid \ - --volume dns,kind=host,source=/etc/resolv.conf \ - --mount volume=dns,target=/etc/resolv.conf \ - {{ if eq .container_runtime "rkt" -}} - --volume rkt,kind=host,source=/opt/bin/host-rkt \ - --mount volume=rkt,target=/usr/bin/rkt \ - --volume var-lib-rkt,kind=host,source=/var/lib/rkt \ - --mount volume=var-lib-rkt,target=/var/lib/rkt \ - --volume stage,kind=host,source=/tmp \ - --mount volume=stage,target=/tmp \ - {{ end -}} - --volume var-log,kind=host,source=/var/log \ - --mount volume=var-log,target=/var/log" - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests - ExecStartPre=/usr/bin/mkdir -p /var/log/containers - ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid - ExecStart=/usr/lib/coreos/kubelet-wrapper \ - --api-servers={{.k8s_controller_endpoint}} \ - --cni-conf-dir=/etc/kubernetes/cni/net.d \ - --network-plugin=cni \ - --container-runtime={{.container_runtime}} \ - --rkt-path=/usr/bin/rkt \ - --rkt-stage1-image=coreos.com/rkt/stage1-coreos \ - --register-node=true \ - --allow-privileged=true \ - --pod-manifest-path=/etc/kubernetes/manifests \ - --hostname-override={{.domain_name}} \ - --cluster_dns={{.k8s_dns_service_ip}} \ - --cluster_domain=cluster.local \ - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \ - --tls-cert-file=/etc/kubernetes/ssl/worker.pem \ - --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem - ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid - 
Restart=always - RestartSec=10 - [Install] - WantedBy=multi-user.target - {{ if eq .container_runtime "rkt" }} - - name: rkt-api.service - enable: true - contents: | - [Unit] - Before=kubelet.service - [Service] - ExecStart=/usr/bin/rkt api-service - Restart=always - RestartSec=10 - [Install] - RequiredBy=kubelet.service - - name: load-rkt-stage1.service - enable: true - contents: | - [Unit] - Description=Load rkt stage1 images - Documentation=http://github.com/coreos/rkt - Requires=network-online.target - After=network-online.target - Before=rkt-api.service - [Service] - Type=oneshot - RemainAfterExit=yes - ExecStart=/usr/bin/rkt fetch /usr/lib/rkt/stage1-images/stage1-coreos.aci /usr/lib/rkt/stage1-images/stage1-fly.aci --insecure-options=image - [Install] - RequiredBy=rkt-api.service - {{ end }} - -storage: - {{ if index . "pxe" }} - disks: - - device: /dev/sda - wipe_table: true - partitions: - - label: ROOT - filesystems: - - name: root - mount: - device: "/dev/sda1" - format: "ext4" - create: - force: true - options: - - "-LROOT" - {{end}} - files: - - path: /etc/kubernetes/cni/net.d/10-flannel.conf - filesystem: root - contents: - inline: | - { - "name": "podnet", - "type": "flannel", - "delegate": { - "isDefaultGateway": true - } - } - - path: /etc/kubernetes/cni/docker_opts_cni.env - filesystem: root - contents: - inline: | - DOCKER_OPT_BIP="" - DOCKER_OPT_IPMASQ="" - - path: /etc/sysctl.d/max-user-watches.conf - filesystem: root - contents: - inline: | - fs.inotify.max_user_watches=16184 - - path: /etc/kubernetes/worker-kubeconfig.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Config - clusters: - - name: local - cluster: - certificate-authority: /etc/kubernetes/ssl/ca.pem - users: - - name: kubelet - user: - client-certificate: /etc/kubernetes/ssl/worker.pem - client-key: /etc/kubernetes/ssl/worker-key.pem - contexts: - - context: - cluster: local - user: kubelet - name: kubelet-context - current-context: kubelet-context - - path: /etc/kubernetes/manifests/kube-proxy.yaml - filesystem: root - contents: - inline: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-proxy - namespace: kube-system - annotations: - rkt.alpha.kubernetes.io/stage1-name-override: coreos.com/rkt/stage1-fly - spec: - hostNetwork: true - containers: - - name: kube-proxy - image: quay.io/coreos/hyperkube:v1.5.5_coreos.0 - command: - - /hyperkube - - proxy - - --master={{.k8s_controller_endpoint}} - - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/ssl/certs - name: "ssl-certs" - - mountPath: /etc/kubernetes/worker-kubeconfig.yaml - name: "kubeconfig" - readOnly: true - - mountPath: /etc/kubernetes/ssl - name: "etc-kube-ssl" - readOnly: true - - mountPath: /var/run/dbus - name: dbus - readOnly: false - volumes: - - name: "ssl-certs" - hostPath: - path: "/usr/share/ca-certificates" - - name: "kubeconfig" - hostPath: - path: "/etc/kubernetes/worker-kubeconfig.yaml" - - name: "etc-kube-ssl" - hostPath: - path: "/etc/kubernetes/ssl" - - hostPath: - path: /var/run/dbus - name: dbus - - path: /etc/flannel/options.env - filesystem: root - contents: - inline: | - FLANNELD_ETCD_ENDPOINTS={{.k8s_etcd_endpoints}} - {{ if eq .container_runtime "rkt" }} - - path: /opt/bin/host-rkt - filesystem: root - mode: 0544 - contents: - inline: | - #!/bin/sh - # This is bind mounted into the kubelet rootfs and all rkt shell-outs go - # through this rkt wrapper. 
It essentially enters the host mount namespace - # (which it is already in) only for the purpose of breaking out of the chroot - # before calling rkt. It makes things like rkt gc work and avoids bind mounting - # in certain rkt filesystem dependancies into the kubelet rootfs. This can - # eventually be obviated when the write-api stuff gets upstream and rkt gc is - # through the api-server. Related issue: - # https://github.com/coreos/rkt/issues/2878 - exec nsenter -m -u -i -n -p -t 1 -- /usr/bin/rkt "$@" - {{ end }} - -{{ if index . "ssh_authorized_keys" }} -passwd: - users: - - name: core - ssh_authorized_keys: - {{ range $element := .ssh_authorized_keys }} - - {{$element}} - {{end}} -{{end}} diff --git a/examples/profiles/install-shutdown.json b/examples/profiles/install-shutdown.json deleted file mode 100644 index 3d6a4386..00000000 --- a/examples/profiles/install-shutdown.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "id": "install-shutdown", - "name": "Install CoreOS and Shutdown", - "boot": { - "kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz", - "initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"], - "args": [ - "coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}", - "coreos.first_boot=yes", - "console=tty0", - "console=ttyS0", - "coreos.autologin" - ] - }, - "ignition_id": "install-shutdown.yaml" -} \ No newline at end of file diff --git a/examples/profiles/k8s-controller.json b/examples/profiles/k8s-controller.json deleted file mode 100644 index 09c2093d..00000000 --- a/examples/profiles/k8s-controller.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "k8s-controller", - "name": "Kubernetes Controller", - "boot": { - "kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz", - "initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"], - "args": [ - "root=/dev/sda1", - "coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}", - "coreos.first_boot=yes", - "console=tty0", - "console=ttyS0", - "coreos.autologin" - ] - }, - "ignition_id": "k8s-controller.yaml" -} diff --git a/examples/profiles/k8s-worker.json b/examples/profiles/k8s-worker.json deleted file mode 100644 index 9127ac67..00000000 --- a/examples/profiles/k8s-worker.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "id": "k8s-worker", - "name": "Kubernetes Worker", - "boot": { - "kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz", - "initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"], - "args": [ - "root=/dev/sda1", - "coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}", - "coreos.first_boot=yes", - "console=tty0", - "console=ttyS0", - "coreos.autologin" - ] - }, - "ignition_id": "k8s-worker.yaml" -}
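
Migration note: machines that previously matched the removed `k8s` groups (same MAC selectors) can be pointed at the bootkube examples instead. Below is a minimal sketch of a comparable group definition, modeled on the schema of the removed `examples/groups/k8s/node1.json`; the `bootkube-controller` profile name and the exact metadata keys are assumptions, so consult `examples/groups/bootkube` and `Documentation/bootkube.md` in this repo for the maintained definitions.

```json
{
  "id": "node1",
  "name": "bootkube controller",
  "profile": "bootkube-controller",
  "selector": {
    "mac": "52:54:00:a1:9c:ae"
  },
  "metadata": {
    "domain_name": "node1.example.com",
    "etcd_initial_cluster": "node1=http://node1.example.com:2380",
    "etcd_name": "node1",
    "pxe": "true"
  }
}
```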