examples: Add k8s install to disk example

* Install a Kubernetes cluster with 1 master, 2 workers
* Set up a 3-node etcd cluster and enable update-engine
* Include fleet on the cluster as well
commit 290be307ea (parent e43a018db3)
Author: Dalton Hubble
Date: 2016-03-21 11:50:50 -07:00
12 changed files with 136 additions and 19 deletions


@@ -8,9 +8,10 @@ These examples show declarative configurations for network booting libvirt VMs i
| pxe | CoreOS via iPXE | alpha/962.0.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
| grub | CoreOS via GRUB2 Netboot | beta/899.6.0 | RAM | NA |
| pxe-disk | CoreOS via iPXE, with a root filesystem | alpha/962.0.0 | Disk | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
-| coreos-install | 2-stage Ignition: Install CoreOS, provision etcd cluster | alpha/962.0.0 | Disk | [reference](https://coreos.com/os/docs/latest/installing-to-disk.html) |
+| coreos-install | 2-stage Ignition: Install CoreOS, provision etcd cluster | alpha/983.0.0 | Disk | [reference](https://coreos.com/os/docs/latest/installing-to-disk.html) |
| etcd-rkt, etcd-docker | Cluster with 3 etcd nodes, 2 proxies | alpha/983.0.0 | RAM | [reference](https://coreos.com/os/docs/latest/cluster-architectures.html) |
| k8s-rkt, k8s-docker | Kubernetes cluster with 1 master and 2 workers, TLS-authentication | alpha/983.0.0 | Disk | [reference](https://github.com/coreos/coreos-kubernetes) |
+| k8s-install | Install Kubernetes cluster with 1 master and 2 workers, TLS | alpha/983.0.0+ | Disk | [reference](https://github.com/coreos/coreos-kubernetes) |
## Experimental


@@ -47,6 +47,11 @@ function init_config {
function get_certs {
DEST=/etc/kubernetes/ssl
mkdir -p $DEST
echo "Waiting for Certificate Endpoint..."
until curl --silent $CERT_ENDPOINT
do
sleep 5
done
curl $CERT_ENDPOINT/tls/apiserver.pem -o $DEST/apiserver.pem
curl $CERT_ENDPOINT/tls/apiserver-key.pem -o $DEST/apiserver-key.pem
curl $CERT_ENDPOINT/tls/ca.pem -o $DEST/ca.pem
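
The added until-loop makes certificate fetching robust to startup order: `curl --silent` exits non-zero while the connection is refused, so the script blocks until the endpoint is reachable instead of failing when bootcfg's asset server isn't up yet. The same pattern in isolation (a sketch; CERT_ENDPOINT is presumably rendered from the k8s_cert_endpoint metadata shown below):

# Retry every 5 seconds until the certificate endpoint accepts
# connections; curl's non-zero exit status keeps the loop spinning.
until curl --silent "$CERT_ENDPOINT"; do
  sleep 5
done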
@@ -574,7 +579,7 @@ get_certs
init_templates
init_flannel
-systemctl stop update-engine; systemctl mask update-engine
+{{if .autoupdate}}{{else}}systemctl stop update-engine; systemctl mask update-engine{{end}}
systemctl daemon-reload
systemctl enable kubelet; systemctl start kubelet
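
A note on the changed line: `.autoupdate` comes from group metadata, and the k8s-install groups below set `autoupdate: "true"`. A sketch of the two rendered results:

# autoupdate unset or empty -> the old behavior, updates disabled:
systemctl stop update-engine; systemctl mask update-engine
# autoupdate: "true" -> the template emits nothing, so update-engine
# stays enabled and keeps applying CoreOS updates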


@@ -36,6 +36,11 @@ function init_config {
function get_certs {
DEST=/etc/kubernetes/ssl
mkdir -p $DEST
echo "Waiting for Certificate Endpoint..."
until curl --silent $CERT_ENDPOINT
do
sleep 5
done
curl $CERT_ENDPOINT/tls/worker.pem -o $DEST/worker.pem
curl $CERT_ENDPOINT/tls/worker-key.pem -o $DEST/worker-key.pem
curl $CERT_ENDPOINT/tls/ca.pem -o $DEST/ca.pem
@@ -175,7 +180,7 @@ init_config
get_certs
init_templates
-systemctl stop update-engine; systemctl mask update-engine
+{{if .autoupdate}}{{else}}systemctl stop update-engine; systemctl mask update-engine{{end}}
systemctl daemon-reload
systemctl enable kubelet; systemctl start kubelet


@@ -11,7 +11,8 @@ systemd:
[Service]
Type=oneshot
ExecStart=/usr/bin/curl {{.ignition_endpoint}}?{{.query}}&os=installed -o ignition.json
-ExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json
+ExecStart=/usr/bin/curl {{.cloud_endpoint}}?{{.query}}&os=installed -o cloud
+ExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if .cloud_endpoint}}-c cloud{{end}}
ExecStart=/usr/bin/udevadm settle
ExecStart=/usr/bin/systemctl reboot
[Install]
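
The new ExecStart lines fetch a cloud-config alongside the Ignition config and hand it to `coreos-install -c`, so the installed system is provisioned by both. Roughly what the rendered unit runs for the k8s-install groups below (a sketch; <query> stands for the rendered {{.query}} matcher string):

curl 'http://bootcfg.foo:8080/ignition?<query>&os=installed' -o ignition.json
curl 'http://bootcfg.foo:8080/cloud?<query>&os=installed' -o cloud
coreos-install -d /dev/sda -C alpha -V 983.0.0 -i ignition.json -c cloud
udevadm settle
systemctl reboot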

examples/k8s-install.yaml (new file, 75 lines)

@@ -0,0 +1,75 @@
---
api_version: v1alpha1
groups:
  - name: CoreOS Install
    profile: coreos-install
    metadata:
      coreos_channel: alpha
      coreos_version: 983.0.0
      ignition_endpoint: http://bootcfg.foo:8080/ignition
      cloud_endpoint: http://bootcfg.foo:8080/cloud
  - name: Master Node
    profile: k8s-master-install
    require:
      uuid: 16e7d8a7-bfa9-428b-9117-363341bb330b
      os: installed
    metadata:
      ipv4_address: 172.15.0.21
      autoupdate: "true"
      networkd_name: ens3
      networkd_gateway: 172.15.0.1
      networkd_dns: 172.15.0.3
      networkd_address: 172.15.0.21/16
      k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379"
      k8s_pod_network: 10.2.0.0/16
      k8s_service_ip_range: 10.3.0.0/24
      k8s_service_ip: 10.3.0.1
      k8s_dns_service_ip: 10.3.0.10
      k8s_cert_endpoint: http://bootcfg.foo:8080/assets
      fleet_metadata: "role=etcd,name=node1"
      etcd_name: node1
      etcd_initial_cluster: "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
      ssh_authorized_keys:
  - name: Worker 1
    profile: k8s-worker-install
    require:
      uuid: 264cd073-ca62-44b3-98c0-50aad5b5f819
      os: installed
    metadata:
      ipv4_address: 172.15.0.22
      autoupdate: "true"
      networkd_name: ens3
      networkd_gateway: 172.15.0.1
      networkd_dns: 172.15.0.3
      networkd_address: 172.15.0.22/16
      k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379"
      k8s_controller_endpoint: https://172.15.0.21
      k8s_dns_service_ip: 10.3.0.1
      k8s_cert_endpoint: http://bootcfg.foo:8080/assets
      fleet_metadata: "role=etcd,name=node2"
      etcd_name: node2
      etcd_initial_cluster: "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
      ssh_authorized_keys:
  - name: Worker 2
    profile: k8s-worker-install
    require:
      uuid: 39d2e747-2648-4d68-ae92-bbc70b245055
      os: installed
    metadata:
      ipv4_address: 172.15.0.23
      autoupdate: "true"
      networkd_name: ens3
      networkd_gateway: 172.15.0.1
      networkd_dns: 172.15.0.3
      networkd_address: 172.15.0.23/16
      k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379"
      k8s_controller_endpoint: https://172.15.0.21
      k8s_dns_service_ip: 10.3.0.1
      k8s_cert_endpoint: http://bootcfg.foo:8080/assets
      fleet_metadata: "role=etcd,name=node3"
      etcd_name: node3
      etcd_initial_cluster: "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
      ssh_authorized_keys:
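
Once bootcfg serves this file, the rendered configs can be spot-checked over HTTP (a sketch, using the Master Node UUID above):

curl 'http://bootcfg.foo:8080/ignition?uuid=16e7d8a7-bfa9-428b-9117-363341bb330b'                # install stage
curl 'http://bootcfg.foo:8080/ignition?uuid=16e7d8a7-bfa9-428b-9117-363341bb330b&os=installed'   # post-install Ignition
curl 'http://bootcfg.foo:8080/cloud?uuid=16e7d8a7-bfa9-428b-9117-363341bb330b&os=installed'      # post-install cloud-config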


@@ -1,8 +1,8 @@
{
"id": "coreos-install",
"boot": {
"kernel": "/assets/coreos/962.0.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/962.0.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"],
"cmdline": {
"coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
"coreos.autologin": "",


@@ -0,0 +1,15 @@
{
  "id": "kubernetes-master",
  "boot": {
    "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz",
    "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"],
    "cmdline": {
      "cloud-config-url": "http://bootcfg.foo:8080/cloud?uuid=${uuid}&mac=${net0/mac:hexhyp}",
      "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
      "coreos.autologin": "",
      "coreos.first_boot": ""
    }
  },
  "cloud_id": "kubernetes-master.sh",
  "ignition_id": "etcd.yaml"
}
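
On first PXE boot, iPXE expands `${uuid}` and `${net0/mac:hexhyp}` (the MAC as hyphen-separated hex bytes) before the kernel fetches these URLs. For the Master Node group, for example (a sketch; the MAC shown is hypothetical):

http://bootcfg.foo:8080/ignition?uuid=16e7d8a7-bfa9-428b-9117-363341bb330b&mac=52-54-00-aa-bb-cc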


@@ -0,0 +1,15 @@
{
  "id": "kubernetes-worker",
  "boot": {
    "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz",
    "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"],
    "cmdline": {
      "cloud-config-url": "http://bootcfg.foo:8080/cloud?uuid=${uuid}&mac=${net0/mac:hexhyp}",
      "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
      "coreos.autologin": "",
      "coreos.first_boot": ""
    }
  },
  "cloud_id": "kubernetes-worker.sh",
  "ignition_id": "etcd.yaml"
}


@@ -12,5 +12,5 @@
}
},
"cloud_id": "kubernetes-master.sh",
"ignition_id": "network.yaml"
"ignition_id": "etcd-root-fs.yaml"
}


@@ -12,5 +12,5 @@
}
},
"cloud_id": "kubernetes-worker.sh",
"ignition_id": "network.yaml"
"ignition_id": "etcd-root-fs.yaml"
}


@@ -3,10 +3,10 @@
## get-coreos
-Run the `get-coreos` script to quickly download CoreOS kernel and initrd images, verify them, and move them into `assets`.
+Run the `get-coreos` script to download CoreOS kernel and initrd images, verify them, and move them into `assets`.
-./scripts/get-coreos # beta, 899.6.0
-./scripts/get-coreos alpha 942.0.0
+./scripts/get-coreos
+./scripts/get-coreos channel version
This will create:
@@ -26,12 +26,12 @@ Create libvirt VM nodes which are configured to boot from the network or from di
$ sudo ./scripts/libvirt
USAGE: libvirt <command>
Commands:
-create-docker create 4 libvirt nodes on the docker0 bridge
-create-rkt create 4 libvirt nodes on a rkt CNI metal0 bridge
-start start the 4 libvirt nodes
-reboot reboot the 4 libvirt nodes
-shutdown shutdown the 4 libvirt nodes
-poweroff poweroff the 4 libvirt nodes
-destroy destroy the 4 libvirt nodes
-delete-disks delete the allocated disks
+create-docker create libvirt nodes on the docker0 bridge
+create-rkt create libvirt nodes on a rkt CNI metal0 bridge
+start start the libvirt nodes
+reboot reboot the libvirt nodes
+shutdown shutdown the libvirt nodes
+poweroff poweroff the libvirt nodes
+destroy destroy the libvirt nodes
+delete-disks delete the allocated disks
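
A typical create/boot/teardown cycle with these commands (a sketch):

sudo ./scripts/libvirt create-rkt    # nodes on the rkt CNI metal0 bridge
sudo ./scripts/libvirt start
sudo ./scripts/libvirt poweroff
sudo ./scripts/libvirt destroy
sudo ./scripts/libvirt delete-disks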