examples: Install CoreOS to disk and provision an etcd cluster

Dalton Hubble
2016-02-02 10:40:39 -08:00
parent 9df3777ddd
commit fc4d669a38
10 changed files with 133 additions and 29 deletions

@@ -18,7 +18,7 @@ Clone the [coreos-baremetal](https://github.com/coreos/coreos-baremetal) source
git clone https://github.com/coreos/coreos-baremetal.git
cd coreos-baremetal
-Create 5 VM nodes which have known hardware attributes. The nodes will be attached to the `docker0` bridge where your containers run.
+Create four VM nodes which have known hardware attributes. The nodes will be attached to the `docker0` bridge where your containers run.
sudo ./scripts/libvirt create-docker

@@ -61,7 +61,7 @@ Take a look at [etcd-rkt.yaml](../examples/etcd-rkt.yaml) to get an idea of how
## Client VMs
-Create 5 VM nodes which have known hardware attributes. The nodes will be attached to the `metal0` bridge where your pods run.
+Create four VM nodes which have known hardware attributes. The nodes will be attached to the `metal0` bridge where your pods run.
sudo ./scripts/libvirt create-rkt

@@ -91,13 +91,14 @@ Create 5 libvirt VM nodes configured to boot from the network. The `scripts/libv
sudo ./scripts/libvirt
USAGE: libvirt <command>
Commands:
-create-docker create 5 libvirt nodes on the docker0 bridge
-create-rkt create 5 libvirt nodes on a rkt CNI metal0 bridge
-start start the 5 libvirt nodes
-reboot reboot the 5 libvirt nodes
-shutdown shutdown the 5 libvirt nodes
-poweroff poweroff the 5 libvirt nodes
-destroy destroy the 5 libvirt nodes
+create-docker create 4 libvirt nodes on the docker0 bridge
+create-rkt create 4 libvirt nodes on a rkt CNI metal0 bridge
+start start the 4 libvirt nodes
+reboot reboot the 4 libvirt nodes
+shutdown shutdown the 4 libvirt nodes
+poweroff poweroff the 4 libvirt nodes
+destroy destroy the 4 libvirt nodes
+delete-disks delete the allocated disks
You may use `virt-manager` to create your own VMs and view the console/state/attributes of existing VM nodes.
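From the command line, `virsh` gives a similar view; a minimal sketch, assuming the default libvirt connection and the node names created by `scripts/libvirt` (`virsh dominfo` includes the UUID that the example groups match on):
sudo virsh list --all
sudo virsh dominfo node1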

examples/coreos-disk.yaml (new file, 61 lines)

@@ -0,0 +1,61 @@
---
api_version: v1alpha1
groups:
- name: CoreOS Install
spec: coreos-install
metadata:
coreos_channel: alpha
coreos_version: 935.0.0
ignition_endpoint: http://bootcfg.foo:8080/ignition
- name: etcd Node 1
spec: etcd
require:
uuid: 16e7d8a7-bfa9-428b-9117-363341bb330b
os: installed
metadata:
networkd_name: ens3
networkd_gateway: 172.17.0.1
networkd_dns: 172.17.0.3
networkd_address: 172.17.0.21/16
ipv4_address: 172.17.0.21
etcd_name: node1
etcd_initial_cluster: "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380"
- name: etcd Node 2
spec: etcd
require:
uuid: 264cd073-ca62-44b3-98c0-50aad5b5f819
os: installed
metadata:
networkd_name: ens3
networkd_gateway: 172.17.0.1
networkd_dns: 172.17.0.3
networkd_address: 172.17.0.22/16
ipv4_address: 172.17.0.22
etcd_name: node2
etcd_initial_cluster: "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380"
- name: etcd Node 3
spec: etcd
require:
uuid: 39d2e747-2648-4d68-ae92-bbc70b245055
os: installed
metadata:
networkd_name: ens3
networkd_gateway: 172.17.0.1
networkd_dns: 172.17.0.3
networkd_address: 172.17.0.23/16
ipv4_address: 172.17.0.23
etcd_name: node3
etcd_initial_cluster: "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380"
- name: etcd Proxy
spec: etcd_proxy
require:
os: installed
metadata:
networkd_name: ens3
networkd_gateway: 172.17.0.1
networkd_dns: 172.17.0.3
etcd_initial_cluster: "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380"
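These groups are matched against the query parameters a machine sends when it asks for its Ignition config. Roughly: the first network boot carries no `os=installed` parameter, so only the default `CoreOS Install` group applies; once the install step has written `grub.cfg` and the machine reboots from disk, the request carries `os=installed` alongside the original query, so the node's UUID selects its etcd group (a machine with an unlisted UUID still matches the proxy group, which requires only `os: installed`). A sketch of the second-phase request node1 would make, using the endpoint and UUID above (the `mac` parameter from the PXE boot also rides along via `{{.query}}` but is omitted here):
curl "http://bootcfg.foo:8080/ignition?uuid=16e7d8a7-bfa9-428b-9117-363341bb330b&os=installed"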

@@ -0,0 +1 @@
{"ignitionVersion":1,"storage":{},"systemd":{"units":[{"name":"install.service","enable":true,"contents":"[Unit]\nRequires=network-online.target\nAfter=network-online.target\n[Service]\nType=oneshot\nExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}}\nExecStart=/usr/bin/udevadm settle\nExecStart=/usr/bin/mount /dev/disk/by-label/OEM /mnt\nExecStart=/bin/sh -c 'echo set linux_append=\\\\\"coreos.config.url=\\\"{{.ignition_endpoint}}?{{.query}}\u0026os=installed\\\"\\\\\" \u003e /mnt/grub.cfg'\nExecStart=/usr/bin/systemctl reboot\n[Install]\nWantedBy=multi-user.target\n"}]},"networkd":{},"passwd":{}}

@@ -0,0 +1,19 @@
---
ignition_version: 1
systemd:
units:
- name: install.service
enable: true
contents: |
[Unit]
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}}
ExecStart=/usr/bin/udevadm settle
ExecStart=/usr/bin/mount /dev/disk/by-label/OEM /mnt
ExecStart=/bin/sh -c 'echo set linux_append=\\"coreos.config.url=\"{{.ignition_endpoint}}?{{.query}}&os=installed\"\\" > /mnt/grub.cfg'
ExecStart=/usr/bin/systemctl reboot
[Install]
WantedBy=multi-user.target
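The layered quoting in the `/bin/sh -c` line is the subtle part: once the template variables are filled in and the escaping collapses, the OEM partition should hold a one-line `grub.cfg` that appends the Ignition URL, now tagged `os=installed`, to the kernel command line of every later disk boot. A rough way to check on an installed node:
sudo mount /dev/disk/by-label/OEM /mnt
cat /mnt/grub.cfg
which should print something like `set linux_append="coreos.config.url=http://bootcfg.foo:8080/ignition?uuid=...&os=installed"`, with the node's original PXE query string in place of the elided part.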

@@ -1 +1 @@
{"ignitionVersion":1,"storage":{},"systemd":{"units":[{"name":"metadata.service","enable":true,"contents":"[Unit]\nDescription=Bare Metal Metadata Agent\n[Service]\nType=oneshot\nEnvironment=OUTPUT=/run/metadata/bootcfg\nExecStart=/usr/bin/mkdir --parent /run/metadata\nExecStart=/usr/bin/bash -c 'curl --url \"http://bootcfg.foo:8080/metadata?{{.query}}\" --retry 10 --output ${OUTPUT}'\n[Install]\nWantedBy=multi-user.target\n"},{"name":"etcd2.service","enable":true,"dropins":[{"name":"etcd-metadata.conf","contents":"[Unit]\nRequires=metadata.service\nAfter=metadata.service\n[Service]\nEnvironmentFile=/run/metadata/bootcfg\nExecStart=\nExecStart=/usr/bin/etcd2 \\\n --advertise-client-urls=http://${IPV4_ADDRESS}:2379 \\\n --initial-advertise-peer-urls=http://${IPV4_ADDRESS}:2380 \\\n --listen-client-urls=http://0.0.0.0:2379 \\\n --listen-peer-urls=http://${IPV4_ADDRESS}:2380 \\\n --initial-cluster=${ETCD_INITIAL_CLUSTER}\n"}]}]},"networkd":{"units":[{"name":"00-{{.networkd_name}}.network","contents":"[Match]\nName={{.networkd_name}}\n[Network]\nGateway={{.networkd_gateway}}\nDNS={{.networkd_dns}}\nAddress={{.networkd_address}}\n"}]},"passwd":{}}
{"ignitionVersion":1,"storage":{},"systemd":{"units":[{"name":"metadata.service","enable":true,"contents":"[Unit]\nDescription=Bare Metal Metadata Agent\n[Service]\nType=oneshot\nEnvironment=OUTPUT=/run/metadata/bootcfg\nExecStart=/usr/bin/mkdir --parent /run/metadata\nExecStart=/usr/bin/bash -c 'curl --url \"http://bootcfg.foo:8080/metadata?{{.query}}\" --retry 10 --output ${OUTPUT}'\n[Install]\nWantedBy=multi-user.target\n"},{"name":"etcd2.service","enable":true,"dropins":[{"name":"etcd-metadata.conf","contents":"[Unit]\nRequires=metadata.service\nAfter=metadata.service\n[Service]\nEnvironmentFile=/run/metadata/bootcfg\nExecStart=\nExecStart=/usr/bin/etcd2 \\\n --advertise-client-urls=http://${IPV4_ADDRESS}:2379 \\\n --initial-advertise-peer-urls=http://${IPV4_ADDRESS}:2380 \\\n --listen-client-urls=http://0.0.0.0:2379 \\\n --listen-peer-urls=http://${IPV4_ADDRESS}:2380 \\\n --initial-cluster=${ETCD_INITIAL_CLUSTER}\n"}]}]},"networkd":{"units":[{"name":"00-{{.networkd_name}}.network","contents":"[Match]\nName={{.networkd_name}}\n[Network]\nGateway={{.networkd_gateway}}\nDNS={{.networkd_dns}}\nDNS=8.8.8.8\nAddress={{.networkd_address}}\n"}]},"passwd":{}}

@@ -40,4 +40,5 @@ networkd:
[Network]
Gateway={{.networkd_gateway}}
DNS={{.networkd_dns}}
+DNS=8.8.8.8
Address={{.networkd_address}}

@@ -0,0 +1,14 @@
{
"id": "coreos-install",
"boot": {
"kernel": "/assets/coreos/835.9.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/835.9.0/coreos_production_pxe_image.cpio.gz"],
"cmdline": {
"coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
"coreos.autologin": "",
"coreos.first_boot": ""
}
},
"cloud_id": "",
"ignition_id": "coreos-install.json"
}
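In the `cmdline` above, `${uuid}` and `${net0/mac:hexhyp}` are iPXE variables, expanded by the booting machine itself, so each node identifies itself when Ignition fetches `coreos.config.url` on the first boot. A sketch of the resulting first-boot request for node1 (UUID from the groups file; the MAC value is purely illustrative):
curl "http://bootcfg.foo:8080/ignition?uuid=16e7d8a7-bfa9-428b-9117-363341bb330b&mac=52-54-00-aa-bb-cc"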

@@ -1,5 +1,5 @@
#!/bin/bash -e
-# Manage 5 VM nodes which have a specific set of hardware attributes.
+# Manage four VM nodes which have a specific set of hardware attributes.
if [ "$EUID" -ne 0 ]
then echo "Please run as root"
@@ -15,6 +15,7 @@ function main {
"shutdown") shutdown;;
"poweroff") poweroff;;
"destroy") destroy;;
+"delete-disks") delete_disks;;
*)
usage
exit 2
@@ -25,32 +26,31 @@ function main {
function usage {
echo "USAGE: ${0##*/} <command>"
echo "Commands:"
echo -e "\tcreate-docker\tcreate 5 libvirt nodes on the docker0 bridge"
echo -e "\tcreate-rkt\tcreate 5 libvirt nodes on a rkt CNI metal0 bridge"
echo -e "\tstart\t\tstart the 5 libvirt nodes"
echo -e "\treboot\t\treboot the 5 libvirt nodes"
echo -e "\tshutdown\tshutdown the 5 libvirt nodes"
echo -e "\tpoweroff\tpoweroff the 5 libvirt nodes"
echo -e "\tdestroy\t\tdestroy the 5 libvirt nodes"
echo -e "\tcreate-docker\tcreate 4 libvirt nodes on the docker0 bridge"
echo -e "\tcreate-rkt\tcreate 4 libvirt nodes on a rkt CNI metal0 bridge"
echo -e "\tstart\t\tstart the 4 libvirt nodes"
echo -e "\treboot\t\treboot the 4 libvirt nodes"
echo -e "\tshutdown\tshutdown the 4 libvirt nodes"
echo -e "\tpoweroff\tpoweroff the 4 libvirt nodes"
echo -e "\tdestroy\t\tdestroy the 4 libvirt nodes"
echo -e "\tdelete-disks\tdelete the allocated disks"
}
function create_docker {
-virt-install --name node1 -u 16e7d8a7-bfa9-428b-9117-363341bb330b --pxe --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node2 -u 264cd073-ca62-44b3-98c0-50aad5b5f819 --pxe --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node3 -u 39d2e747-2648-4d68-ae92-bbc70b245055 --pxe --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node4 -u 4ed46e8e-db69-471e-b874-0990dd65649d --pxe --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node5 -u 53683e94-3273-4a49-9a82-d769b88e3ccf --pxe --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
+virt-install --name node1 -u 16e7d8a7-bfa9-428b-9117-363341bb330b --pxe --disk pool=default,size=6 --boot=hd,network --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
+virt-install --name node2 -u 264cd073-ca62-44b3-98c0-50aad5b5f819 --pxe --disk pool=default,size=6 --boot=hd,network --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
+virt-install --name node3 -u 39d2e747-2648-4d68-ae92-bbc70b245055 --pxe --disk pool=default,size=6 --boot=hd,network --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
+virt-install --name node4 -u 4ed46e8e-db69-471e-b874-0990dd65649d --pxe --disk pool=default,size=6 --boot=hd,network --network=bridge:docker0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
}
function create_rkt {
-virt-install --name node1 -u 16e7d8a7-bfa9-428b-9117-363341bb330b --pxe --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node2 -u 264cd073-ca62-44b3-98c0-50aad5b5f819 --pxe --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node3 -u 39d2e747-2648-4d68-ae92-bbc70b245055 --pxe --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node4 -u 4ed46e8e-db69-471e-b874-0990dd65649d --pxe --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
-virt-install --name node5 -u 53683e94-3273-4a49-9a82-d769b88e3ccf --pxe --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --disk none --noautoconsole
+virt-install --name node1 -u 16e7d8a7-bfa9-428b-9117-363341bb330b --pxe --disk pool=default,size=6 --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
+virt-install --name node2 -u 264cd073-ca62-44b3-98c0-50aad5b5f819 --pxe --disk pool=default,size=6 --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
+virt-install --name node3 -u 39d2e747-2648-4d68-ae92-bbc70b245055 --pxe --disk pool=default,size=6 --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
+virt-install --name node4 -u 4ed46e8e-db69-471e-b874-0990dd65649d --pxe --disk pool=default,size=6 --network=bridge:metal0 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
}
-nodes=(node1 node2 node3 node4 node5)
+nodes=(node1 node2 node3 node4)
function start {
for node in ${nodes[@]}; do
@@ -82,4 +82,11 @@ function destroy {
done
}
+function delete_disks {
+echo "Deleting volumes... you may need to wait a few seconds or retry"
+for node in ${nodes[@]}; do
+virsh vol-delete --pool default $node.qcow2
+done
+}
main $@
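Two notes on the reworked node definitions: each VM now gets a 6 GB volume in the `default` pool, and the docker0 variant boots `hd,network`, so once `coreos-install` has populated the disk the next boot comes from it rather than PXE. A rough sketch of the cleanup flow the new `delete-disks` command enables (volume names follow the script's own assumption of `node1.qcow2` through `node4.qcow2`):
sudo ./scripts/libvirt poweroff
sudo ./scripts/libvirt delete-disks
sudo virsh vol-list default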