diff --git a/Documentation/getting-started-docker.md b/Documentation/getting-started-docker.md
index 2d7e5d2a..05e0a23d 100644
--- a/Documentation/getting-started-docker.md
+++ b/Documentation/getting-started-docker.md
@@ -63,6 +63,7 @@ Clean up the VM machines.
 
     sudo ./scripts/libvirt poweroff
     sudo ./scripts/libvirt destroy
+    sudo ./scripts/libvirt delete-disks
 
 ## Going Further
 
diff --git a/Documentation/getting-started-rkt.md b/Documentation/getting-started-rkt.md
index b0246a59..b1258083 100644
--- a/Documentation/getting-started-rkt.md
+++ b/Documentation/getting-started-rkt.md
@@ -24,7 +24,8 @@ Download the CoreOS PXE image assets to `assets/coreos`. The examples instruct m
 Define the `metal0` virtual bridge with [CNI](https://github.com/appc/cni).
 
 ```bash
-sudo bash -c 'cat > /etc/rkt/net.d/20-metal.conf << EOF{
+sudo bash -c 'cat > /etc/rkt/net.d/20-metal.conf << EOF
+{
   "name": "metal0",
   "type": "bridge",
   "bridge": "metal0",
@@ -43,6 +44,7 @@ EOF'
 
 Run the Config service (`bootcfg`) on the `metal0` network, with a known IP we'll use in later steps with DNS.
 
+    sudo rkt trust --prefix quay.io/coreos
     sudo rkt --insecure-options=image fetch docker://quay.io/coreos/bootcfg
 
 Currently, the insecure flag is needed since Docker images do not support signature verification. We'll ship an ACI soon to address this.
@@ -65,10 +67,10 @@ Create four VM nodes which have known hardware attributes. The nodes will be att
 
     sudo ./scripts/libvirt create-rkt
 
-In your Firewall Configuration, add `metal0` as a trusted interface.
-
 ## Network
 
+In your **Firewall Configuration**, add the `metal0` interface to the trusted zone.
+
 Since the virtual network has no network boot services, use the `dnsmasq` ACI to set up an example iPXE environment which runs DHCP, DNS, and TFTP. The `dnsmasq` container can help test different network setups.
 
 Build the `dnsmasq.aci` ACI.
@@ -101,6 +103,7 @@ Press ^] three times to stop a rkt pod.
 Clean up the VM machines.
     sudo ./scripts/libvirt poweroff
     sudo ./scripts/libvirt destroy
+    sudo ./scripts/libvirt delete-disks
 
 ## Going Further
 
diff --git a/api/cloud.go b/api/cloud.go
index fa58496d..a56091a8 100644
--- a/api/cloud.go
+++ b/api/cloud.go
@@ -36,7 +36,7 @@ func cloudHandler(store Store) ContextHandler {
 		}
 
 		// collect data for rendering
-		data := make(map[string]string)
+		data := make(map[string]interface{})
 		for k := range group.Metadata {
 			data[k] = group.Metadata[k]
 		}
diff --git a/api/groups.go b/api/groups.go
index 7dcdf5c4..8ac37b7f 100644
--- a/api/groups.go
+++ b/api/groups.go
@@ -20,7 +20,7 @@ type Group struct {
 	// Spec identifier
 	Spec string `yaml:"spec"`
 	// Custom Metadata
-	Metadata map[string]string `yaml:"metadata"`
+	Metadata map[string]interface{} `yaml:"metadata"`
 	// matcher conditions
 	Matcher RequirementSet `yaml:"require"`
 }
diff --git a/api/groups_test.go b/api/groups_test.go
index abd6fc95..56d9932a 100644
--- a/api/groups_test.go
+++ b/api/groups_test.go
@@ -17,7 +17,7 @@ var (
 	testGroup = Group{
 		Name: "test group",
 		Spec: "g1h2i3j4",
-		Metadata: map[string]string{
+		Metadata: map[string]interface{}{
 			"k8s_version": "v1.1.2",
 			"pod_network": "10.2.0.0/16",
 			"service_name": "etcd2",
diff --git a/api/ignition.go b/api/ignition.go
index b3c0e153..257bca21 100644
--- a/api/ignition.go
+++ b/api/ignition.go
@@ -33,7 +33,7 @@ func ignitionHandler(store Store) ContextHandler {
 		}
 
 		// collect data for rendering Ignition Config
-		data := make(map[string]string)
+		data := make(map[string]interface{})
 		for k := range group.Metadata {
 			data[k] = group.Metadata[k]
 		}
diff --git a/examples/etcd-rkt.yaml b/examples/etcd-rkt.yaml
index 049331c2..f7cc39a7 100644
--- a/examples/etcd-rkt.yaml
+++ b/examples/etcd-rkt.yaml
@@ -13,6 +13,7 @@ groups:
       ipv4_address: 172.15.0.21
       etcd_name: node1
       etcd_initial_cluster: "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
+      ssh_authorized_keys:
 
   - name: etcd Node 2
     spec: etcd
diff --git a/examples/ignition/coreos-install.yaml b/examples/ignition/coreos-install.yaml
index 8b9f7119..2cbf7246 100644
--- a/examples/ignition/coreos-install.yaml
+++ b/examples/ignition/coreos-install.yaml
@@ -17,3 +17,13 @@ systemd:
         ExecStart=/usr/bin/systemctl reboot
         [Install]
         WantedBy=multi-user.target
+
+{{ if .ssh_authorized_keys }}
+passwd:
+  users:
+    - name: core
+      ssh_authorized_keys:
+        {{ range $element := .ssh_authorized_keys }}
+        - {{$element}}
+        {{end}}
+{{end}}
diff --git a/examples/ignition/etcd.yaml b/examples/ignition/etcd.yaml
index b745c9e1..83b2d2f1 100644
--- a/examples/ignition/etcd.yaml
+++ b/examples/ignition/etcd.yaml
@@ -43,10 +43,12 @@ networkd:
         DNS=8.8.8.8
         Address={{.networkd_address}}
 
-{{ if .ssh_authorized_key }}
+{{ if .ssh_authorized_keys }}
 passwd:
   users:
     - name: core
       ssh_authorized_keys:
-        - {{.ssh_authorized_key}}
+        {{ range $element := .ssh_authorized_keys }}
+        - {{$element}}
+        {{end}}
 {{end}}
diff --git a/examples/ignition/etcd_proxy.yaml b/examples/ignition/etcd_proxy.yaml
index e5480a05..93bfff7e 100644
--- a/examples/ignition/etcd_proxy.yaml
+++ b/examples/ignition/etcd_proxy.yaml
@@ -30,10 +30,12 @@ systemd:
           --listen-client-urls=http://localhost:2379 \
           --initial-cluster=${ETCD_INITIAL_CLUSTER}
 
-{{ if .ssh_authorized_key }}
+{{ if .ssh_authorized_keys }}
 passwd:
   users:
     - name: core
       ssh_authorized_keys:
-        - {{.ssh_authorized_key}}
+        {{ range $element := .ssh_authorized_keys }}
+        - {{$element}}
+        {{end}}
 {{end}}
diff --git a/examples/ignition/network.yaml b/examples/ignition/network.yaml
index 2a1cb7ac..fdb0f3b1 100644
--- a/examples/ignition/network.yaml
+++ b/examples/ignition/network.yaml
@@ -26,10 +26,12 @@ networkd:
         DNS=8.8.8.8
         Address={{.networkd_address}}
 
-{{ if .ssh_authorized_key }}
+{{ if .ssh_authorized_keys }}
 passwd:
   users:
     - name: core
       ssh_authorized_keys:
-        - {{.ssh_authorized_key}}
+        {{ range $element := .ssh_authorized_keys }}
+        - {{$element}}
+        {{end}}
 {{end}}
diff --git a/examples/k8s-rkt.yaml b/examples/k8s-rkt.yaml
index 932ffff1..bcfb8a20 100644
--- a/examples/k8s-rkt.yaml
+++ b/examples/k8s-rkt.yaml
@@ -18,6 +18,7 @@ groups:
       k8s_dns_service_ip: 10.3.0.10
       k8s_advertise_ip: 172.15.0.21
       k8s_cert_endpoint: http://bootcfg.foo:8080/assets
+      ssh_authorized_keys:
 
   - name: Worker Node
     spec: kubernetes-worker
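As an illustrative sketch only (not part of the patch above), a group definition could now carry several SSH public keys through the list-valued `ssh_authorized_keys` metadata, which the updated templates iterate over with `range`. The key strings below are placeholders:

```yaml
# Hypothetical group entry with list-valued metadata (placeholder keys shown).
groups:
  - name: etcd Node 1
    spec: etcd
    metadata:
      ipv4_address: 172.15.0.21
      etcd_name: node1
      ssh_authorized_keys:
        - "ssh-rsa AAAA... core@example.com"
        - "ssh-ed25519 AAAA... admin@example.com"
```

Rendered through one of the updated Ignition templates, each list element would become its own entry under the `core` user's `ssh_authorized_keys`.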