From a93885fdcc6cdfc9bc975402c351f4d16a2f0c56 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Mon, 21 Mar 2016 01:01:48 -0700 Subject: [PATCH 1/8] examples: Rewrite k8s-worker from cloud-config to Ignition --- examples/ignition/k8s-worker.yaml | 193 ++++++++++++++++++ examples/k8s-docker.yaml | 6 +- examples/k8s-rkt.yaml | 6 +- .../profile.json | 5 +- 4 files changed, 203 insertions(+), 7 deletions(-) create mode 100644 examples/ignition/k8s-worker.yaml rename examples/profiles/{kubernetes-worker => k8s-worker}/profile.json (70%) diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml new file mode 100644 index 00000000..bcdf736f --- /dev/null +++ b/examples/ignition/k8s-worker.yaml @@ -0,0 +1,193 @@ +--- +ignition_version: 1 +systemd: + units: + - name: metadata.service + enable: true + contents: | + [Unit] + Description=Bare Metal Metadata Agent + [Service] + Type=oneshot + Environment=OUTPUT=/run/metadata/bootcfg + ExecStart=/usr/bin/mkdir -p /run/metadata + ExecStart=/usr/bin/bash -c 'curl --url "http://bootcfg.foo:8080/metadata?{{.query}}" --retry 10 --output ${OUTPUT}' + [Install] + WantedBy=multi-user.target + - name: fleet.service + enable: true + dropins: + - name: fleet-metadata.conf + contents: | + [Service] + Environment="FLEET_METADATA={{.fleet_metadata}}" + - name: etcd2.service + enable: true + dropins: + - name: etcd-metadata.conf + contents: | + [Unit] + Requires=metadata.service + After=metadata.service + [Service] + # ETCD_NAME, ETCD_INITIAL_CLUSTER + EnvironmentFile=/run/metadata/bootcfg + ExecStart= + ExecStart=/usr/bin/etcd2 \ + --advertise-client-urls=http://${IPV4_ADDRESS}:2379 \ + --initial-advertise-peer-urls=http://${IPV4_ADDRESS}:2380 \ + --listen-client-urls=http://0.0.0.0:2379 \ + --listen-peer-urls=http://${IPV4_ADDRESS}:2380 + - name: kubelet.service + enable: true + contents: | + [Unit] + Description=Kubelet via Hyperkube ACI + Requires=k8stls.service + After=k8stls.service + [Service] + ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests + Environment=KUBELET_VERSION={{.k8s_version}} + ExecStart=/usr/lib/coreos/kubelet-wrapper \ + --api_servers={{.k8s_controller_endpoint}} \ + --register-node=true \ + --allow-privileged=true \ + --config=/etc/kubernetes/manifests \ + --hostname-override={{.ipv4_address}} \ + --cluster_dns={{.k8s_dns_service_ip}} \ + --cluster_domain=cluster.local \ + --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \ + --tls-cert-file=/etc/kubernetes/ssl/worker.pem \ + --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + - name: k8stls.service + enable: true + contents: | + [Unit] + Description=Acquire Kubernetes TLS CA and Certificate + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker.pem -o /etc/kubernetes/ssl/worker.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker-key.pem -o /etc/kubernetes/ssl/worker-key.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem + [Install] + WantedBy=multi-user.target + - name: flanneld.service + dropins: + - name: 40-ExecStartPre-symlink.conf + contents: | + [Service] + ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env + - name: docker.service + dropins: + - name: 40-flannel.conf + contents: | + [Unit] + Requires=flanneld.service + After=flanneld.service + 
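(Aside on the template syntax in the units above, before the listing continues with the storage section: the {{.k8s_version}}, {{.ipv4_address}}, and similar placeholders are Go text/template actions that bootcfg resolves from the matched machine group's metadata, see examples/k8s-docker.yaml further down. A minimal sketch of that substitution, using only the standard library and sample values borrowed from the Worker 1 group; the rendering harness here is illustrative and is not part of this patch.)

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	// Fragment of the kubelet unit above; text/template resolves each
    	// {{.key}} against the map by key name, which mirrors how group
    	// metadata values reach the rendered Ignition config. The values
    	// below are samples taken from the Worker 1 group.
    	const frag = "--hostname-override={{.ipv4_address}} --cluster_dns={{.k8s_dns_service_ip}}\n"
    	t := template.Must(template.New("kubelet").Parse(frag))
    	meta := map[string]string{
    		"ipv4_address":       "172.17.0.22",
    		"k8s_dns_service_ip": "10.3.0.1",
    	}
    	if err := t.Execute(os.Stdout, meta); err != nil {
    		panic(err)
    	}
    	// Prints: --hostname-override=172.17.0.22 --cluster_dns=10.3.0.1
    }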
+storage: + disks: + - device: /dev/sda + wipe_table: true + partitions: + - label: ROOT + number: 0 + filesystems: + - device: "/dev/sda1" + format: "ext4" + create: + force: true + options: + - "-LROOT" + files: + - path: /etc/kubernetes/worker-kubeconfig.yaml + contents: | + apiVersion: v1 + kind: Config + clusters: + - name: local + cluster: + certificate-authority: /etc/kubernetes/ssl/ca.pem + users: + - name: kubelet + user: + client-certificate: /etc/kubernetes/ssl/worker.pem + client-key: /etc/kubernetes/ssl/worker-key.pem + contexts: + - context: + cluster: local + user: kubelet + name: kubelet-context + current-context: kubelet-context + - path: /etc/kubernetes/manifests/kube-proxy.yaml + contents: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-proxy + namespace: kube-system + spec: + hostNetwork: true + containers: + - name: kube-proxy + image: quay.io/coreos/hyperkube:{{.k8s_version}} + command: + - /hyperkube + - proxy + - --master={{.k8s_controller_endpoint}} + - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml + - --proxy-mode=iptables + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/ssl/certs + name: "ssl-certs" + - mountPath: /etc/kubernetes/worker-kubeconfig.yaml + name: "kubeconfig" + readOnly: true + - mountPath: /etc/kubernetes/ssl + name: "etc-kube-ssl" + readOnly: true + volumes: + - name: "ssl-certs" + hostPath: + path: "/usr/share/ca-certificates" + - name: "kubeconfig" + hostPath: + path: "/etc/kubernetes/worker-kubeconfig.yaml" + - name: "etc-kube-ssl" + hostPath: + path: "/etc/kubernetes/ssl" + - path: /etc/flannel/options.env + contents: | + FLANNELD_IFACE={{.ipv4_address}} + FLANNELD_ETCD_ENDPOINTS={{.k8s_etcd_endpoints}} + +networkd: + units: + - name: 00-{{.networkd_name}}.network + contents: | + [Match] + Name={{.networkd_name}} + [Network] + Gateway={{.networkd_gateway}} + DNS={{.networkd_dns}} + DNS=8.8.8.8 + Address={{.networkd_address}} + +{{ if .ssh_authorized_keys }} +passwd: + users: + - name: core + ssh_authorized_keys: + {{ range $element := .ssh_authorized_keys }} + - {{$element}} + {{end}} +{{end}} diff --git a/examples/k8s-docker.yaml b/examples/k8s-docker.yaml index 69e2b711..837e2c78 100644 --- a/examples/k8s-docker.yaml +++ b/examples/k8s-docker.yaml @@ -22,7 +22,7 @@ groups: etcd_initial_cluster: "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380" - name: Worker 1 - profile: kubernetes-worker + profile: k8s-worker require: uuid: 264cd073-ca62-44b3-98c0-50aad5b5f819 metadata: @@ -31,6 +31,7 @@ groups: networkd_gateway: 172.17.0.1 networkd_dns: 172.17.0.3 networkd_address: 172.17.0.22/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.17.0.21:2379,http://172.17.0.22:2379,http://172.17.0.23:2379" k8s_controller_endpoint: https://172.17.0.21 k8s_dns_service_ip: 10.3.0.1 @@ -40,7 +41,7 @@ groups: etcd_initial_cluster: "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380" - name: Worker 2 - profile: kubernetes-worker + profile: k8s-worker require: uuid: 39d2e747-2648-4d68-ae92-bbc70b245055 metadata: @@ -49,6 +50,7 @@ groups: networkd_gateway: 172.17.0.1 networkd_dns: 172.17.0.3 networkd_address: 172.17.0.23/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.17.0.21:2379,http://172.17.0.22:2379,http://172.17.0.23:2379" k8s_controller_endpoint: https://172.17.0.21 k8s_dns_service_ip: 10.3.0.1 diff --git a/examples/k8s-rkt.yaml b/examples/k8s-rkt.yaml index 5fa3b3b6..3f2848a4 100644 --- 
a/examples/k8s-rkt.yaml +++ b/examples/k8s-rkt.yaml @@ -23,7 +23,7 @@ groups: ssh_authorized_keys: - name: Worker 1 - profile: kubernetes-worker + profile: k8s-worker require: uuid: 264cd073-ca62-44b3-98c0-50aad5b5f819 metadata: @@ -32,6 +32,7 @@ groups: networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 networkd_address: 172.15.0.22/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379" k8s_controller_endpoint: https://172.15.0.21 k8s_dns_service_ip: 10.3.0.1 @@ -42,7 +43,7 @@ groups: ssh_authorized_keys: - name: Worker 2 - profile: kubernetes-worker + profile: k8s-worker require: uuid: 39d2e747-2648-4d68-ae92-bbc70b245055 metadata: @@ -51,6 +52,7 @@ groups: networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 networkd_address: 172.15.0.23/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379" k8s_controller_endpoint: https://172.15.0.21 k8s_dns_service_ip: 10.3.0.1 diff --git a/examples/profiles/kubernetes-worker/profile.json b/examples/profiles/k8s-worker/profile.json similarity index 70% rename from examples/profiles/kubernetes-worker/profile.json rename to examples/profiles/k8s-worker/profile.json index 4f6ea8e1..e6c28b23 100644 --- a/examples/profiles/kubernetes-worker/profile.json +++ b/examples/profiles/k8s-worker/profile.json @@ -5,12 +5,11 @@ "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], "cmdline": { "root": "/dev/sda1", - "cloud-config-url": "http://bootcfg.foo:8080/cloud?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.autologin": "", "coreos.first_boot": "" } }, - "cloud_id": "kubernetes-worker.sh", - "ignition_id": "etcd-root-fs.yaml" + "cloud_id": "", + "ignition_id": "k8s-worker.yaml" } From e1fd5dc699c24d3720e9ae502efcc505223bfd83 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Mon, 21 Mar 2016 17:09:39 -0700 Subject: [PATCH 2/8] examples: Rewrite k8s-master from cloud-config to Ignition --- examples/ignition/k8s-master.yaml | 562 ++++++++++++++++++ examples/k8s-docker.yaml | 3 +- examples/k8s-rkt.yaml | 3 +- .../profile.json | 5 +- 4 files changed, 568 insertions(+), 5 deletions(-) create mode 100644 examples/ignition/k8s-master.yaml rename examples/profiles/{kubernetes-master => k8s-master}/profile.json (70%) diff --git a/examples/ignition/k8s-master.yaml b/examples/ignition/k8s-master.yaml new file mode 100644 index 00000000..be3cd099 --- /dev/null +++ b/examples/ignition/k8s-master.yaml @@ -0,0 +1,562 @@ +--- +ignition_version: 1 +systemd: + units: + - name: metadata.service + enable: true + contents: | + [Unit] + Description=Bare Metal Metadata Agent + [Service] + Type=oneshot + Environment=OUTPUT=/run/metadata/bootcfg + ExecStart=/usr/bin/mkdir --parent /run/metadata + ExecStart=/usr/bin/bash -c 'curl --url "http://bootcfg.foo:8080/metadata?{{.query}}" --retry 10 --output ${OUTPUT}' + [Install] + WantedBy=multi-user.target + - name: fleet.service + enable: true + dropins: + - name: fleet-metadata.conf + contents: | + [Service] + Environment="FLEET_METADATA={{.fleet_metadata}}" + - name: etcd2.service + enable: true + dropins: + - name: etcd-metadata.conf + contents: | + [Unit] + Requires=metadata.service + After=metadata.service + [Service] + # ETCD_NAME, ETCD_INITIAL_CLUSTER + EnvironmentFile=/run/metadata/bootcfg + ExecStart= + ExecStart=/usr/bin/etcd2 \ + 
--advertise-client-urls=http://${IPV4_ADDRESS}:2379 \ + --initial-advertise-peer-urls=http://${IPV4_ADDRESS}:2380 \ + --listen-client-urls=http://0.0.0.0:2379 \ + --listen-peer-urls=http://${IPV4_ADDRESS}:2380 + - name: k8s-addons.service + enable: true + contents: | + [Unit] + Description=Start Kubernetes DNS Controller and Service + Requires=kubelet.service + After=kubelet.service + [Service] + Type=oneshot + ExecStart=/opt/k8s-addons + [Install] + WantedBy=multi-user.target + - name: kubelet.service + enable: true + contents: | + [Unit] + Description=Kubelet via Hyperkube ACI + Requires=k8stls.service + After=k8stls.service + Requires=flanneld.service + After=flanneld.service + [Service] + ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests + Environment=KUBELET_VERSION={{.k8s_version}} + ExecStart=/usr/lib/coreos/kubelet-wrapper \ + --api_servers=http://127.0.0.1:8080 \ + --register-node=false \ + --allow-privileged=true \ + --config=/etc/kubernetes/manifests \ + --hostname-override={{.ipv4_address}} \ + --cluster_dns={{.k8s_dns_service_ip}} \ + --cluster_domain=cluster.local + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + - name: k8stls.service + enable: true + contents: | + [Unit] + Description=Acquire Kubernetes TLS CA and Certificate + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver.pem -o /etc/kubernetes/ssl/apiserver.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver-key.pem -o /etc/kubernetes/ssl/apiserver-key.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem + [Install] + WantedBy=multi-user.target + - name: flanneld.service + enable: true + dropins: + - name: 40-ExecStartPre-symlink.conf + contents: | + [Service] + ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env + ExecStartPre=/opt/init-flannel + - name: docker.service + dropins: + - name: 40-flannel.conf + contents: | + [Unit] + Requires=flanneld.service + After=flanneld.service +storage: + disks: + - device: /dev/sda + wipe_table: true + partitions: + - label: ROOT + number: 0 + filesystems: + - device: "/dev/sda1" + format: "ext4" + create: + force: true + options: + - "-LROOT" + files: + - path: /etc/kubernetes/manifests/kube-proxy.yaml + contents: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-proxy + namespace: kube-system + spec: + hostNetwork: true + containers: + - name: kube-proxy + image: quay.io/coreos/hyperkube:{{.k8s_version}} + command: + - /hyperkube + - proxy + - --master=http://127.0.0.1:8080 + - --proxy-mode=iptables + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + volumes: + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host + - path: /etc/kubernetes/manifests/kube-apiserver.yaml + contents: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-apiserver + namespace: kube-system + spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: quay.io/coreos/hyperkube:{{.k8s_version}} + command: + - /hyperkube + - apiserver + - --bind-address=0.0.0.0 + - --etcd-servers={{.k8s_etcd_endpoints}} + - --allow-privileged=true + - --service-cluster-ip-range={{.k8s_service_ip_range}} + - --secure-port=443 + - --advertise-address={{.ipv4_address}} + - 
--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem + - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem + - --client-ca-file=/etc/kubernetes/ssl/ca.pem + - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem + ports: + - containerPort: 443 + hostPort: 443 + name: https + - containerPort: 8080 + hostPort: 8080 + name: local + volumeMounts: + - mountPath: /etc/kubernetes/ssl + name: ssl-certs-kubernetes + readOnly: true + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/ssl + name: ssl-certs-kubernetes + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host + - path: /etc/kubernetes/manifests/kube-podmaster.yaml + contents: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-podmaster + namespace: kube-system + spec: + hostNetwork: true + containers: + - name: scheduler-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers={{.k8s_etcd_endpoints}} + - --key=scheduler + - --whoami={{.ipv4_address}} + - --source-file=/src/manifests/kube-scheduler.yaml + - --dest-file=/dst/manifests/kube-scheduler.yaml + volumeMounts: + - mountPath: /src/manifests + name: manifest-src + readOnly: true + - mountPath: /dst/manifests + name: manifest-dst + - name: controller-manager-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers={{.k8s_etcd_endpoints}} + - --key=controller + - --whoami={{.ipv4_address}} + - --source-file=/src/manifests/kube-controller-manager.yaml + - --dest-file=/dst/manifests/kube-controller-manager.yaml + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: /src/manifests + name: manifest-src + readOnly: true + - mountPath: /dst/manifests + name: manifest-dst + volumes: + - hostPath: + path: /srv/kubernetes/manifests + name: manifest-src + - hostPath: + path: /etc/kubernetes/manifests + name: manifest-dst + - path: /etc/flannel/options.env + contents: | + FLANNELD_IFACE={{.ipv4_address}} + FLANNELD_ETCD_ENDPOINTS={{.k8s_etcd_endpoints}} + - path: /srv/kubernetes/manifests/kube-controller-manager.yaml + contents: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-controller-manager + namespace: kube-system + spec: + containers: + - name: kube-controller-manager + image: quay.io/coreos/hyperkube:{{.k8s_version}} + command: + - /hyperkube + - controller-manager + - --master=http://127.0.0.1:8080 + - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem + - --root-ca-file=/etc/kubernetes/ssl/ca.pem + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/kubernetes/ssl + name: ssl-certs-kubernetes + readOnly: true + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/ssl + name: ssl-certs-kubernetes + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host + - path: /srv/kubernetes/manifests/kube-scheduler.yaml + contents: | + apiVersion: v1 + kind: Pod + metadata: + name: kube-scheduler + namespace: kube-system + spec: + hostNetwork: true + containers: + - name: kube-scheduler + image: quay.io/coreos/hyperkube:{{.k8s_version}} + command: + - /hyperkube + - scheduler + - --master=http://127.0.0.1:8080 + livenessProbe: + httpGet: 
+ host: 127.0.0.1 + path: /healthz + port: 10251 + initialDelaySeconds: 15 + timeoutSeconds: 1 + - path: /srv/kubernetes/manifests/kube-dns-rc.json + contents: | + { + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": { + "labels": { + "k8s-app": "kube-dns", + "kubernetes.io/cluster-service": "true", + "version": "v9" + }, + "name": "kube-dns-v9", + "namespace": "kube-system" + }, + "spec": { + "replicas": 1, + "selector": { + "k8s-app": "kube-dns", + "version": "v9" + }, + "template": { + "metadata": { + "labels": { + "k8s-app": "kube-dns", + "kubernetes.io/cluster-service": "true", + "version": "v9" + } + }, + "spec": { + "containers": [ + { + "command": [ + "/usr/local/bin/etcd", + "-data-dir", + "/var/etcd/data", + "-listen-client-urls", + "http://127.0.0.1:2379,http://127.0.0.1:4001", + "-advertise-client-urls", + "http://127.0.0.1:2379,http://127.0.0.1:4001", + "-initial-cluster-token", + "skydns-etcd" + ], + "image": "gcr.io/google_containers/etcd:2.0.9", + "name": "etcd", + "resources": { + "limits": { + "cpu": "100m", + "memory": "50Mi" + } + }, + "volumeMounts": [ + { + "mountPath": "/var/etcd/data", + "name": "etcd-storage" + } + ] + }, + { + "args": [ + "-domain=cluster.local" + ], + "image": "gcr.io/google_containers/kube2sky:1.11", + "name": "kube2sky", + "resources": { + "limits": { + "cpu": "100m", + "memory": "50Mi" + } + } + }, + { + "args": [ + "-machines=http://127.0.0.1:4001", + "-addr=0.0.0.0:53", + "-ns-rotate=false", + "-domain=cluster.local." + ], + "image": "gcr.io/google_containers/skydns:2015-10-13-8c72f8c", + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": 8080, + "scheme": "HTTP" + }, + "initialDelaySeconds": 30, + "timeoutSeconds": 5 + }, + "name": "skydns", + "ports": [ + { + "containerPort": 53, + "name": "dns", + "protocol": "UDP" + }, + { + "containerPort": 53, + "name": "dns-tcp", + "protocol": "TCP" + } + ], + "readinessProbe": { + "httpGet": { + "path": "/healthz", + "port": 8080, + "scheme": "HTTP" + }, + "initialDelaySeconds": 1, + "timeoutSeconds": 5 + }, + "resources": { + "limits": { + "cpu": "100m", + "memory": "50Mi" + } + } + }, + { + "args": [ + "-cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null", + "-port=8080" + ], + "image": "gcr.io/google_containers/exechealthz:1.0", + "name": "healthz", + "ports": [ + { + "containerPort": 8080, + "protocol": "TCP" + } + ], + "resources": { + "limits": { + "cpu": "10m", + "memory": "20Mi" + } + } + } + ], + "dnsPolicy": "Default", + "volumes": [ + { + "emptyDir": {}, + "name": "etcd-storage" + } + ] + } + } + } + } + - path: /srv/kubernetes/manifests/kube-dns-svc.json + contents: | + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": "kube-dns", + "namespace": "kube-system", + "labels": { + "k8s-app": "kube-dns", + "kubernetes.io/name": "KubeDNS", + "kubernetes.io/cluster-service": "true" + } + }, + "spec": { + "clusterIP": "{{.k8s_dns_service_ip}}", + "ports": [ + { + "protocol": "UDP", + "name": "dns", + "port": 53 + }, + { + "protocol": "TCP", + "name": "dns-tcp", + "port": 53 + } + ], + "selector": { + "k8s-app": "kube-dns" + } + } + } + - path: /srv/kubernetes/manifests/kube-system.json + contents: | + { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "kube-system" + } + } + - path: /opt/init-flannel + mode: 320 + contents: | + #!/bin/bash + function init_flannel { + echo "Waiting for etcd..." 
+ while true + do + IFS=',' read -ra ES <<< "{{.k8s_etcd_endpoints}}" + for ETCD in "${ES[@]}"; do + echo "Trying: $ETCD" + if [ -n "$(curl --silent "$ETCD/v2/machines")" ]; then + local ACTIVE_ETCD=$ETCD + break + fi + sleep 1 + done + if [ -n "$ACTIVE_ETCD" ]; then + break + fi + done + RES=$(curl --silent -X PUT -d "value={\"Network\":\"{{.k8s_pod_network}}\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false") + if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then + echo "Unexpected error configuring flannel pod network: $RES" + fi + } + init_flannel + - path: /opt/k8s-addons + mode: 320 + contents: | + #!/bin/bash + echo "Waiting for Kubernetes API..." + until curl --silent "http://127.0.0.1:8080/version" + do + sleep 5 + done + echo "K8S: kube-system namespace" + curl --silent -XPOST -d"$(cat /srv/kubernetes/manifests/kube-system.json)" "http://127.0.0.1:8080/api/v1/namespaces" > /dev/null + echo "K8S: DNS addon" + curl --silent -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-rc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null + curl --silent -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null + +networkd: + units: + - name: 00-{{.networkd_name}}.network + contents: | + [Match] + Name={{.networkd_name}} + [Network] + Gateway={{.networkd_gateway}} + DNS={{.networkd_dns}} + DNS=8.8.8.8 + Address={{.networkd_address}} + +{{ if .ssh_authorized_keys }} +passwd: + users: + - name: core + ssh_authorized_keys: + {{ range $element := .ssh_authorized_keys }} + - {{$element}} + {{end}} +{{end}} diff --git a/examples/k8s-docker.yaml b/examples/k8s-docker.yaml index 837e2c78..031b833f 100644 --- a/examples/k8s-docker.yaml +++ b/examples/k8s-docker.yaml @@ -2,7 +2,7 @@ api_version: v1alpha1 groups: - name: Master Node - profile: kubernetes-master + profile: k8s-master require: uuid: 16e7d8a7-bfa9-428b-9117-363341bb330b metadata: @@ -11,6 +11,7 @@ groups: networkd_gateway: 172.17.0.1 networkd_dns: 172.17.0.3 networkd_address: 172.17.0.21/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.17.0.21:2379,http://172.17.0.22:2379,http://172.17.0.23:2379" k8s_pod_network: 10.2.0.0/16 k8s_service_ip_range: 10.3.0.0/24 diff --git a/examples/k8s-rkt.yaml b/examples/k8s-rkt.yaml index 3f2848a4..40f44f2e 100644 --- a/examples/k8s-rkt.yaml +++ b/examples/k8s-rkt.yaml @@ -2,7 +2,7 @@ api_version: v1alpha1 groups: - name: Master Node - profile: kubernetes-master + profile: k8s-master require: uuid: 16e7d8a7-bfa9-428b-9117-363341bb330b metadata: @@ -11,6 +11,7 @@ groups: networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 networkd_address: 172.15.0.21/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379" k8s_pod_network: 10.2.0.0/16 k8s_service_ip_range: 10.3.0.0/24 diff --git a/examples/profiles/kubernetes-master/profile.json b/examples/profiles/k8s-master/profile.json similarity index 70% rename from examples/profiles/kubernetes-master/profile.json rename to examples/profiles/k8s-master/profile.json index 5c997bab..be525c4f 100644 --- a/examples/profiles/kubernetes-master/profile.json +++ b/examples/profiles/k8s-master/profile.json @@ -5,12 +5,11 @@ "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], "cmdline": { "root": "/dev/sda1", - "cloud-config-url": 
"http://bootcfg.foo:8080/cloud?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.autologin": "", "coreos.first_boot": "" } }, - "cloud_id": "kubernetes-master.sh", - "ignition_id": "etcd-root-fs.yaml" + "cloud_id": "", + "ignition_id": "k8s-master.yaml" } From ce806cb92a9a56bfb80c47b834a4e9fde5c384a0 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Mon, 21 Mar 2016 18:05:13 -0700 Subject: [PATCH 3/8] examples: Remove metadata agent from k8s-master and k8s-worker * Clusters nodes are statically declared, no need to load dynamic node metadata --- examples/ignition/k8s-master.yaml | 101 ++++++++---------- examples/ignition/k8s-worker.yaml | 101 ++++++++---------- .../profiles/k8s-master-install/profile.json | 2 +- examples/profiles/k8s-master/profile.json | 2 +- .../profiles/k8s-worker-install/profile.json | 2 +- examples/profiles/k8s-worker/profile.json | 2 +- 6 files changed, 88 insertions(+), 122 deletions(-) diff --git a/examples/ignition/k8s-master.yaml b/examples/ignition/k8s-master.yaml index be3cd099..4aef058f 100644 --- a/examples/ignition/k8s-master.yaml +++ b/examples/ignition/k8s-master.yaml @@ -2,52 +2,52 @@ ignition_version: 1 systemd: units: - - name: metadata.service - enable: true - contents: | - [Unit] - Description=Bare Metal Metadata Agent - [Service] - Type=oneshot - Environment=OUTPUT=/run/metadata/bootcfg - ExecStart=/usr/bin/mkdir --parent /run/metadata - ExecStart=/usr/bin/bash -c 'curl --url "http://bootcfg.foo:8080/metadata?{{.query}}" --retry 10 --output ${OUTPUT}' - [Install] - WantedBy=multi-user.target - - name: fleet.service - enable: true - dropins: - - name: fleet-metadata.conf - contents: | - [Service] - Environment="FLEET_METADATA={{.fleet_metadata}}" - name: etcd2.service enable: true dropins: - - name: etcd-metadata.conf + - name: 40-etcd-cluster.conf + contents: | + [Service] + Environment="ETCD_NAME={{.etcd_name}}" + Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.ipv4_address}}:2379" + Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.ipv4_address}}:2380" + Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379" + Environment="ETCD_LISTEN_PEER_URLS=http://{{.ipv4_address}}:2380" + Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}" + - name: fleet.service + enable: true + dropins: + - name: 40-fleet-metadata.conf + contents: | + [Service] + Environment="FLEET_METADATA={{.fleet_metadata}}" + - name: flanneld.service + dropins: + - name: 40-ExecStartPre-symlink.conf + contents: | + [Service] + ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env + ExecStartPre=/opt/init-flannel + - name: docker.service + dropins: + - name: 40-flannel.conf contents: | [Unit] - Requires=metadata.service - After=metadata.service - [Service] - # ETCD_NAME, ETCD_INITIAL_CLUSTER - EnvironmentFile=/run/metadata/bootcfg - ExecStart= - ExecStart=/usr/bin/etcd2 \ - --advertise-client-urls=http://${IPV4_ADDRESS}:2379 \ - --initial-advertise-peer-urls=http://${IPV4_ADDRESS}:2380 \ - --listen-client-urls=http://0.0.0.0:2379 \ - --listen-peer-urls=http://${IPV4_ADDRESS}:2380 - - name: k8s-addons.service + Requires=flanneld.service + After=flanneld.service + - name: k8stls.service enable: true contents: | [Unit] - Description=Start Kubernetes DNS Controller and Service - Requires=kubelet.service - After=kubelet.service + Description=Acquire Kubernetes TLS CA and Certificate + Requires=network-online.target + After=network-online.target [Service] 
Type=oneshot - ExecStart=/opt/k8s-addons + ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver.pem -o /etc/kubernetes/ssl/apiserver.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver-key.pem -o /etc/kubernetes/ssl/apiserver-key.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem [Install] WantedBy=multi-user.target - name: kubelet.service @@ -74,36 +74,19 @@ systemd: RestartSec=10 [Install] WantedBy=multi-user.target - - name: k8stls.service + - name: k8s-addons.service enable: true contents: | [Unit] - Description=Acquire Kubernetes TLS CA and Certificate - Requires=network-online.target - After=network-online.target + Description=Start Kubernetes DNS Controller and Service + Requires=kubelet.service + After=kubelet.service [Service] Type=oneshot - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver.pem -o /etc/kubernetes/ssl/apiserver.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver-key.pem -o /etc/kubernetes/ssl/apiserver-key.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem + ExecStart=/opt/k8s-addons [Install] WantedBy=multi-user.target - - name: flanneld.service - enable: true - dropins: - - name: 40-ExecStartPre-symlink.conf - contents: | - [Service] - ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env - ExecStartPre=/opt/init-flannel - - name: docker.service - dropins: - - name: 40-flannel.conf - contents: | - [Unit] - Requires=flanneld.service - After=flanneld.service + storage: disks: - device: /dev/sda diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml index bcdf736f..8bf98026 100644 --- a/examples/ignition/k8s-worker.yaml +++ b/examples/ignition/k8s-worker.yaml @@ -2,42 +2,53 @@ ignition_version: 1 systemd: units: - - name: metadata.service - enable: true - contents: | - [Unit] - Description=Bare Metal Metadata Agent - [Service] - Type=oneshot - Environment=OUTPUT=/run/metadata/bootcfg - ExecStart=/usr/bin/mkdir -p /run/metadata - ExecStart=/usr/bin/bash -c 'curl --url "http://bootcfg.foo:8080/metadata?{{.query}}" --retry 10 --output ${OUTPUT}' - [Install] - WantedBy=multi-user.target - - name: fleet.service - enable: true - dropins: - - name: fleet-metadata.conf - contents: | - [Service] - Environment="FLEET_METADATA={{.fleet_metadata}}" - name: etcd2.service enable: true dropins: - - name: etcd-metadata.conf + - name: 40-etcd-cluster.conf + contents: | + [Service] + Environment="ETCD_NAME={{.etcd_name}}" + Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.ipv4_address}}:2379" + Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.ipv4_address}}:2380" + Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379" + Environment="ETCD_LISTEN_PEER_URLS=http://{{.ipv4_address}}:2380" + Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}" + - name: fleet.service + enable: true + dropins: + - name: 40-fleet-metadata.conf + contents: | + [Service] + Environment="FLEET_METADATA={{.fleet_metadata}}" + - name: flanneld.service + dropins: + - name: 40-ExecStartPre-symlink.conf + contents: | + [Service] + ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env + - name: docker.service + dropins: + - name: 40-flannel.conf contents: | [Unit] - Requires=metadata.service - After=metadata.service - [Service] - # ETCD_NAME, ETCD_INITIAL_CLUSTER - 
EnvironmentFile=/run/metadata/bootcfg - ExecStart= - ExecStart=/usr/bin/etcd2 \ - --advertise-client-urls=http://${IPV4_ADDRESS}:2379 \ - --initial-advertise-peer-urls=http://${IPV4_ADDRESS}:2380 \ - --listen-client-urls=http://0.0.0.0:2379 \ - --listen-peer-urls=http://${IPV4_ADDRESS}:2380 + Requires=flanneld.service + After=flanneld.service + - name: k8stls.service + enable: true + contents: | + [Unit] + Description=Acquire Kubernetes TLS CA and Certificate + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker.pem -o /etc/kubernetes/ssl/worker.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker-key.pem -o /etc/kubernetes/ssl/worker-key.pem + ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem + [Install] + WantedBy=multi-user.target - name: kubelet.service enable: true contents: | @@ -63,34 +74,6 @@ systemd: RestartSec=10 [Install] WantedBy=multi-user.target - - name: k8stls.service - enable: true - contents: | - [Unit] - Description=Acquire Kubernetes TLS CA and Certificate - Requires=network-online.target - After=network-online.target - [Service] - Type=oneshot - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker.pem -o /etc/kubernetes/ssl/worker.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker-key.pem -o /etc/kubernetes/ssl/worker-key.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem - [Install] - WantedBy=multi-user.target - - name: flanneld.service - dropins: - - name: 40-ExecStartPre-symlink.conf - contents: | - [Service] - ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env - - name: docker.service - dropins: - - name: 40-flannel.conf - contents: | - [Unit] - Requires=flanneld.service - After=flanneld.service storage: disks: diff --git a/examples/profiles/k8s-master-install/profile.json b/examples/profiles/k8s-master-install/profile.json index 963fa4f8..a5f7e1a4 100644 --- a/examples/profiles/k8s-master-install/profile.json +++ b/examples/profiles/k8s-master-install/profile.json @@ -1,5 +1,5 @@ { - "id": "kubernetes-master", + "id": "k8s-master-install", "boot": { "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], diff --git a/examples/profiles/k8s-master/profile.json b/examples/profiles/k8s-master/profile.json index be525c4f..d3f3624a 100644 --- a/examples/profiles/k8s-master/profile.json +++ b/examples/profiles/k8s-master/profile.json @@ -1,5 +1,5 @@ { - "id": "kubernetes-master", + "id": "k8s-master", "boot": { "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], diff --git a/examples/profiles/k8s-worker-install/profile.json b/examples/profiles/k8s-worker-install/profile.json index cd08e4a4..d131a250 100644 --- a/examples/profiles/k8s-worker-install/profile.json +++ b/examples/profiles/k8s-worker-install/profile.json @@ -1,5 +1,5 @@ { - "id": "kubernetes-worker", + "id": "k8s-worker-install", "boot": { "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], diff --git a/examples/profiles/k8s-worker/profile.json b/examples/profiles/k8s-worker/profile.json index e6c28b23..aa493303 100644 
--- a/examples/profiles/k8s-worker/profile.json +++ b/examples/profiles/k8s-worker/profile.json @@ -1,5 +1,5 @@ { - "id": "kubernetes-worker", + "id": "k8s-worker", "boot": { "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], From 62d2b43fe18d2346496b26acdad6edd8e547324e Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Mon, 21 Mar 2016 22:31:44 -0700 Subject: [PATCH 4/8] examples: Update k8s-install config to use Ignition only * Ignition is preferred over Cloud-Config --- examples/ignition/coreos-install.yaml | 3 +-- examples/ignition/k8s-master.yaml | 6 ++++++ examples/ignition/k8s-worker.yaml | 6 ++++++ examples/k8s-docker.yaml | 3 +++ examples/k8s-install.yaml | 9 ++++----- examples/k8s-rkt.yaml | 3 +++ examples/profiles/k8s-master-install/profile.json | 5 ++--- examples/profiles/k8s-worker-install/profile.json | 5 ++--- 8 files changed, 27 insertions(+), 13 deletions(-) diff --git a/examples/ignition/coreos-install.yaml b/examples/ignition/coreos-install.yaml index 1735a802..4920744b 100644 --- a/examples/ignition/coreos-install.yaml +++ b/examples/ignition/coreos-install.yaml @@ -11,8 +11,7 @@ systemd: [Service] Type=oneshot ExecStart=/usr/bin/curl {{.ignition_endpoint}}?{{.query}}&os=installed -o ignition.json - ExecStart=/usr/bin/curl {{.cloud_endpoint}}?{{.query}}&os=installed -o cloud - ExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if .cloud_endpoint}}-c cloud{{end}} + ExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json ExecStart=/usr/bin/udevadm settle ExecStart=/usr/bin/systemctl reboot [Install] diff --git a/examples/ignition/k8s-master.yaml b/examples/ignition/k8s-master.yaml index 4aef058f..f171d9c1 100644 --- a/examples/ignition/k8s-master.yaml +++ b/examples/ignition/k8s-master.yaml @@ -88,6 +88,7 @@ systemd: WantedBy=multi-user.target storage: + {{ if .pxe }} disks: - device: /dev/sda wipe_table: true @@ -101,6 +102,11 @@ storage: force: true options: - "-LROOT" + {{else}} + filesystems: + - device: "/dev/disk/by-label/ROOT" + format: "ext4" + {{end}} files: - path: /etc/kubernetes/manifests/kube-proxy.yaml contents: | diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml index 8bf98026..dc85a26a 100644 --- a/examples/ignition/k8s-worker.yaml +++ b/examples/ignition/k8s-worker.yaml @@ -76,6 +76,7 @@ systemd: WantedBy=multi-user.target storage: + {{ if .pxe }} disks: - device: /dev/sda wipe_table: true @@ -89,6 +90,11 @@ storage: force: true options: - "-LROOT" + {{else}} + filesystems: + - device: "/dev/disk/by-label/ROOT" + format: "ext4" + {{end}} files: - path: /etc/kubernetes/worker-kubeconfig.yaml contents: | diff --git a/examples/k8s-docker.yaml b/examples/k8s-docker.yaml index 031b833f..a567d9a5 100644 --- a/examples/k8s-docker.yaml +++ b/examples/k8s-docker.yaml @@ -7,6 +7,7 @@ groups: uuid: 16e7d8a7-bfa9-428b-9117-363341bb330b metadata: ipv4_address: 172.17.0.21 + pxe: "true" networkd_name: ens3 networkd_gateway: 172.17.0.1 networkd_dns: 172.17.0.3 @@ -28,6 +29,7 @@ groups: uuid: 264cd073-ca62-44b3-98c0-50aad5b5f819 metadata: ipv4_address: 172.17.0.22 + pxe: "true" networkd_name: ens3 networkd_gateway: 172.17.0.1 networkd_dns: 172.17.0.3 @@ -47,6 +49,7 @@ groups: uuid: 39d2e747-2648-4d68-ae92-bbc70b245055 metadata: ipv4_address: 172.17.0.23 + pxe: "true" networkd_name: ens3 networkd_gateway: 172.17.0.1 networkd_dns: 172.17.0.3 
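(Aside on the pxe flag introduced in this patch: the pxe: "true" metadata added to the groups above pairs with the {{ if .pxe }} guards patched into the storage sections, so PXE-booted machines partition and format /dev/sda while machines provisioned via coreos-install, the k8s-install.yaml groups below, omit the flag and reuse the existing ROOT filesystem. A minimal sketch of how that conditional renders; the template text is abbreviated from the patch and the harness is assumed, not bootcfg's actual code.)

    package main

    import (
    	"os"
    	"text/template"
    )

    // Abbreviated from the storage sections patched above; the real
    // templates also set wipe_table, the ROOT partition label, and
    // mkfs options.
    const storage = `storage:
    {{ if .pxe }}  disks:
        - device: /dev/sda
    {{else}}  filesystems:
        - device: "/dev/disk/by-label/ROOT"
    {{end}}`

    func main() {
    	t := template.Must(template.New("storage").Parse(storage))
    	// PXE groups set pxe: "true"; install groups omit the key, so the
    	// empty string is falsey and the {{else}} branch renders instead.
    	if err := t.Execute(os.Stdout, map[string]string{"pxe": "true"}); err != nil {
    		panic(err)
    	}
    }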
diff --git a/examples/k8s-install.yaml b/examples/k8s-install.yaml index 008f391c..5b3c2398 100644 --- a/examples/k8s-install.yaml +++ b/examples/k8s-install.yaml @@ -7,7 +7,6 @@ groups: coreos_channel: alpha coreos_version: 983.0.0 ignition_endpoint: http://bootcfg.foo:8080/ignition - cloud_endpoint: http://bootcfg.foo:8080/cloud - name: Master Node profile: k8s-master-install @@ -16,11 +15,11 @@ groups: os: installed metadata: ipv4_address: 172.15.0.21 - autoupdate: "true" networkd_name: ens3 networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 networkd_address: 172.15.0.21/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379" k8s_pod_network: 10.2.0.0/16 k8s_service_ip_range: 10.3.0.0/24 @@ -39,11 +38,11 @@ groups: os: installed metadata: ipv4_address: 172.15.0.22 - autoupdate: "true" networkd_name: ens3 networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 networkd_address: 172.15.0.22/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379" k8s_controller_endpoint: https://172.15.0.21 k8s_dns_service_ip: 10.3.0.1 @@ -60,11 +59,11 @@ groups: os: installed metadata: ipv4_address: 172.15.0.23 - autoupdate: "true" networkd_name: ens3 networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 networkd_address: 172.15.0.23/16 + k8s_version: v1.1.8_coreos.0 k8s_etcd_endpoints: "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379" k8s_controller_endpoint: https://172.15.0.21 k8s_dns_service_ip: 10.3.0.1 @@ -72,4 +71,4 @@ groups: fleet_metadata: "role=etcd,name=node3" etcd_name: node3 etcd_initial_cluster: "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380" - ssh_authorized_keys: \ No newline at end of file + ssh_authorized_keys: diff --git a/examples/k8s-rkt.yaml b/examples/k8s-rkt.yaml index 40f44f2e..b3a0c877 100644 --- a/examples/k8s-rkt.yaml +++ b/examples/k8s-rkt.yaml @@ -7,6 +7,7 @@ groups: uuid: 16e7d8a7-bfa9-428b-9117-363341bb330b metadata: ipv4_address: 172.15.0.21 + pxe: "true" networkd_name: ens3 networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 @@ -29,6 +30,7 @@ groups: uuid: 264cd073-ca62-44b3-98c0-50aad5b5f819 metadata: ipv4_address: 172.15.0.22 + pxe: "true" networkd_name: ens3 networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 @@ -49,6 +51,7 @@ groups: uuid: 39d2e747-2648-4d68-ae92-bbc70b245055 metadata: ipv4_address: 172.15.0.23 + pxe: "true" networkd_name: ens3 networkd_gateway: 172.15.0.1 networkd_dns: 172.15.0.3 diff --git a/examples/profiles/k8s-master-install/profile.json b/examples/profiles/k8s-master-install/profile.json index a5f7e1a4..46fe9261 100644 --- a/examples/profiles/k8s-master-install/profile.json +++ b/examples/profiles/k8s-master-install/profile.json @@ -4,12 +4,11 @@ "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], "cmdline": { - "cloud-config-url": "http://bootcfg.foo:8080/cloud?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.autologin": "", "coreos.first_boot": "" } }, - "cloud_id": "kubernetes-master.sh", - "ignition_id": "etcd.yaml" + "cloud_id": "", + "ignition_id": "k8s-master.yaml" } diff --git a/examples/profiles/k8s-worker-install/profile.json b/examples/profiles/k8s-worker-install/profile.json index d131a250..3b3282f0 100644 --- 
a/examples/profiles/k8s-worker-install/profile.json +++ b/examples/profiles/k8s-worker-install/profile.json @@ -4,12 +4,11 @@ "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], "cmdline": { - "cloud-config-url": "http://bootcfg.foo:8080/cloud?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}", "coreos.autologin": "", "coreos.first_boot": "" } }, - "cloud_id": "kubernetes-worker.sh", - "ignition_id": "etcd.yaml" + "cloud_id": "", + "ignition_id": "k8s-worker.yaml" } From 093c738e8f188f65b6d41ddf1965b16fe6ca15cf Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Tue, 22 Mar 2016 00:00:16 -0700 Subject: [PATCH 5/8] examples: Remove old Kubernetes cloud-configs --- Documentation/cloud-config.md | 3 +- examples/cloud/kubernetes-master.sh | 588 ---------------------------- examples/cloud/kubernetes-worker.sh | 188 --------- 3 files changed, 1 insertion(+), 778 deletions(-) delete mode 100644 examples/cloud/kubernetes-master.sh delete mode 100644 examples/cloud/kubernetes-worker.sh diff --git a/Documentation/cloud-config.md b/Documentation/cloud-config.md index 2490c1ae..542e1940 100644 --- a/Documentation/cloud-config.md +++ b/Documentation/cloud-config.md @@ -8,8 +8,7 @@ Cloud-Config template files can be added in the `/etc/bootcfg/cloud` directory o data/ ├── cloud │   ├── cloud.yaml - │   ├── kubernetes-master.sh - │   └── kubernetes-worker.sh + │   └── script.sh ├── ignition └── profiles diff --git a/examples/cloud/kubernetes-master.sh b/examples/cloud/kubernetes-master.sh deleted file mode 100644 index 5257a329..00000000 --- a/examples/cloud/kubernetes-master.sh +++ /dev/null @@ -1,588 +0,0 @@ -#!/bin/bash -set -e - -# List of etcd servers (http://ip:port), comma separated -export ETCD_ENDPOINTS={{.k8s_etcd_endpoints}} - -# Specify the version (vX.Y.Z) of Kubernetes assets to deploy -export K8S_VER=v1.1.8_coreos.0 - -# The CIDR network to use for pod IPs. -# Each pod launched in the cluster will be assigned an IP out of this range. -# Each node will be configured such that these IPs will be routable using the flannel overlay network. -export POD_NETWORK={{.k8s_pod_network}} - -# The CIDR network to use for service cluster IPs. -# Each service will be assigned a cluster IP out of this range. -# This must not overlap with any IP ranges assigned to the POD_NETWORK, or other existing network infrastructure. -# Routing to these IPs is handled by a proxy service local to each node, and are not required to be routable between nodes. -export SERVICE_IP_RANGE={{.k8s_service_ip_range}} - -# The IP address of the Kubernetes API Service -# If the SERVICE_IP_RANGE is changed above, this must be set to the first IP in that range. -export K8S_SERVICE_IP={{.k8s_service_ip}} - -# The IP address of the cluster DNS service. -# This IP must be in the range of the SERVICE_IP_RANGE and cannot be the first IP in the range. -# This same IP must be configured on all worker nodes to enable DNS service discovery. -export DNS_SERVICE_IP={{.k8s_dns_service_ip}} - -# ADVERTISE_IP is the host node's IP. 
-export ADVERTISE_IP={{.ipv4_address}} - -# TLS Certificate assets are hosted by the Config Server -export CERT_ENDPOINT={{.k8s_cert_endpoint}} - -function init_config { - local REQUIRED=('ADVERTISE_IP' 'POD_NETWORK' 'ETCD_ENDPOINTS' 'SERVICE_IP_RANGE' 'K8S_SERVICE_IP' 'DNS_SERVICE_IP' 'K8S_VER' ) - - for REQ in "${REQUIRED[@]}"; do - if [ -z "$(eval echo \$$REQ)" ]; then - echo "Missing required config value: ${REQ}" - exit 1 - fi - done -} - -function get_certs { - DEST=/etc/kubernetes/ssl - mkdir -p $DEST - echo "Waiting for Certificate Endpoint..." - until curl --silent $CERT_ENDPOINT - do - sleep 5 - done - curl $CERT_ENDPOINT/tls/apiserver.pem -o $DEST/apiserver.pem - curl $CERT_ENDPOINT/tls/apiserver-key.pem -o $DEST/apiserver-key.pem - curl $CERT_ENDPOINT/tls/ca.pem -o $DEST/ca.pem -} - -function init_flannel { - echo "Waiting for etcd..." - while true - do - IFS=',' read -ra ES <<< "$ETCD_ENDPOINTS" - for ETCD in "${ES[@]}"; do - echo "Trying: $ETCD" - if [ -n "$(curl --silent "$ETCD/v2/machines")" ]; then - local ACTIVE_ETCD=$ETCD - break - fi - sleep 1 - done - if [ -n "$ACTIVE_ETCD" ]; then - break - fi - done - RES=$(curl --silent -X PUT -d "value={\"Network\":\"$POD_NETWORK\",\"Backend\":{\"Type\":\"vxlan\"}}" "$ACTIVE_ETCD/v2/keys/coreos.com/network/config?prevExist=false") - if [ -z "$(echo $RES | grep '"action":"create"')" ] && [ -z "$(echo $RES | grep 'Key already exists')" ]; then - echo "Unexpected error configuring flannel pod network: $RES" - fi -} - -function init_templates { - local TEMPLATE=/etc/systemd/system/kubelet.service - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -[Service] -ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests -Environment=KUBELET_VERSION=${K8S_VER} -ExecStart=/usr/lib/coreos/kubelet-wrapper \ - --api_servers=http://127.0.0.1:8080 \ - --register-node=false \ - --allow-privileged=true \ - --config=/etc/kubernetes/manifests \ - --hostname-override=${ADVERTISE_IP} \ - --cluster_dns=${DNS_SERVICE_IP} \ - --cluster_domain=cluster.local -Restart=always -RestartSec=10 - -[Install] -WantedBy=multi-user.target -EOF - } - - local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Pod -metadata: - name: kube-proxy - namespace: kube-system -spec: - hostNetwork: true - containers: - - name: kube-proxy - image: quay.io/coreos/hyperkube:$K8S_VER - command: - - /hyperkube - - proxy - - --master=http://127.0.0.1:8080 - - --proxy-mode=iptables - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/ssl/certs - name: ssl-certs-host - readOnly: true - volumes: - - hostPath: - path: /usr/share/ca-certificates - name: ssl-certs-host -EOF - } - - local TEMPLATE=/etc/kubernetes/manifests/kube-apiserver.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Pod -metadata: - name: kube-apiserver - namespace: kube-system -spec: - hostNetwork: true - containers: - - name: kube-apiserver - image: quay.io/coreos/hyperkube:$K8S_VER - command: - - /hyperkube - - apiserver - - --bind-address=0.0.0.0 - - --etcd-servers=${ETCD_ENDPOINTS} - - --allow-privileged=true - - --service-cluster-ip-range=${SERVICE_IP_RANGE} - - --secure-port=443 - - --advertise-address=${ADVERTISE_IP} - - 
--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota - - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem - - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem - - --client-ca-file=/etc/kubernetes/ssl/ca.pem - - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem - ports: - - containerPort: 443 - hostPort: 443 - name: https - - containerPort: 8080 - hostPort: 8080 - name: local - volumeMounts: - - mountPath: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - readOnly: true - - mountPath: /etc/ssl/certs - name: ssl-certs-host - readOnly: true - volumes: - - hostPath: - path: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - - hostPath: - path: /usr/share/ca-certificates - name: ssl-certs-host -EOF - } - - local TEMPLATE=/etc/kubernetes/manifests/kube-podmaster.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Pod -metadata: - name: kube-podmaster - namespace: kube-system -spec: - hostNetwork: true - containers: - - name: scheduler-elector - image: gcr.io/google_containers/podmaster:1.1 - command: - - /podmaster - - --etcd-servers=${ETCD_ENDPOINTS} - - --key=scheduler - - --whoami=${ADVERTISE_IP} - - --source-file=/src/manifests/kube-scheduler.yaml - - --dest-file=/dst/manifests/kube-scheduler.yaml - volumeMounts: - - mountPath: /src/manifests - name: manifest-src - readOnly: true - - mountPath: /dst/manifests - name: manifest-dst - - name: controller-manager-elector - image: gcr.io/google_containers/podmaster:1.1 - command: - - /podmaster - - --etcd-servers=${ETCD_ENDPOINTS} - - --key=controller - - --whoami=${ADVERTISE_IP} - - --source-file=/src/manifests/kube-controller-manager.yaml - - --dest-file=/dst/manifests/kube-controller-manager.yaml - terminationMessagePath: /dev/termination-log - volumeMounts: - - mountPath: /src/manifests - name: manifest-src - readOnly: true - - mountPath: /dst/manifests - name: manifest-dst - volumes: - - hostPath: - path: /srv/kubernetes/manifests - name: manifest-src - - hostPath: - path: /etc/kubernetes/manifests - name: manifest-dst -EOF - } - - local TEMPLATE=/srv/kubernetes/manifests/kube-controller-manager.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Pod -metadata: - name: kube-controller-manager - namespace: kube-system -spec: - containers: - - name: kube-controller-manager - image: quay.io/coreos/hyperkube:$K8S_VER - command: - - /hyperkube - - controller-manager - - --master=http://127.0.0.1:8080 - - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem - - --root-ca-file=/etc/kubernetes/ssl/ca.pem - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /healthz - port: 10252 - initialDelaySeconds: 15 - timeoutSeconds: 1 - volumeMounts: - - mountPath: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - readOnly: true - - mountPath: /etc/ssl/certs - name: ssl-certs-host - readOnly: true - hostNetwork: true - volumes: - - hostPath: - path: /etc/kubernetes/ssl - name: ssl-certs-kubernetes - - hostPath: - path: /usr/share/ca-certificates - name: ssl-certs-host -EOF - } - - local TEMPLATE=/srv/kubernetes/manifests/kube-scheduler.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Pod -metadata: - name: kube-scheduler - namespace: kube-system -spec: - hostNetwork: true - containers: - - 
name: kube-scheduler - image: quay.io/coreos/hyperkube:$K8S_VER - command: - - /hyperkube - - scheduler - - --master=http://127.0.0.1:8080 - livenessProbe: - httpGet: - host: 127.0.0.1 - path: /healthz - port: 10251 - initialDelaySeconds: 15 - timeoutSeconds: 1 -EOF - } - - local TEMPLATE=/srv/kubernetes/manifests/kube-system.json - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -{ - "apiVersion": "v1", - "kind": "Namespace", - "metadata": { - "name": "kube-system" - } -} -EOF - } - - local TEMPLATE=/srv/kubernetes/manifests/kube-dns-rc.json - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -{ - "apiVersion": "v1", - "kind": "ReplicationController", - "metadata": { - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "version": "v9" - }, - "name": "kube-dns-v9", - "namespace": "kube-system" - }, - "spec": { - "replicas": 1, - "selector": { - "k8s-app": "kube-dns", - "version": "v9" - }, - "template": { - "metadata": { - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "version": "v9" - } - }, - "spec": { - "containers": [ - { - "command": [ - "/usr/local/bin/etcd", - "-data-dir", - "/var/etcd/data", - "-listen-client-urls", - "http://127.0.0.1:2379,http://127.0.0.1:4001", - "-advertise-client-urls", - "http://127.0.0.1:2379,http://127.0.0.1:4001", - "-initial-cluster-token", - "skydns-etcd" - ], - "image": "gcr.io/google_containers/etcd:2.0.9", - "name": "etcd", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - } - }, - "volumeMounts": [ - { - "mountPath": "/var/etcd/data", - "name": "etcd-storage" - } - ] - }, - { - "args": [ - "-domain=cluster.local" - ], - "image": "gcr.io/google_containers/kube2sky:1.11", - "name": "kube2sky", - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - } - } - }, - { - "args": [ - "-machines=http://127.0.0.1:4001", - "-addr=0.0.0.0:53", - "-ns-rotate=false", - "-domain=cluster.local." 
- ], - "image": "gcr.io/google_containers/skydns:2015-10-13-8c72f8c", - "livenessProbe": { - "httpGet": { - "path": "/healthz", - "port": 8080, - "scheme": "HTTP" - }, - "initialDelaySeconds": 30, - "timeoutSeconds": 5 - }, - "name": "skydns", - "ports": [ - { - "containerPort": 53, - "name": "dns", - "protocol": "UDP" - }, - { - "containerPort": 53, - "name": "dns-tcp", - "protocol": "TCP" - } - ], - "readinessProbe": { - "httpGet": { - "path": "/healthz", - "port": 8080, - "scheme": "HTTP" - }, - "initialDelaySeconds": 1, - "timeoutSeconds": 5 - }, - "resources": { - "limits": { - "cpu": "100m", - "memory": "50Mi" - } - } - }, - { - "args": [ - "-cmd=nslookup kubernetes.default.svc.cluster.local localhost >/dev/null", - "-port=8080" - ], - "image": "gcr.io/google_containers/exechealthz:1.0", - "name": "healthz", - "ports": [ - { - "containerPort": 8080, - "protocol": "TCP" - } - ], - "resources": { - "limits": { - "cpu": "10m", - "memory": "20Mi" - } - } - } - ], - "dnsPolicy": "Default", - "volumes": [ - { - "emptyDir": {}, - "name": "etcd-storage" - } - ] - } - } - } -} -EOF - } - - local TEMPLATE=/srv/kubernetes/manifests/kube-dns-svc.json - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -{ - "apiVersion": "v1", - "kind": "Service", - "metadata": { - "name": "kube-dns", - "namespace": "kube-system", - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/name": "KubeDNS", - "kubernetes.io/cluster-service": "true" - } - }, - "spec": { - "clusterIP": "$DNS_SERVICE_IP", - "ports": [ - { - "protocol": "UDP", - "name": "dns", - "port": 53 - }, - { - "protocol": "TCP", - "name": "dns-tcp", - "port": 53 - } - ], - "selector": { - "k8s-app": "kube-dns" - } - } -} -EOF - } - - local TEMPLATE=/etc/flannel/options.env - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -FLANNELD_IFACE=$ADVERTISE_IP -FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS -EOF - } - - local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf.conf - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -[Service] -ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env -EOF - } - - local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -[Unit] -Requires=flanneld.service -After=flanneld.service -EOF - } - -} - -function start_addons { - echo "Waiting for Kubernetes API..." 
- until curl --silent "http://127.0.0.1:8080/version" - do - sleep 5 - done - echo - echo "K8S: kube-system namespace" - curl --silent -XPOST -d"$(cat /srv/kubernetes/manifests/kube-system.json)" "http://127.0.0.1:8080/api/v1/namespaces" > /dev/null - echo "K8S: DNS addon" - curl --silent -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-rc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/replicationcontrollers" > /dev/null - curl --silent -XPOST -d"$(cat /srv/kubernetes/manifests/kube-dns-svc.json)" "http://127.0.0.1:8080/api/v1/namespaces/kube-system/services" > /dev/null -} - -init_config -get_certs -init_templates -init_flannel - -{{if .autoupdate}}{{else}}systemctl stop update-engine; systemctl mask update-engine{{end}} -systemctl daemon-reload -systemctl enable kubelet; systemctl start kubelet - -start_addons - -echo "done" > /home/core/master diff --git a/examples/cloud/kubernetes-worker.sh b/examples/cloud/kubernetes-worker.sh deleted file mode 100644 index c82ecdb6..00000000 --- a/examples/cloud/kubernetes-worker.sh +++ /dev/null @@ -1,188 +0,0 @@ -#!/bin/bash -set -e - -# List of etcd servers (http://ip:port), comma separated -export ETCD_ENDPOINTS={{.k8s_etcd_endpoints}} - -# The endpoint the worker node should use to contact controller nodes (https://ip:port) -# In HA configurations this should be an external DNS record or loadbalancer in front of the control nodes. -# However, it is also possible to point directly to a single control node. -export CONTROLLER_ENDPOINT={{.k8s_controller_endpoint}} - -# Specify the version (vX.Y.Z) of Kubernetes assets to deploy -export K8S_VER=v1.1.8_coreos.0 - -# The IP address of the cluster DNS service. -# This must be the same DNS_SERVICE_IP used when configuring the controller nodes. -export DNS_SERVICE_IP={{.k8s_dns_service_ip}} - -# ADVERTISE_IP is the host node's IP. -export ADVERTISE_IP={{.ipv4_address}} - -# TLS Certificate assets are hosted by the Config Server -export CERT_ENDPOINT={{.k8s_cert_endpoint}} - -function init_config { - local REQUIRED=( 'ADVERTISE_IP' 'ETCD_ENDPOINTS' 'CONTROLLER_ENDPOINT' 'DNS_SERVICE_IP' 'K8S_VER' ) - - for REQ in "${REQUIRED[@]}"; do - if [ -z "$(eval echo \$$REQ)" ]; then - echo "Missing required config value: ${REQ}" - exit 1 - fi - done -} - -function get_certs { - DEST=/etc/kubernetes/ssl - mkdir -p $DEST - echo "Waiting for Certificate Endpoint..." 
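- # Block until the config server's certificate endpoint responds; the
- # kubelet cannot start without these TLS assets on disk.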
- until curl --silent $CERT_ENDPOINT - do - sleep 5 - done - curl $CERT_ENDPOINT/tls/worker.pem -o $DEST/worker.pem - curl $CERT_ENDPOINT/tls/worker-key.pem -o $DEST/worker-key.pem - curl $CERT_ENDPOINT/tls/ca.pem -o $DEST/ca.pem -} - -function init_templates { - local TEMPLATE=/etc/systemd/system/kubelet.service - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -[Service] -ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests -Environment=KUBELET_VERSION=${K8S_VER} -ExecStart=/usr/lib/coreos/kubelet-wrapper \ - --api_servers=${CONTROLLER_ENDPOINT} \ - --register-node=true \ - --allow-privileged=true \ - --config=/etc/kubernetes/manifests \ - --hostname-override=${ADVERTISE_IP} \ - --cluster_dns=${DNS_SERVICE_IP} \ - --cluster_domain=cluster.local \ - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \ - --tls-cert-file=/etc/kubernetes/ssl/worker.pem \ - --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem -Restart=always -RestartSec=10 -[Install] -WantedBy=multi-user.target -EOF - } - - local TEMPLATE=/etc/kubernetes/worker-kubeconfig.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Config -clusters: -- name: local - cluster: - certificate-authority: /etc/kubernetes/ssl/ca.pem -users: -- name: kubelet - user: - client-certificate: /etc/kubernetes/ssl/worker.pem - client-key: /etc/kubernetes/ssl/worker-key.pem -contexts: -- context: - cluster: local - user: kubelet - name: kubelet-context -current-context: kubelet-context -EOF - } - - local TEMPLATE=/etc/kubernetes/manifests/kube-proxy.yaml - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -apiVersion: v1 -kind: Pod -metadata: - name: kube-proxy - namespace: kube-system -spec: - hostNetwork: true - containers: - - name: kube-proxy - image: quay.io/coreos/hyperkube:$K8S_VER - command: - - /hyperkube - - proxy - - --master=${CONTROLLER_ENDPOINT} - - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml - - --proxy-mode=iptables - securityContext: - privileged: true - volumeMounts: - - mountPath: /etc/ssl/certs - name: "ssl-certs" - - mountPath: /etc/kubernetes/worker-kubeconfig.yaml - name: "kubeconfig" - readOnly: true - - mountPath: /etc/kubernetes/ssl - name: "etc-kube-ssl" - readOnly: true - volumes: - - name: "ssl-certs" - hostPath: - path: "/usr/share/ca-certificates" - - name: "kubeconfig" - hostPath: - path: "/etc/kubernetes/worker-kubeconfig.yaml" - - name: "etc-kube-ssl" - hostPath: - path: "/etc/kubernetes/ssl" -EOF - } - - local TEMPLATE=/etc/flannel/options.env - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -FLANNELD_IFACE=$ADVERTISE_IP -FLANNELD_ETCD_ENDPOINTS=$ETCD_ENDPOINTS -EOF - } - - local TEMPLATE=/etc/systemd/system/flanneld.service.d/40-ExecStartPre-symlink.conf.conf - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -[Service] -ExecStartPre=/usr/bin/ln -sf /etc/flannel/options.env /run/flannel/options.env -EOF - } - - local TEMPLATE=/etc/systemd/system/docker.service.d/40-flannel.conf - [ -f $TEMPLATE ] || { - echo "TEMPLATE: $TEMPLATE" - mkdir -p $(dirname $TEMPLATE) - cat << EOF > $TEMPLATE -[Unit] -Requires=flanneld.service -After=flanneld.service -EOF - } - -} - -init_config -get_certs -init_templates - -{{if .autoupdate}}{{else}}systemctl stop update-engine; systemctl mask 
update-engine{{end}} - -systemctl daemon-reload -systemctl enable kubelet; systemctl start kubelet - -echo "done" > /home/core/worker From b3bac02a121598159281e478b964deec4bcf6f89 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Tue, 22 Mar 2016 22:54:52 -0700 Subject: [PATCH 6/8] examples: Fix Kubelet path and service ordering * Run k8stls.service to fetch TLS assets if path is missing * Make Kubelet depend upon a kubelet.path path unit * Fixes to file mode (octal allowed in YAML) --- examples/README.md | 2 +- examples/cloud/.gitkeep | 0 examples/ignition/install-shutdown.yaml | 28 +++++++++++++++++++ examples/ignition/k8s-master.yaml | 18 ++++++++---- examples/ignition/k8s-worker.yaml | 14 ++++++++-- .../profiles/install-shutdown/profile.json | 15 ++++++++++ 6 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 examples/cloud/.gitkeep create mode 100644 examples/ignition/install-shutdown.yaml create mode 100644 examples/profiles/install-shutdown/profile.json diff --git a/examples/README.md b/examples/README.md index c0d06598..c9a09a97 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,7 +1,7 @@ # Examples -These examples show declarative configurations for network booting libvirt VMs into CoreOS clusters (Kubernetes, etcd) using `bootcfg`. +These examples network boot and provision VMs into CoreOS clusters using `bootcfg`. | Name | Description | CoreOS Version | FS | Reference | |------------|-------------|----------------|----|-----------| diff --git a/examples/cloud/.gitkeep b/examples/cloud/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/examples/ignition/install-shutdown.yaml b/examples/ignition/install-shutdown.yaml new file mode 100644 index 00000000..9f90e932 --- /dev/null +++ b/examples/ignition/install-shutdown.yaml @@ -0,0 +1,28 @@ +--- +ignition_version: 1 +systemd: + units: + - name: install.service + enable: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + ExecStart=/usr/bin/curl {{.ignition_endpoint}}?{{.query}}&os=installed -o ignition.json + ExecStart=/usr/bin/coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json + ExecStart=/usr/bin/udevadm settle + ExecStart=/usr/bin/systemctl poweroff + [Install] + WantedBy=multi-user.target + +{{ if .ssh_authorized_keys }} +passwd: + users: + - name: core + ssh_authorized_keys: + {{ range $element := .ssh_authorized_keys }} + - {{$element}} + {{end}} +{{end}} diff --git a/examples/ignition/k8s-master.yaml b/examples/ignition/k8s-master.yaml index f171d9c1..d9a54845 100644 --- a/examples/ignition/k8s-master.yaml +++ b/examples/ignition/k8s-master.yaml @@ -42,23 +42,32 @@ systemd: Description=Acquire Kubernetes TLS CA and Certificate Requires=network-online.target After=network-online.target + ConditionPathExists=!/etc/kubernetes/ssl/ready [Service] Type=oneshot ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver.pem -o /etc/kubernetes/ssl/apiserver.pem ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver-key.pem -o /etc/kubernetes/ssl/apiserver-key.pem ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem + ExecStart=/usr/bin/touch /etc/kubernetes/ssl/ready [Install] WantedBy=multi-user.target + - name: kubelet.path + enable: true + contents: | + [Unit] + Description=Watch for Kubelet TLS Assets + [Path] + PathExists=/etc/kubernetes/ssl/ready - name: kubelet.service enable: true 
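      # kubelet.path (added above) fires once /etc/kubernetes/ssl/ready
      # exists, so the kubelet starts only after the TLS assets are fetched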
contents: | [Unit] Description=Kubelet via Hyperkube ACI - Requires=k8stls.service - After=k8stls.service Requires=flanneld.service After=flanneld.service + Requires=kubelet.path + After=kubelet.path [Service] ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests Environment=KUBELET_VERSION={{.k8s_version}} @@ -94,7 +103,6 @@ storage: wipe_table: true partitions: - label: ROOT - number: 0 filesystems: - device: "/dev/sda1" format: "ext4" @@ -487,7 +495,7 @@ storage: } } - path: /opt/init-flannel - mode: 320 + mode: 0500 contents: | #!/bin/bash function init_flannel { @@ -514,7 +522,7 @@ storage: } init_flannel - path: /opt/k8s-addons - mode: 320 + mode: 0500 contents: | #!/bin/bash echo "Waiting for Kubernetes API..." diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml index dc85a26a..4a6a14c5 100644 --- a/examples/ignition/k8s-worker.yaml +++ b/examples/ignition/k8s-worker.yaml @@ -41,21 +41,30 @@ systemd: Description=Acquire Kubernetes TLS CA and Certificate Requires=network-online.target After=network-online.target + ConditionPathExists=!/etc/kubernetes/ssl/ready [Service] Type=oneshot ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker.pem -o /etc/kubernetes/ssl/worker.pem ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker-key.pem -o /etc/kubernetes/ssl/worker-key.pem ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem + ExecStart=/usr/bin/touch /etc/kubernetes/ssl/ready [Install] WantedBy=multi-user.target + - name: kubelet.path + enable: true + contents: | + [Unit] + Description=Watch for Kubelet TLS Assets + [Path] + PathExists=/etc/kubernetes/ssl/ready - name: kubelet.service enable: true contents: | [Unit] Description=Kubelet via Hyperkube ACI - Requires=k8stls.service - After=k8stls.service + Requires=kubelet.path + After=kubelet.path [Service] ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests Environment=KUBELET_VERSION={{.k8s_version}} @@ -82,7 +91,6 @@ storage: wipe_table: true partitions: - label: ROOT - number: 0 filesystems: - device: "/dev/sda1" format: "ext4" diff --git a/examples/profiles/install-shutdown/profile.json b/examples/profiles/install-shutdown/profile.json new file mode 100644 index 00000000..d7ef1cff --- /dev/null +++ b/examples/profiles/install-shutdown/profile.json @@ -0,0 +1,15 @@ +{ + "id": "install-shutdown", + "name": "Install CoreOS and Shutdown", + "boot": { + "kernel": "/assets/coreos/983.0.0/coreos_production_pxe.vmlinuz", + "initrd": ["/assets/coreos/983.0.0/coreos_production_pxe_image.cpio.gz"], + "cmdline": { + "coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}", + "coreos.autologin": "", + "coreos.first_boot": "" + } + }, + "cloud_id": "", + "ignition_id": "install-shutdown.yaml" +} \ No newline at end of file From c0cf9c5c999d8f088e8664d747835414a386d2e8 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Wed, 23 Mar 2016 10:23:55 -0700 Subject: [PATCH 7/8] examples: Change /opt script file mode to 0544 --- examples/ignition/k8s-master.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/ignition/k8s-master.yaml b/examples/ignition/k8s-master.yaml index d9a54845..4aa1c377 100644 --- a/examples/ignition/k8s-master.yaml +++ b/examples/ignition/k8s-master.yaml @@ -495,7 +495,7 @@ storage: } } - path: /opt/init-flannel - mode: 0500 + mode: 0544 contents: | #!/bin/bash function init_flannel { @@ -522,7 +522,7 @@ storage: } init_flannel - 
path: /opt/k8s-addons - mode: 0500 + mode: 0544 contents: | #!/bin/bash echo "Waiting for Kubernetes API..." From 50f6741eff29a807002c11d9d84dfc6bf7b61aa4 Mon Sep 17 00:00:00 2001 From: Dalton Hubble Date: Wed, 23 Mar 2016 11:49:46 -0700 Subject: [PATCH 8/8] examples: Use target and template units in k8s-{master,worker} * Remove the Path Unit which curl'd 3 files, use a template unit * Add a k8s-assets.target to simplify depending on fetched assets --- examples/ignition/k8s-master.yaml | 31 +++++++++++++------------------ examples/ignition/k8s-worker.yaml | 31 +++++++++++++------------------ 2 files changed, 26 insertions(+), 36 deletions(-) diff --git a/examples/ignition/k8s-master.yaml b/examples/ignition/k8s-master.yaml index 4aa1c377..43062f0f 100644 --- a/examples/ignition/k8s-master.yaml +++ b/examples/ignition/k8s-master.yaml @@ -35,30 +35,25 @@ systemd: [Unit] Requires=flanneld.service After=flanneld.service - - name: k8stls.service - enable: true + - name: k8s-certs@.service contents: | [Unit] - Description=Acquire Kubernetes TLS CA and Certificate + Description=Fetch Kubernetes certificate assets Requires=network-online.target After=network-online.target - ConditionPathExists=!/etc/kubernetes/ssl/ready [Service] - Type=oneshot ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver.pem -o /etc/kubernetes/ssl/apiserver.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/apiserver-key.pem -o /etc/kubernetes/ssl/apiserver-key.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem - ExecStart=/usr/bin/touch /etc/kubernetes/ssl/ready - [Install] - WantedBy=multi-user.target - - name: kubelet.path - enable: true + ExecStart=/usr/bin/bash -c "[ -f {{.k8s_cert_endpoint}}/tls/%i ] || curl {{.k8s_cert_endpoint}}/tls/%i -o /etc/kubernetes/ssl/%i" + - name: k8s-assets.target contents: | [Unit] - Description=Watch for Kubelet TLS Assets - [Path] - PathExists=/etc/kubernetes/ssl/ready + Description=Load Kubernetes Assets + Requires=k8s-certs@apiserver.pem.service + After=k8s-certs@apiserver.pem.service + Requires=k8s-certs@apiserver-key.pem.service + After=k8s-certs@apiserver-key.pem.service + Requires=k8s-certs@ca.pem.service + After=k8s-certs@ca.pem.service - name: kubelet.service enable: true contents: | @@ -66,8 +61,8 @@ systemd: Description=Kubelet via Hyperkube ACI Requires=flanneld.service After=flanneld.service - Requires=kubelet.path - After=kubelet.path + Requires=k8s-assets.target + After=k8s-assets.target [Service] ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests Environment=KUBELET_VERSION={{.k8s_version}} diff --git a/examples/ignition/k8s-worker.yaml b/examples/ignition/k8s-worker.yaml index 4a6a14c5..7cb72f5e 100644 --- a/examples/ignition/k8s-worker.yaml +++ b/examples/ignition/k8s-worker.yaml @@ -34,37 +34,32 @@ systemd: [Unit] Requires=flanneld.service After=flanneld.service - - name: k8stls.service - enable: true + - name: k8s-certs@.service contents: | [Unit] - Description=Acquire Kubernetes TLS CA and Certificate + Description=Fetch Kubernetes certificate assets Requires=network-online.target After=network-online.target - ConditionPathExists=!/etc/kubernetes/ssl/ready [Service] - Type=oneshot ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker.pem -o /etc/kubernetes/ssl/worker.pem - ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/worker-key.pem -o /etc/kubernetes/ssl/worker-key.pem - 
ExecStart=/usr/bin/curl {{.k8s_cert_endpoint}}/tls/ca.pem -o /etc/kubernetes/ssl/ca.pem - ExecStart=/usr/bin/touch /etc/kubernetes/ssl/ready - [Install] - WantedBy=multi-user.target - - name: kubelet.path - enable: true + ExecStart=/usr/bin/bash -c "[ -f {{.k8s_cert_endpoint}}/tls/%i ] || curl {{.k8s_cert_endpoint}}/tls/%i -o /etc/kubernetes/ssl/%i" + - name: k8s-assets.target contents: | [Unit] - Description=Watch for Kubelet TLS Assets - [Path] - PathExists=/etc/kubernetes/ssl/ready + Description=Load Kubernetes Assets + Requires=k8s-certs@worker.pem.service + After=k8s-certs@worker.pem.service + Requires=k8s-certs@worker-key.pem.service + After=k8s-certs@worker-key.pem.service + Requires=k8s-certs@ca.pem.service + After=k8s-certs@ca.pem.service - name: kubelet.service enable: true contents: | [Unit] Description=Kubelet via Hyperkube ACI - Requires=kubelet.path - After=kubelet.path + Requires=k8s-assets.target + After=k8s-assets.target [Service] ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests Environment=KUBELET_VERSION={{.k8s_version}}
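
For reference, a minimal sketch of how the k8s-certs@.service template expands at runtime. The endpoint and asset name are illustrative, and the existence check is written against the local destination file, which is assumed to be the guard's intent:

    # k8s-certs@worker.pem.service: systemd substitutes the instance name
    # ("worker.pem") for each %i in the k8s-certs@.service template.
    [Service]
    ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/ssl
    # Fetch the asset only if it is not already on disk.
    ExecStart=/usr/bin/bash -c "[ -f /etc/kubernetes/ssl/worker.pem ] || curl http://bootcfg.foo:8080/tls/worker.pem -o /etc/kubernetes/ssl/worker.pem"

No instance is enabled on its own: k8s-assets.target pulls in one k8s-certs@<asset>.service per certificate via Requires=/After=, and kubelet.service in turn orders Requires=/After=k8s-assets.target, so starting the kubelet transitively fetches every asset first.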