Merge pull request #517 from dghubble/self-hosted-etcd

examples/terraform: Add experimental self-hosted etcd option
This commit is contained in:
Dalton Hubble
2017-05-10 09:55:33 -07:00
committed by GitHub
6 changed files with 37 additions and 0 deletions

View File

@@ -35,6 +35,8 @@ matchbox_rpc_endpoint = "matchbox.example.com:8081"
ssh_authorized_key = "ADD ME"
```
You may set `experimental_self_hosted_etcd = "true"` to deploy "self-hosted" etcd atop Kubernetes instead of running etcd on hosts directly. Warning, this is experimental and potentially dangerous.
Configs in `bootkube-install` configure the matchbox provider, define profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`), and define 3 groups which match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `node3` provision themselves as workers.
Fetch the [profiles](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html) which lets you use common machine profiles maintained in the matchbox repo (like `bootkube`).
@@ -54,6 +56,7 @@ Apply complete! Resources: 10 added, 0 changed, 0 destroyed.
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
## Machines
Power on each machine (with PXE boot device on next boot). Machines should network boot, install Container Linux to disk, reboot, and provision themselves as bootkube controllers or workers.
@@ -87,6 +90,12 @@ Use the `bootkube` tool to render Kubernetes manifests and credentials into an `
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=http://127.0.0.1:2379
```
If you set `experimental_self_hosted_etcd` to "true", use these flags instead:
```sh
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --experimental-self-hosted-etcd
```
Secure copy the kubeconfig to /etc/kubernetes/kubeconfig on every node which will path activate the `kubelet.service`.
```

View File

@@ -32,7 +32,9 @@ resource "matchbox_group" "node1" {
domain_name = "node1.example.com"
etcd_name = "node1"
etcd_initial_cluster = "node1=http://node1.example.com:2380"
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
k8s_dns_service_ip = "${var.k8s_dns_service_ip}"
k8s_etcd_service_ip = "${var.k8s_etcd_service_ip}"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}
@@ -49,7 +51,9 @@ resource "matchbox_group" "node2" {
metadata {
domain_name = "node2.example.com"
etcd_endpoints = "node1.example.com:2379"
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
k8s_dns_service_ip = "${var.k8s_dns_service_ip}"
k8s_etcd_service_ip = "${var.k8s_etcd_service_ip}"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}
@@ -64,7 +68,9 @@ resource "matchbox_group" "node3" {
metadata {
domain_name = "node3.example.com"
etcd_endpoints = "node1.example.com:2379"
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
k8s_dns_service_ip = "${var.k8s_dns_service_ip}"
k8s_etcd_service_ip = "${var.k8s_etcd_service_ip}"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}

View File

@@ -1,3 +1,4 @@
matchbox_http_endpoint = "http://matchbox.example.com:8080"
matchbox_rpc_endpoint = "matchbox.example.com:8081"
# ssh_authorized_key = "ADD ME"
experimental_self_hosted_etcd = "false"

View File

@@ -18,3 +18,14 @@ variable "k8s_dns_service_ip" {
default = "10.3.0.10"
description = "Cluster DNS service IP address passed via the Kubelet --cluster-dns flag"
}
variable "k8s_etcd_service_ip" {
type = "string"
default = "10.3.0.15"
description = "Cluster etcd service IP address, used if self-hosted etcd is enabled"
}
variable "experimental_self_hosted_etcd" {
default = "false"
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on hosts"
}

View File

@@ -1,6 +1,7 @@
---
systemd:
units:
{{ if eq .etcd_on_host "true" }}
- name: etcd-member.service
enable: true
dropins:
@@ -15,6 +16,7 @@ systemd:
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
{{ end }}
- name: docker.service
enable: true
- name: locksmithd.service
@@ -23,6 +25,9 @@ systemd:
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
{{ if eq .etcd_on_host "false" -}}
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
{{ end }}
- name: kubelet.path
enable: true
contents: |

View File

@@ -1,6 +1,7 @@
---
systemd:
units:
{{ if eq .etcd_on_host "true" }}
- name: etcd-member.service
enable: true
dropins:
@@ -12,6 +13,7 @@ systemd:
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
--listen-addr=127.0.0.1:2379 \
--endpoints={{.etcd_endpoints}}
{{ end }}
- name: docker.service
enable: true
- name: locksmithd.service
@@ -20,6 +22,9 @@ systemd:
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
{{ if eq .etcd_on_host "false" -}}
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
{{ end }}
- name: kubelet.path
enable: true
contents: |