mirror of
https://github.com/poseidon/matchbox.git
synced 2026-03-03 03:14:51 +00:00
Compare commits
216 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e78150218f | ||
|
|
cccb588855 | ||
|
|
9a177e83d7 | ||
|
|
dfd0457e03 | ||
|
|
9de30aea59 | ||
|
|
910ee6f18c | ||
|
|
0994b860b5 | ||
|
|
78f7e8d492 | ||
|
|
e804ace9e2 | ||
|
|
0012d691f4 | ||
|
|
e170c600b3 | ||
|
|
4f229d5d9a | ||
|
|
3cd8ba0a05 | ||
|
|
74f13a2f86 | ||
|
|
4eee84b17d | ||
|
|
845d1d0adc | ||
|
|
5b1c790d0c | ||
|
|
70400b7dd0 | ||
|
|
c6ebdfeb92 | ||
|
|
99acdf4c6b | ||
|
|
be057ed9c8 | ||
|
|
8bb99143e8 | ||
|
|
c802ce5805 | ||
|
|
c4e82c03a4 | ||
|
|
29c93046ef | ||
|
|
34e981dc7c | ||
|
|
3a88a663c3 | ||
|
|
572c8d26eb | ||
|
|
c22b273548 | ||
|
|
c3ef870ce5 | ||
|
|
e9ce7325ab | ||
|
|
948bdee165 | ||
|
|
50e923730e | ||
|
|
1799c8e23e | ||
|
|
454ae972a1 | ||
|
|
fe0c3438fd | ||
|
|
65b410e20b | ||
|
|
dced573acb | ||
|
|
4888c04dee | ||
|
|
4e9d542a87 | ||
|
|
08f4e9908b | ||
|
|
dd96f58417 | ||
|
|
f5ef2d156b | ||
|
|
f673d48007 | ||
|
|
7a58d944d8 | ||
|
|
5d975ec42a | ||
|
|
2404d34b0e | ||
|
|
c9b9711bca | ||
|
|
ae524f57f2 | ||
|
|
f26224c57d | ||
|
|
2c063a4674 | ||
|
|
7d5656ffe3 | ||
|
|
a683e8261e | ||
|
|
c75fc8f88e | ||
|
|
b10c777729 | ||
|
|
5992ba6ad5 | ||
|
|
ca223f800b | ||
|
|
1246d5a0db | ||
|
|
4f7dd0942c | ||
|
|
3e6aa4ee73 | ||
|
|
9c39221b71 | ||
|
|
4103461778 | ||
|
|
9a6d815978 | ||
|
|
6aa8759bfd | ||
|
|
d5027950e2 | ||
|
|
85a2a6b252 | ||
|
|
4bc5fcdc5e | ||
|
|
2f4d5b95e4 | ||
|
|
257f2fa553 | ||
|
|
7829c14d52 | ||
|
|
ce72fb72a0 | ||
|
|
41d5db4723 | ||
|
|
dfd08e48e5 | ||
|
|
347e142db9 | ||
|
|
b63e9b2589 | ||
|
|
4a32b0cd59 | ||
|
|
b0b8d97539 | ||
|
|
581be69da7 | ||
|
|
dc75fcc869 | ||
|
|
fc3e688c97 | ||
|
|
f07dc758c4 | ||
|
|
d2827d7ed0 | ||
|
|
692bf81df8 | ||
|
|
cfcec6ac03 | ||
|
|
592969134c | ||
|
|
2b605c8d9c | ||
|
|
63a95188be | ||
|
|
5aa301b72d | ||
|
|
7647a5d095 | ||
|
|
06f80fa003 | ||
|
|
01a767ab3e | ||
|
|
6be5c0f59c | ||
|
|
5efc514097 | ||
|
|
757f46e96f | ||
|
|
5aeb2d1d3d | ||
|
|
1119bb22f0 | ||
|
|
6195ae377e | ||
|
|
d7783a94e9 | ||
|
|
4228ccb330 | ||
|
|
e5d5280658 | ||
|
|
46f0477614 | ||
|
|
0e4265b2bc | ||
|
|
18de74e85b | ||
|
|
31040e9729 | ||
|
|
f0a4cfd1cb | ||
|
|
aeca5b08f9 | ||
|
|
7c1b9b17dc | ||
|
|
0e6ce19172 | ||
|
|
281fd5226a | ||
|
|
fb0ee0f05a | ||
|
|
7def0d7e86 | ||
|
|
1c076875c2 | ||
|
|
7ba0f1476b | ||
|
|
ec6844a43a | ||
|
|
6857c1319a | ||
|
|
cb6bb3c90d | ||
|
|
5c5be5ce5b | ||
|
|
4cbf2b7448 | ||
|
|
d781e43212 | ||
|
|
3ca88334d2 | ||
|
|
c7a649c731 | ||
|
|
d03f256976 | ||
|
|
9ecfcac0b9 | ||
|
|
035b01634f | ||
|
|
e8d3e8c70c | ||
|
|
cc490ff55d | ||
|
|
df6354ad45 | ||
|
|
3d8a3777f0 | ||
|
|
dfee550522 | ||
|
|
07e9676457 | ||
|
|
a69f6dd2d8 | ||
|
|
26d8b7d480 | ||
|
|
2c02549cd6 | ||
|
|
3c999d27e9 | ||
|
|
52b317dff9 | ||
|
|
97985b213b | ||
|
|
1ba353e5b6 | ||
|
|
398d12e148 | ||
|
|
be8fd3d488 | ||
|
|
27d1139a07 | ||
|
|
ee3445454e | ||
|
|
170f8c09ec | ||
|
|
e10525ded0 | ||
|
|
4c47adf390 | ||
|
|
ce3154cae9 | ||
|
|
5e54960a92 | ||
|
|
e008b8ea5e | ||
|
|
b636fc7a3d | ||
|
|
30cf06853d | ||
|
|
61377d2955 | ||
|
|
a7ba7714f5 | ||
|
|
ff916686e7 | ||
|
|
fbc4b39c59 | ||
|
|
be46b389bf | ||
|
|
a14e6c8bb9 | ||
|
|
c03b7a9627 | ||
|
|
ac40eeedb5 | ||
|
|
9e23f3a86d | ||
|
|
d1baa3fb65 | ||
|
|
c915fc2b52 | ||
|
|
6f02107448 | ||
|
|
ff06990edb | ||
|
|
9bc6edc65b | ||
|
|
5b8006ae35 | ||
|
|
ff5cd0468e | ||
|
|
4d9bd82c12 | ||
|
|
882793f230 | ||
|
|
858e1bda73 | ||
|
|
cfbb9cebd0 | ||
|
|
edbe5bab20 | ||
|
|
299701e7ea | ||
|
|
a20720a0d4 | ||
|
|
5a9c24ceb3 | ||
|
|
82af3f747d | ||
|
|
e955fecd30 | ||
|
|
0c1e20db27 | ||
|
|
8d6d0397ff | ||
|
|
abc7eb8dfb | ||
|
|
149f441ad8 | ||
|
|
cf43908a72 | ||
|
|
523b15ed13 | ||
|
|
aac270e937 | ||
|
|
1cfdce2970 | ||
|
|
9d3d08a26f | ||
|
|
b176de805e | ||
|
|
009b44b25d | ||
|
|
57e473b6f5 | ||
|
|
66cd8da417 | ||
|
|
50a3d11414 | ||
|
|
6fa13007c8 | ||
|
|
500a7b25e1 | ||
|
|
951e5ec4a3 | ||
|
|
f92743fa57 | ||
|
|
d84bb8e398 | ||
|
|
d54562f429 | ||
|
|
395494c1d9 | ||
|
|
ddbe17cd31 | ||
|
|
b1a866370a | ||
|
|
b8326e6db6 | ||
|
|
7864e64fd2 | ||
|
|
89bb5125b5 | ||
|
|
cff053328d | ||
|
|
698b6f6118 | ||
|
|
23f23c1dcb | ||
|
|
51cf859587 | ||
|
|
8061f57346 | ||
|
|
8000c323b6 | ||
|
|
314a317271 | ||
|
|
d437167ebf | ||
|
|
4067702641 | ||
|
|
86c07da76e | ||
|
|
be00fdbca0 | ||
|
|
abbf7faf56 | ||
|
|
76cc8cb13c | ||
|
|
ed6dde528a | ||
|
|
1e095661ad |
11
.travis.yml
11
.travis.yml
@@ -3,22 +3,23 @@ sudo: required
|
||||
services:
|
||||
- docker
|
||||
go:
|
||||
- 1.7.4
|
||||
- 1.8
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.11.1
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
install:
|
||||
- go get github.com/golang/lint/golint
|
||||
- go get golang.org/x/lint/golint
|
||||
script:
|
||||
- make test
|
||||
deploy:
|
||||
provider: script
|
||||
script: scripts/travis-docker-push
|
||||
script: scripts/dev/travis-docker-push
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: master
|
||||
go: '1.8'
|
||||
go: '1.11.1'
|
||||
notifications:
|
||||
email: change
|
||||
|
||||
39
CHANGES.md
39
CHANGES.md
@@ -1,9 +1,46 @@
|
||||
# matchbox
|
||||
# Matchbox
|
||||
|
||||
Notable changes between releases.
|
||||
|
||||
## Latest
|
||||
|
||||
## v0.7.1 (2018-11-01)
|
||||
|
||||
* Add `kernel_args` variable to the terraform bootkube-install cluster definition
|
||||
* Add `get-flatcar` helper script
|
||||
* Add optional TLS support to read-only HTTP API
|
||||
* Build Matchbox with Go 1.11.1 for images and binaries
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade Kubernetes example clusters to v1.10.0 (Terraform-based)
|
||||
* Upgrade Kubernetes example clusters to v1.8.5
|
||||
|
||||
## v0.7.0 (2017-12-12)
|
||||
|
||||
* Add gRPC API endpoints for managing generic (experimental) templates
|
||||
* Update Container Linux config transpiler to v0.5.0
|
||||
* Update Ignition to v0.19.0, render v2.1.0 Ignition configs
|
||||
* Drop support for Container Linux versions below 1465.0.0 (breaking)
|
||||
* Build Matchbox with Go 1.8.5 for images and binaries
|
||||
* Remove Profile `Cmdline` map (deprecated in v0.5.0), use `Args` slice instead
|
||||
* Remove pixiecore support (deprecated in v0.5.0)
|
||||
* Remove `ContextHandler`, `ContextHandlerFunc`, and `NewHandler` from the `matchbox/http` package.
|
||||
|
||||
### Examples / Modules
|
||||
|
||||
* Upgrade Kubernetes example clusters to v1.8.4
|
||||
* Kubernetes examples clusters enable etcd TLS
|
||||
* Deploy the Container Linux Update Operator (CLUO) to coordinate reboots of Container Linux nodes in Kubernetes clusters. See the cluster [addon docs](Documentation/cluster-addons.md).
|
||||
* Kubernetes examples (terraform and non-terraform) mask locksmithd
|
||||
* Terraform modules `bootkube` and `profiles` (Kubernetes) mask locksmithd
|
||||
|
||||
## v0.6.1 (2017-05-25)
|
||||
|
||||
* Improve the installation documentation
|
||||
* Move examples/etc/matchbox/cert-gen to scripts/tls
|
||||
* Build Matchbox with Go 1.8.3 for images and binaries
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade self-hosted Kubernetes cluster examples to v1.6.4
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine:3.5
|
||||
FROM alpine:3.6
|
||||
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
|
||||
COPY bin/matchbox /matchbox
|
||||
EXPOSE 8080
|
||||
|
||||
@@ -39,8 +39,8 @@ GET http://matchbox.foo/ipxe?label=value
|
||||
|
||||
```
|
||||
#!ipxe
|
||||
kernel /assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
|
||||
initrd /assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz
|
||||
kernel /assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
|
||||
initrd /assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz
|
||||
boot
|
||||
```
|
||||
|
||||
@@ -67,15 +67,15 @@ default=0
|
||||
timeout=1
|
||||
menuentry "CoreOS" {
|
||||
echo "Loading kernel"
|
||||
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
|
||||
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
|
||||
echo "Loading initrd"
|
||||
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"
|
||||
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"
|
||||
}
|
||||
```
|
||||
|
||||
## Cloud config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
|
||||
DEPRECATED: Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
|
||||
|
||||
```
|
||||
GET http://matchbox.foo/cloud?label=value
|
||||
@@ -101,7 +101,7 @@ coreos:
|
||||
command: start
|
||||
```
|
||||
|
||||
## Ignition Config
|
||||
## Container Linux Config / Ignition Config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding Ignition Config with group metadata, selectors, and query params.
|
||||
|
||||
@@ -231,7 +231,7 @@ If you need to serve static assets (e.g. kernel, initrd), `matchbox` can serve a
|
||||
```
|
||||
matchbox.foo/assets/
|
||||
└── coreos
|
||||
└── 1298.7.0
|
||||
└── 1576.5.0
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
└── 1153.0.0
|
||||
|
||||
@@ -1,9 +1,16 @@
|
||||
|
||||
# Upgrading self-hosted Kubernetes
|
||||
|
||||
[Self-hosted](bootkube.md) Kubernetes clusters schedule Kubernetes components such as the apiserver, kubelet, scheduler, and controller-manager as pods like other applications (except with node selectors). This allows Kubernetes level operations to be performed to upgrade clusters in place, rather than by re-provisioning.
|
||||
CoreOS Kubernetes clusters "self-host" the apiserver, scheduler, controller-manager, flannel, kube-dns, and kube-proxy as Kubernetes pods, like ordinary applications (except with taint tolerations). This allows upgrades to be performed in-place using (mostly) `kubectl` as an alternative to re-provisioning.
|
||||
|
||||
Let's upgrade a self-hosted Kubernetes v1.4.1 cluster to v1.4.3 as an example.
|
||||
Let's upgrade a Kubernetes v1.6.6 cluster to v1.6.7 as an example.
|
||||
|
||||
## Stability
|
||||
|
||||
This guide shows how to attempt an in-place upgrade of a Kubernetes cluster setup via the [examples](../examples). It does not provide exact diffs, migrations between breaking changes, the stability of a fresh re-provision, or any guarantees. Evaluate whether in-place updates are appropriate for your Kubernetes cluster and be prepared to perform a fresh re-provision if something goes wrong, especially between Kubernetes minor releases (e.g. 1.6 to 1.7).
|
||||
|
||||
Matchbox Kubernetes examples provide a vanilla Kubernetes cluster with only free (as in freedom and cost) software components. If you require curated updates, migrations, or guarantees for production, consider [Tectonic](https://coreos.com/tectonic/) by CoreOS.
|
||||
|
||||
**Note: Tectonic users should NOT manually upgrade. Follow the [Tectonic docs](https://coreos.com/tectonic/docs/latest/admin/upgrade.html)**
|
||||
|
||||
## Inspect
|
||||
|
||||
@@ -11,193 +18,130 @@ Show the control plane daemonsets and deployments which will need to be updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get daemonsets -n=kube-system
|
||||
NAME DESIRED CURRENT NODE-SELECTOR AGE
|
||||
kube-apiserver 1 1 master=true 5m
|
||||
kube-proxy 3 3 <none> 5m
|
||||
kubelet 3 3 <none> 5m
|
||||
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE-SELECTOR AGE
|
||||
kube-apiserver 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
kube-etcd-network-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
kube-flannel 4 4 4 4 4 <none> 21d
|
||||
kube-proxy 4 4 4 4 4 <none> 21d
|
||||
pod-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
|
||||
$ kubectl get deployments -n=kube-system
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
kube-controller-manager 1 1 1 1 5m
|
||||
kube-dns-v20 1 1 1 1 5m
|
||||
kube-scheduler 1 1 1 1 5m
|
||||
kube-controller-manager 2 2 2 2 21d
|
||||
kube-dns 1 1 1 1 21d
|
||||
kube-scheduler 2 2 2 2 21d
|
||||
```
|
||||
|
||||
Check the current Kubernetes version.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.0", GitCommit:"a16c0a7f71a6f93c7e0f222d961f4675cd97a46b", GitTreeState:"clean", BuildDate:"2016-09-26T18:16:57Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.1+coreos.0", GitCommit:"b7a02f46b972c5211e5c04fdb1d5b86ac16c00eb", GitTreeState:"clean", BuildDate:"2016-10-11T20:13:55Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.6+coreos.1", GitCommit:"42a5c8b99c994a51d9ceaed5d0254f177e97d419", GitTreeState:"clean", BuildDate:"2017-06-21T01:10:07Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
In this case, Kubernetes is `v1.4.1+coreos.0` and our goal is to upgrade to `v1.4.3+coreos.0`. First, update the control plane pods. Then the kubelets and proxies on all nodes.
|
||||
```sh
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 21d v1.6.6+coreos.1
|
||||
node2.example.com Ready 21d v1.6.6+coreos.1
|
||||
node3.example.com Ready 21d v1.6.6+coreos.1
|
||||
node4.example.com Ready 21d v1.6.6+coreos.1
|
||||
```
|
||||
|
||||
**Tip**: Follow along with a QEMU/KVM self-hosted Kubernetes cluster the first time, before upgrading your production bare-metal clusters ([tutorial](bootkube.md)).
|
||||
## Strategy
|
||||
|
||||
Update control plane components with `kubectl`. Then update the `kubelet` systemd unit on each host.
|
||||
|
||||
Prepare the changes to the Kubernetes manifests by generating assets for a target Kubernetes cluster (e.g. bootkube `v0.5.0` produces Kubernetes 1.6.6 and bootkube `v0.5.1` produces Kubernetes 1.6.7). Choose the tool used during creation of the cluster:
|
||||
|
||||
* [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) - install the `bootkube` binary for the target version and render assets
|
||||
* [poseidon/bootkube-terraform](https://github.com/poseidon/bootkube-terraform) - checkout the tag for the target version and `terraform apply` to render assets
|
||||
|
||||
Diff the generated assets against the assets used when originally creating the cluster. In simple cases, you may only need to bump the hyperkube image. In more complex cases, some manifests may have new flags or configuration.
|
||||
|
||||
## Control Plane
|
||||
|
||||
### kube-apiserver
|
||||
|
||||
Edit the kube-apiserver daemonset. Change the container image name to `quay.io/coreos/hyperkube:v1.4.3_coreos.0`.
|
||||
Edit the `kube-apiserver` daemonset to rolling update the apiserver.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kube-apiserver -n=kube-system
|
||||
```
|
||||
|
||||
Since daemonsets don't yet support rolling, manually delete each apiserver one by one and wait for each to be re-scheduled.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
# WARNING: Self-hosted Kubernetes is still new and this may fail
|
||||
$ kubectl delete pod kube-apiserver-s62kb -n=kube-system
|
||||
```
|
||||
|
||||
If you only have one, your cluster will be temporarily unavailable. Remember the Hyperkube image is quite large and this can take a minute.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-node1.example.com 1/1 Running 0 12m
|
||||
kube-apiserver-vyg3t 2/2 Running 0 2m
|
||||
kube-controller-manager-1510822774-qebia 1/1 Running 2 12m
|
||||
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 12m
|
||||
kube-proxy-8jthl 1/1 Running 0 12m
|
||||
kube-proxy-bnvgy 1/1 Running 0 12m
|
||||
kube-proxy-gkyx8 1/1 Running 0 12m
|
||||
kube-scheduler-2099299605-67ezp 1/1 Running 2 12m
|
||||
kubelet-exe5k 1/1 Running 0 12m
|
||||
kubelet-p3g98 1/1 Running 0 12m
|
||||
kubelet-quhhg 1/1 Running 0 12m
|
||||
```
|
||||
If you only have one apiserver, the cluster may be momentarily unavailable.
|
||||
|
||||
### kube-scheduler
|
||||
|
||||
Edit the scheduler deployment to rolling update the scheduler. Change the container image name for the hyperkube.
|
||||
Edit the `kube-scheduler` deployment to rolling update the scheduler.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-scheduler -n=kube-system
|
||||
```
|
||||
|
||||
Wait for the scheduler to be deployed.
|
||||
|
||||
### kube-controller-manager
|
||||
|
||||
Edit the controller-manager deployment to rolling update the controller manager. Change the container image name for the hyperkube.
|
||||
Edit the `kube-controller-manager` deployment to rolling update the controller manager.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-controller-manager -n=kube-system
|
||||
```
|
||||
|
||||
Wait for the controller manager to be deployed.
|
||||
### kube-proxy
|
||||
|
||||
Edit the `kube-proxy` daemonset to rolling update the proxy.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-node1.example.com 1/1 Running 0 28m
|
||||
kube-apiserver-vyg3t 2/2 Running 0 18m
|
||||
kube-controller-manager-1709527928-zj8c4 1/1 Running 0 4m
|
||||
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 28m
|
||||
kube-proxy-8jthl 1/1 Running 0 28m
|
||||
kube-proxy-bnvgy 1/1 Running 0 28m
|
||||
kube-proxy-gkyx8 1/1 Running 0 28m
|
||||
kube-scheduler-2255275287-hti6w 1/1 Running 0 6m
|
||||
kubelet-exe5k 1/1 Running 0 28m
|
||||
kubelet-p3g98 1/1 Running 0 28m
|
||||
kubelet-quhhg 1/1 Running 0 28m
|
||||
$ kubectl edit daemonset kube-proxy -n=kube-system
|
||||
```
|
||||
|
||||
### Others
|
||||
|
||||
If there are changes between the prior version and target version manifests, update the `kube-dns` deployment, `kube-flannel` daemonset, or `pod-checkpointer` daemonset.
|
||||
|
||||
### Verify
|
||||
|
||||
Verify the control plane components updated.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.7+coreos.0", GitCommit:"c8c505ee26ac3ab4d1dff506c46bc5538bc66733", GitTreeState:"clean", BuildDate:"2017-07-06T17:38:33Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 21d v1.6.7+coreos.0
|
||||
node2.example.com Ready 21d v1.6.7+coreos.0
|
||||
node3.example.com Ready 21d v1.6.7+coreos.0
|
||||
node4.example.com Ready 21d v1.6.7+coreos.0
|
||||
```
|
||||
|
||||
## kubelet
|
||||
|
||||
SSH to each node and update `/etc/kubernetes/kubelet.env`. Restart the `kubelet.service`.
|
||||
|
||||
```sh
|
||||
ssh core@node1.example.com
|
||||
sudo vim /etc/kubernetes/kubelet.env
|
||||
sudo systemctl restart kubelet
|
||||
```
|
||||
|
||||
### Verify
|
||||
|
||||
At this point, the control plane components have been upgraded to v1.4.3.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.0", GitCommit:"a16c0a7f71a6f93c7e0f222d961f4675cd97a46b", GitTreeState:"clean", BuildDate:"2016-09-26T18:16:57Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.3+coreos.0", GitCommit:"7819c84f25e8c661321ee80d6b9fa5f4ff06676f", GitTreeState:"clean", BuildDate:"2016-10-17T21:19:17Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
Finally, upgrade the kubelets and kube-proxies.
|
||||
|
||||
## kubelet and kube-proxy
|
||||
|
||||
Show the current kubelet and kube-proxy version on each node.
|
||||
Verify the kubelet and kube-proxy of each node updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
|
||||
kubeProxyVersion: v1.4.1+coreos.0
|
||||
kubeletVersion: v1.4.1+coreos.0
|
||||
kubeProxyVersion: v1.4.1+coreos.0
|
||||
kubeletVersion: v1.4.1+coreos.0
|
||||
kubeProxyVersion: v1.4.1+coreos.0
|
||||
kubeletVersion: v1.4.1+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
```
|
||||
|
||||
Edit the kubelet and kube-proxy daemonsets. Change the container image name for the hyperkube.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kubelet -n=kube-system
|
||||
$ kubectl edit daemonset kube-proxy -n=kube-system
|
||||
```
|
||||
|
||||
Since daemonsets don't yet support rolling, manually delete each kubelet and each kube-proxy. The daemonset controller will create new (upgraded) replicas.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
$ kubectl delete pod kubelet-quhhg
|
||||
...repeat
|
||||
$ kubectl delete pod kube-proxy-8jthl -n=kube-system
|
||||
...repeat
|
||||
|
||||
$ kubectl get pods -n=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-node1.example.com 1/1 Running 0 1h
|
||||
kube-apiserver-vyg3t 2/2 Running 0 1h
|
||||
kube-controller-manager-1709527928-zj8c4 1/1 Running 0 47m
|
||||
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 1h
|
||||
kube-proxy-6dbne 1/1 Running 0 1s
|
||||
kube-proxy-sm4jv 1/1 Running 0 8s
|
||||
kube-proxy-xmuao 1/1 Running 0 14s
|
||||
kube-scheduler-2255275287-hti6w 1/1 Running 0 49m
|
||||
kubelet-hfdwr 1/1 Running 0 38s
|
||||
kubelet-oia47 1/1 Running 0 52s
|
||||
kubelet-s6dab 1/1 Running 0 59s
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
Verify that the kubelet and kube-proxy on each node have been upgraded.
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
|
||||
kubeProxyVersion: v1.4.3+coreos.0
|
||||
kubeletVersion: v1.4.3+coreos.0
|
||||
kubeProxyVersion: v1.4.3+coreos.0
|
||||
kubeletVersion: v1.4.3+coreos.0
|
||||
kubeProxyVersion: v1.4.3+coreos.0
|
||||
kubeletVersion: v1.4.3+coreos.0
|
||||
```
|
||||
|
||||
Now, Kubernetes components have been upgraded to a new version of Kubernetes!
|
||||
|
||||
## Going further
|
||||
|
||||
Bare-metal or virtualized self-hosted Kubernetes clusters can be upgraded in place in 5-10 minutes. Here is a bare-metal example:
|
||||
|
||||
```sh
|
||||
$ kubectl -n=kube-system get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-ibm0.lab.dghubble.io 1/1 Running 0 2d
|
||||
kube-apiserver-j6atn 2/2 Running 0 5m
|
||||
kube-controller-manager-1709527928-y05n5 1/1 Running 0 1m
|
||||
kube-dns-v20-3531996453-zwbl8 3/3 Running 0 2d
|
||||
kube-proxy-e49p5 1/1 Running 0 14s
|
||||
kube-proxy-eu5dc 1/1 Running 0 8s
|
||||
kube-proxy-gjrzq 1/1 Running 0 3s
|
||||
kube-scheduler-2255275287-96n56 1/1 Running 0 2m
|
||||
kubelet-9ob0e 1/1 Running 0 19s
|
||||
kubelet-bvwp0 1/1 Running 0 14s
|
||||
kubelet-xlrql 1/1 Running 0 24s
|
||||
```
|
||||
|
||||
Check upstream for updates to addons like `kube-dns` or `kube-dashboard` and update them like any other applications. Some kube-system components use version labels and you may wish to clean those up as well.
|
||||
Kubernetes control plane components have been successfully updated!
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Self-hosted Kubernetes
|
||||
# Kubernetes
|
||||
|
||||
The self-hosted Kubernetes example provisions a 3 node "self-hosted" Kubernetes v1.6.4 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes and coordinate Container Linux auto-updates (enabled for disk installs).
|
||||
The Kubernetes example provisions a 3 node Kubernetes v1.8.5 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes.
|
||||
|
||||
## Requirements
|
||||
|
||||
@@ -9,13 +9,13 @@ Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md)
|
||||
* Use rkt or Docker to start `matchbox`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`)
|
||||
* `/etc/hosts` entries for `node[1-3].example.com`
|
||||
|
||||
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.4.4 and add it somewhere on your PATH.
|
||||
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.9.1 and add it on your $PATH.
|
||||
|
||||
```sh
|
||||
$ bootkube version
|
||||
Version: v0.4.4
|
||||
Version: v0.9.1
|
||||
```
|
||||
|
||||
## Examples
|
||||
@@ -27,10 +27,10 @@ The [examples](../examples) statically assign IP addresses to libvirt client VMs
|
||||
|
||||
## Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
Download the CoreOS Container Linux image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
|
||||
```
|
||||
|
||||
Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
@@ -44,39 +44,50 @@ Add your SSH public key to each machine group definition [as shown](../examples/
|
||||
}
|
||||
```
|
||||
|
||||
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Later, `bootkube` will schedule these manifests during bootstrapping and the credentials will be used to access your cluster.
|
||||
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Set the `--network-provider` to `flannel` (default) or `experimental-calico` if desired.
|
||||
|
||||
```sh
|
||||
$ bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=http://127.0.0.1:2379
|
||||
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379
|
||||
```
|
||||
|
||||
Later, a controller will use `bootkube` to bootstrap these manifests and the credentials will be used to access your cluster.
|
||||
|
||||
## Containers
|
||||
|
||||
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot Container Linux and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install Container Linux and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## bootkube
|
||||
|
||||
We're ready to use bootkube to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
|
||||
|
||||
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node which will path activate the `kubelet.service`.
|
||||
Secure copy the etcd TLS assets to `/etc/ssl/etcd/*` on **every controller** node.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
for node in 'node1'; do
|
||||
scp -r assets/tls/etcd-* assets/tls/etcd core@$node.example.com:/home/core/
|
||||
ssh core@$node.example.com 'sudo mkdir -p /etc/ssl/etcd && sudo mv etcd-* etcd /etc/ssl/etcd/ && sudo chown -R etcd:etcd /etc/ssl/etcd && sudo chmod -R 500 /etc/ssl/etcd/'
|
||||
done
|
||||
```
|
||||
|
||||
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every node** to path activate the `kubelet.service`.
|
||||
|
||||
```sh
|
||||
for node in 'node1' 'node2' 'node3'; do
|
||||
scp assets/auth/kubeconfig core@$node.example.com:/home/core/kubeconfig
|
||||
ssh core@$node.example.com 'sudo mv kubeconfig /etc/kubernetes/kubeconfig'
|
||||
done
|
||||
```
|
||||
|
||||
Secure copy the `bootkube` generated assets to any controller node and run `bootkube-start`.
|
||||
Secure copy the `bootkube` generated assets to **any controller** node and run `bootkube-start` (takes ~10 minutes).
|
||||
|
||||
```sh
|
||||
$ scp -r assets core@node1.example.com:/home/core
|
||||
$ ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
|
||||
scp -r assets core@node1.example.com:/home/core
|
||||
ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
|
||||
```
|
||||
|
||||
Optionally watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
|
||||
Watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
|
||||
|
||||
```sh
|
||||
$ ssh core@node1.example.com 'journalctl -f -u bootkube'
|
||||
@@ -87,39 +98,41 @@ $ ssh core@node1.example.com 'journalctl -f -u bootkube'
|
||||
[ 299.311743] bootkube[5]: All self-hosted control plane components successfully started
|
||||
```
|
||||
|
||||
You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
|
||||
[Verify](#verify) the Kubernetes cluster is accessible once complete. Then install **important** cluster [addons](cluster-addons.md). You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the kubelet, apiserver, scheduler, and controller-manager are running as pods.
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
|
||||
|
||||
```sh
|
||||
$ KUBECONFIG=assets/auth/kubeconfig
|
||||
$ export KUBECONFIG=assets/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1.example.com Ready 3m
|
||||
node2.example.com Ready 3m
|
||||
node3.example.com Ready 3m
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 11m v1.8.5
|
||||
node2.example.com Ready 11m v1.8.5
|
||||
node3.example.com Ready 11m v1.8.5
|
||||
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system checkpoint-installer-p8g8r 1/1 Running 1 13m
|
||||
kube-system kube-apiserver-s5gnx 1/1 Running 1 41s
|
||||
kube-system kube-controller-manager-3438979800-jrlnd 1/1 Running 1 13m
|
||||
kube-system kube-controller-manager-3438979800-tkjx7 1/1 Running 1 13m
|
||||
kube-system kube-dns-4101612645-xt55f 4/4 Running 4 13m
|
||||
kube-system kube-flannel-pl5c2 2/2 Running 0 13m
|
||||
kube-system kube-flannel-r9t5r 2/2 Running 3 13m
|
||||
kube-system kube-flannel-vfb0s 2/2 Running 4 13m
|
||||
kube-system kube-proxy-cvhmj 1/1 Running 0 13m
|
||||
kube-system kube-proxy-hf9mh 1/1 Running 1 13m
|
||||
kube-system kube-proxy-kpl73 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-1l23b 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-fks0b 1/1 Running 1 13m
|
||||
kube-system pod-checkpointer-node1.example.com 1/1 Running 2 10m
|
||||
kube-system kube-apiserver-zd1k3 1/1 Running 0 7m
|
||||
kube-system kube-controller-manager-762207937-2ztxb 1/1 Running 0 7m
|
||||
kube-system kube-controller-manager-762207937-vf6bk 1/1 Running 1 7m
|
||||
kube-system kube-dns-2431531914-qc752 3/3 Running 0 7m
|
||||
kube-system kube-flannel-180mz 2/2 Running 1 7m
|
||||
kube-system kube-flannel-jjr0x 2/2 Running 0 7m
|
||||
kube-system kube-flannel-mlr9w 2/2 Running 0 7m
|
||||
kube-system kube-proxy-0jlq7 1/1 Running 0 7m
|
||||
kube-system kube-proxy-k4mjl 1/1 Running 0 7m
|
||||
kube-system kube-proxy-l4xrd 1/1 Running 0 7m
|
||||
kube-system kube-scheduler-1873228005-5d2mk 1/1 Running 0 7m
|
||||
kube-system kube-scheduler-1873228005-s4w27 1/1 Running 0 7m
|
||||
kube-system pod-checkpointer-hb960 1/1 Running 0 7m
|
||||
kube-system pod-checkpointer-hb960-node1.example.com 1/1 Running 0 6m
|
||||
```
|
||||
|
||||
Try deleting pods to see that the cluster is resilient to failures and machine restarts (CoreOS auto-updates).
|
||||
## Addons
|
||||
|
||||
Install **important** cluster [addons](cluster-addons.md).
|
||||
|
||||
## Going further
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
# Cloud config
|
||||
|
||||
**Note:** We recommend migrating to [Container Linux Configs](container-linux-config.md) for hardware provisioning.
|
||||
**Note:** Please migrate to [Container Linux Configs](container-linux-config.md). Cloud-Config support will be removed in the future.
|
||||
|
||||
CoreOS Cloud-Config is a system for configuring machines with a Cloud-Config file or executable script from user-data. Cloud-Config runs in userspace on each boot and implements a subset of the [cloud-init spec](http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data). See the cloud-config [docs](https://coreos.com/os/docs/latest/cloud-config.html) for details.
|
||||
|
||||
|
||||
30
Documentation/cluster-addons.md
Normal file
30
Documentation/cluster-addons.md
Normal file
@@ -0,0 +1,30 @@
|
||||
## Cluster Addons
|
||||
|
||||
Kubernetes clusters run cluster addons atop Kubernetes itself. Addons may be considered essential for bootstrapping (non-optional), important (highly recommended), or optional.
|
||||
|
||||
## Essential
|
||||
|
||||
Several addons are considered essential. CoreOS cluster creation tools ensure these addons are included. Kubernetes clusters deployed via the Matchbox examples or using our Terraform Modules include these addons as well.
|
||||
|
||||
### kube-proxy
|
||||
|
||||
`kube-proxy` is deployed as a DaemonSet.
|
||||
|
||||
### kube-dns
|
||||
|
||||
`kube-dns` is deployed as a Deployment.
|
||||
|
||||
## Important
|
||||
|
||||
### Container Linux Update Operator
|
||||
|
||||
The [Container Linux Update Operator](https://github.com/coreos/container-linux-update-operator) (i.e. CLUO) coordinates reboots of auto-updating Container Linux nodes so that one node reboots at a time and nodes are drained before reboot. CLUO enables the auto-update behavior Container Linux clusters are known for, but does it in a Kubernetes native way. Deploying CLUO is strongly recommended.
|
||||
|
||||
Create the `update-operator` deployment and `update-agent` DaemonSet.
|
||||
|
||||
```
|
||||
kubectl apply -f examples/addons/cluo/update-operator.yaml
|
||||
kubectl apply -f examples/addons/cluo/update-agent.yaml
|
||||
```
|
||||
|
||||
*Note, CLUO replaces `locksmithd` reboot coordination. The `update_engine` systemd unit on hosts still performs the Container Linux update check, download, and install to the inactive partition.*
|
||||
@@ -1,6 +1,6 @@
|
||||
# Container Linux Configs
|
||||
|
||||
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk partitions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
|
||||
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk partitions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
|
||||
|
||||
### Ignition
|
||||
|
||||
@@ -75,7 +75,7 @@ passwd:
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
The Ignition config response (formatted) to a query `/ignition?label=value` for a CoreOS instance supporting Ignition 2.0.0 would be:
|
||||
The Ignition config response (formatted) to a query `/ignition?label=value` for a Container Linux instance supporting Ignition 2.0.0 would be:
|
||||
|
||||
```json
|
||||
{
|
||||
|
||||
@@ -4,11 +4,11 @@ This guide walks through deploying the `matchbox` service on a Linux host (via R
|
||||
|
||||
## Provisioner
|
||||
|
||||
`matchbox` is a service for network booting and provisioning machines to create Container Linux clusters. `matchbox` should be installed on a provisioner machine (CoreOS or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
|
||||
`matchbox` is a service for network booting and provisioning machines to create CoreOS Container Linux clusters. `matchbox` should be installed on a provisioner machine (Container Linux or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
|
||||
|
||||
Choose one of the supported installation options:
|
||||
|
||||
* [CoreOS (rkt)](#coreos)
|
||||
* [CoreOS Container Linux (rkt)](#coreos-container-linux)
|
||||
* [RPM-based](#rpm-based-distro)
|
||||
* [Generic Linux (binary)](#generic-linux)
|
||||
* [With rkt](#rkt)
|
||||
@@ -20,39 +20,41 @@ Choose one of the supported installation options:
|
||||
Download the latest matchbox [release](https://github.com/coreos/matchbox/releases) to the provisioner host.
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.6.0/matchbox-v0.6.0-linux-amd64.tar.gz
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.6.0/matchbox-v0.6.0-linux-amd64.tar.gz.asc
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.7.1/matchbox-v0.7.1-linux-amd64.tar.gz
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.7.1/matchbox-v0.7.1-linux-amd64.tar.gz.asc
|
||||
```
|
||||
|
||||
Verify the release has been signed by the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/).
|
||||
|
||||
```sh
|
||||
$ gpg --keyserver pgp.mit.edu --recv-key 18AD5014C99EF7E3BA5F6CE950BDD3E0FC8A365E
|
||||
$ gpg --verify matchbox-v0.6.0-linux-amd64.tar.gz.asc matchbox-v0.6.0-linux-amd64.tar.gz
|
||||
$ gpg --verify matchbox-v0.7.1-linux-amd64.tar.gz.asc matchbox-v0.7.1-linux-amd64.tar.gz
|
||||
# gpg: Good signature from "CoreOS Application Signing Key <security@coreos.com>"
|
||||
```
|
||||
|
||||
Untar the release.
|
||||
|
||||
```sh
|
||||
$ tar xzvf matchbox-v0.6.0-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.6.0-linux-amd64
|
||||
$ tar xzvf matchbox-v0.7.1-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.7.1-linux-amd64
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
### RPM-based distro
|
||||
|
||||
On an RPM-based provisioner, install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf` or `yum`.
|
||||
On an RPM-based provisioner (Fedora 24+), install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf`.
|
||||
|
||||
```sh
|
||||
dnf copr enable @CoreOS/matchbox
|
||||
dnf install matchbox
|
||||
```
|
||||
|
||||
### CoreOS
|
||||
RPMs are not currently available for CentOS and RHEL (due to Go version). CentOS and RHEL users should follow the Generic Linux section below.
|
||||
|
||||
On a CoreOS provisioner, rkt run `matchbox` image with the provided systemd unit.
|
||||
### CoreOS Container Linux
|
||||
|
||||
On a Container Linux provisioner, rkt run `matchbox` image with the provided systemd unit.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-on-coreos.service /etc/systemd/system/matchbox.service
|
||||
@@ -81,7 +83,7 @@ $ sudo chown -R matchbox:matchbox /var/lib/matchbox
|
||||
Copy the provided `matchbox` systemd unit file.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/
|
||||
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
## Customization
|
||||
@@ -110,7 +112,7 @@ Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
```
|
||||
|
||||
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a CoreOS provisioner can start with an example that enables it.
|
||||
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a Container Linux provisioner can start with an example that enables it.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-for-tectonic.service /etc/systemd/system/matchbox.service
|
||||
@@ -127,31 +129,44 @@ $ sudo firewall-cmd --zone=MYZONE --add-port=8080/tcp --permanent
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8081/tcp --permanent
|
||||
```
|
||||
|
||||
## Generate TLS credentials
|
||||
## Generate TLS Certificates
|
||||
|
||||
*Skip this unless you need to enable the gRPC API*
|
||||
The Matchbox gRPC API allows clients (terraform-provider-matchbox) to create and update Matchbox resources. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
|
||||
The `matchbox` gRPC API allows client apps (terraform-provider-matchbox, Tectonic Installer, etc.) to update how machines are provisioned. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
The `cert-gen` helper script generates a self-signed CA, server certificate, and client certificate. **Prefer your organization's PKI, if possible**
|
||||
|
||||
If your organization manages public key infrastructure and a certificate authority, create a server certificate and key for the `matchbox` service and a client certificate and key for each client tool.
|
||||
|
||||
Otherwise, generate a self-signed `ca.crt`, a server certificate (`server.crt`, `server.key`), and client credentials (`client.crt`, `client.key`) with the `examples/etc/matchbox/cert-gen` script. Export the DNS name or IP (discouraged) of the provisioner host.
|
||||
Navigate to the `scripts/tls` directory.
|
||||
|
||||
```sh
|
||||
$ cd scripts/tls
|
||||
```
|
||||
|
||||
Export `SAN` to set the Subject Alt Names which should be used in certificates. Provide the fully qualified domain name or IP (discouraged) where Matchbox will be installed.
|
||||
|
||||
```sh
|
||||
# DNS or IP Subject Alt Names where matchbox runs
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:172.18.0.2
|
||||
```
|
||||
|
||||
Generate a `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`.
|
||||
|
||||
```sh
|
||||
$ cd examples/etc/matchbox
|
||||
# DNS or IP Subject Alt Names where matchbox can be reached
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:192.168.1.42
|
||||
$ ./cert-gen
|
||||
```
|
||||
|
||||
Place the TLS credentials in the default location:
|
||||
Move TLS credentials to the matchbox server's default location.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/matchbox
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox/
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox
|
||||
```
|
||||
|
||||
Save `client.crt`, `client.key`, and `ca.crt` to use with a client tool later.
|
||||
Save `client.crt`, `client.key`, and `ca.crt` for later use (e.g. `~/.matchbox`).
|
||||
|
||||
```sh
|
||||
$ mkdir -p ~/.matchbox
|
||||
$ cp client.crt client.key ca.crt ~/.matchbox/
|
||||
```
|
||||
|
||||
## Start matchbox
|
||||
|
||||
@@ -182,7 +197,7 @@ matchbox
|
||||
If you enabled the gRPC API,
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert examples/etc/matchbox/client.crt -key examples/etc/matchbox/client.key
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert scripts/tls/client.crt -key scripts/tls/client.key
|
||||
CONNECTED(00000003)
|
||||
depth=1 CN = fake-ca
|
||||
verify return:1
|
||||
@@ -196,14 +211,14 @@ Certificate chain
|
||||
....
|
||||
```
|
||||
|
||||
## Download CoreOS (optional)
|
||||
## Download Container Linux (optional)
|
||||
|
||||
`matchbox` can serve CoreOS images in development or lab environments to reduce bandwidth usage and increase the speed of CoreOS PXE boots and installs to disk.
|
||||
`matchbox` can serve Container Linux images in development or lab environments to reduce bandwidth usage and increase the speed of Container Linux PXE boots and installs to disk.
|
||||
|
||||
Download a recent CoreOS [release](https://coreos.com/releases/) with signatures.
|
||||
Download a recent Container Linux [release](https://coreos.com/releases/) with signatures.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 . # note the "." 3rd argument
|
||||
$ ./scripts/get-coreos stable 1576.5.0 . # note the "." 3rd argument
|
||||
```
|
||||
|
||||
Move the images to `/var/lib/matchbox/assets`,
|
||||
@@ -215,7 +230,7 @@ $ sudo cp -r coreos /var/lib/matchbox/assets
|
||||
```
|
||||
/var/lib/matchbox/assets/
|
||||
├── coreos
|
||||
│ └── 1298.7.0
|
||||
│ └── 1576.5.0
|
||||
│ ├── CoreOS_Image_Signing_Key.asc
|
||||
│ ├── coreos_production_image.bin.bz2
|
||||
│ ├── coreos_production_image.bin.bz2.sig
|
||||
@@ -228,11 +243,11 @@ $ sudo cp -r coreos /var/lib/matchbox/assets
|
||||
and verify the images are accessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080/assets/coreos/1298.7.0/
|
||||
$ curl http://matchbox.example.com:8080/assets/coreos/1576.5.0/
|
||||
<pre>...
|
||||
```
|
||||
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve CoreOS images. See [contrib/squid](../contrib/squid/README.md) for details.
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve Container Linux images. See [contrib/squid](../contrib/squid/README.md) for details.
|
||||
|
||||
## Network
|
||||
|
||||
@@ -292,7 +307,8 @@ Create an Ingress resource to expose the HTTP read-only and gRPC API endpoints.
|
||||
$ kubectl create -f contrib/k8s/matchbox-ingress.yaml
|
||||
$ kubectl get ingress
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
matchbox matchbox.example.com,matchbox-rpc.example.com 10.128.0.3,10... 80, 443 32m
|
||||
matchbox matchbox.example.com 10.128.0.3,10... 80 29m
|
||||
matchbox-rpc matchbox-rpc.example.com 10.128.0.3,10... 80, 443 29m
|
||||
```
|
||||
|
||||
Add DNS records `matchbox.example.com` and `matchbox-rpc.example.com` to route traffic to the Ingress Controller.
|
||||
@@ -304,6 +320,16 @@ $ curl http://matchbox.example.com
|
||||
$ openssl s_client -connect matchbox-rpc.example.com:443 -CAfile ca.crt -cert client.crt -key client.key
|
||||
```
|
||||
|
||||
# HTTPS - The read-only Matchbox API is also available with HTTPS
|
||||
|
||||
To start matchbox in this mode you will need the following flags set:
|
||||
|
||||
| Name | Type | Description |
|
||||
|----------------|--------|---------------------------------------------------------------|
|
||||
| -web-ssl | bool | true/false |
|
||||
| -web-cert-file | string | Path to the server TLS certificate file |
|
||||
| -web-key-file | string | Path to the server TLS key file |
|
||||
|
||||
### Operational notes
|
||||
|
||||
* Secrets: Matchbox **can** be run as a public facing service. However, you **must** follow best practices and avoid writing secret material into machine user-data. Instead, load secret materials from an internal secret store.
|
||||
|
||||
@@ -8,7 +8,7 @@ This guide covers releasing new versions of matchbox.
|
||||
Create a release commit which updates old version references.
|
||||
|
||||
```sh
|
||||
$ export VERSION=v0.6.0
|
||||
$ export VERSION=v0.7.1
|
||||
```
|
||||
|
||||
## Tag
|
||||
@@ -45,7 +45,7 @@ $ make release
|
||||
Verify the reported version.
|
||||
|
||||
```
|
||||
./_output/matchbox-v0.6.0-linux-amd64/matchbox -version
|
||||
./_output/matchbox-v0.7.1-linux-amd64/matchbox -version
|
||||
```
|
||||
|
||||
## Signing
|
||||
@@ -54,10 +54,10 @@ Sign the release tarballs and ACI with a [CoreOS App Signing Key](https://coreos
|
||||
|
||||
```sh
|
||||
cd _output
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-arm.tar.gz
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-arm.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
|
||||
```
|
||||
|
||||
Verify the signatures.
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
|
||||
# Getting started with Docker
|
||||
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
@@ -26,13 +25,13 @@ $ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS image assets referenced by the `etcd-docker` [example](../examples) to `examples/assets`.
|
||||
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
|
||||
```
|
||||
|
||||
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name as you would in production.
|
||||
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name.
|
||||
|
||||
```sh
|
||||
# /etc/hosts
|
||||
@@ -44,11 +43,18 @@ For development convenience, add `/etc/hosts` entries for nodes so they may be r
|
||||
|
||||
## Containers
|
||||
|
||||
Run the latest `matchbox` Docker image from `quay.io/coreos/matchbox` with the `etcd-docker` example. The container should receive the IP address 172.17.0.2 on the `docker0` bridge.
|
||||
Run the `matchbox` and `dnsmasq` services on the `docker0` bridge. `dnsmasq` will run DHCP, DNS and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
|
||||
|
||||
The `devnet` convenience script can start these services and accepts the name of any example cluster in [examples](../examples).
|
||||
|
||||
```sh
|
||||
$ sudo docker pull quay.io/coreos/matchbox:latest
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo ./scripts/devnet create etcd3
|
||||
```
|
||||
|
||||
Inspect the logs.
|
||||
|
||||
```
|
||||
$ sudo ./scripts/devnet status
|
||||
```
|
||||
|
||||
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
|
||||
@@ -57,28 +63,28 @@ Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of ho
|
||||
* Ignition [http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* Metadata [http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
|
||||
## Network
|
||||
### Manual
|
||||
|
||||
Since the virtual network has no network boot services, use the `dnsmasq` image to create an iPXE network boot environment which runs DHCP, DNS, and TFTP.
|
||||
If you prefer to start the containers yourself, instead of using `devnet`,
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/coreos/dnsmasq -d
|
||||
```
|
||||
|
||||
In this case, dnsmasq runs a DHCP server allocating IPs to VMs between 172.17.0.43 and 172.17.0.99, resolves `matchbox.foo` to 172.17.0.2 (the IP where `matchbox` runs), and points iPXE clients to `http://matchbox.foo:8080/boot.ipxe`.
|
||||
|
||||
## Client VMs
|
||||
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker's containers run.
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker containers run.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-docker
|
||||
$ sudo ./scripts/libvirt create
|
||||
```
|
||||
|
||||
You can connect to the serial console of any node. If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
|
||||
```sh
|
||||
$ sudo virsh console node1
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
You can also use `virt-manager` to watch the console.
|
||||
@@ -101,7 +107,6 @@ The example profile added autologin so you can verify that etcd3 works between n
|
||||
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ ETCDCTL_API=3
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
@@ -110,8 +115,7 @@ $ etcdctl get /message
|
||||
Clean up the containers and VM machines.
|
||||
|
||||
```sh
|
||||
$ sudo docker rm -f dnsmasq
|
||||
$ sudo ./scripts/libvirt poweroff
|
||||
$ sudo ./scripts/devnet destroy
|
||||
$ sudo ./scripts/libvirt destroy
|
||||
```
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Getting started with rkt
|
||||
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of QEMU/KVM CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
@@ -27,10 +27,10 @@ $ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS image assets referenced by the `etcd` [example](../examples) to `examples/assets`.
|
||||
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
|
||||
```
|
||||
|
||||
## Network
|
||||
@@ -74,18 +74,19 @@ For development convenience, you may wish to add `/etc/hosts` entries for nodes
|
||||
|
||||
## Containers
|
||||
|
||||
Run the `matchbox` and `dnsmasq` services on the `metal0` bridge. `dnsmasq` will run DHCP, DNS, and TFTP services to create a suitable network boot environment. `matchbox` will serve provisioning configs to machines on the network which attempt to PXE boot.
|
||||
Run the `matchbox` and `dnsmasq` services on the `metal0` bridge. `dnsmasq` will run DHCP, DNS, and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
|
||||
|
||||
The `devnet` wrapper script rkt runs `matchbox` and `dnsmasq` in systemd transient units. Create can take the name of any example cluster in [examples](../examples).
|
||||
The `devnet` convenience script can rkt run these services in systemd transient units and accepts the name of any example cluster in [examples](../examples).
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/devnet create etcd3
|
||||
$ export CONTAINER_RUNTIME=rkt
|
||||
$ sudo -E ./scripts/devnet create etcd3
|
||||
```
|
||||
|
||||
Inspect the journal logs or check the status of the systemd services.
|
||||
Inspect the journal logs.
|
||||
|
||||
```
|
||||
$ sudo ./scripts/devnet status
|
||||
$ sudo -E ./scripts/devnet status
|
||||
$ journalctl -f -u dev-matchbox
|
||||
$ journalctl -f -u dev-dnsmasq
|
||||
```
|
||||
@@ -106,14 +107,14 @@ sudo rkt run --net=metal0:IP=172.18.0.2 \
|
||||
--volume data,kind=host,source=$PWD/examples \
|
||||
--mount volume=groups,target=/var/lib/matchbox/groups \
|
||||
--volume groups,kind=host,source=$PWD/examples/groups/etcd3 \
|
||||
quay.io/coreos/matchbox:v0.6.0 -- -address=0.0.0.0:8080 -log-level=debug
|
||||
quay.io/coreos/matchbox:v0.7.1 -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.3 \
|
||||
--dns=host \
|
||||
--mount volume=config,target=/etc/dnsmasq.conf \
|
||||
--volume config,kind=host,source=$PWD/contrib/dnsmasq/metal0.conf \
|
||||
quay.io/coreos/dnsmasq:v0.4.0 \
|
||||
quay.io/coreos/dnsmasq:v0.4.1 \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW
|
||||
```
|
||||
|
||||
@@ -128,13 +129,14 @@ $ sudo rkt gc --grace-period=0
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `metal0` bridge, where your pods run.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create
|
||||
$ sudo ./scripts/libvirt create-rkt
|
||||
```
|
||||
|
||||
You can connect to the serial console of any node. If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
|
||||
```sh
|
||||
$ sudo virsh console node1
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
You can also use `virt-manager` to watch the console.
|
||||
@@ -157,7 +159,6 @@ The example profile added autologin so you can verify that etcd3 works between n
|
||||
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ ETCDCTL_API=3
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
@@ -167,7 +168,7 @@ $ etcdctl get /message
|
||||
Clean up the systemd units running `matchbox` and `dnsmasq`.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/devnet destroy
|
||||
$ sudo -E ./scripts/devnet destroy
|
||||
```
|
||||
|
||||
Clean up VM machines.
|
||||
|
||||
@@ -8,7 +8,7 @@ You'll install the `matchbox` service, setup a PXE network boot environment, and
|
||||
|
||||
Install `matchbox` on a dedicated server or Kubernetes cluster. Generate TLS credentials and enable the gRPC API as directed. Save the `ca.crt`, `client.crt`, and `client.key` on your local machine (e.g. `~/.matchbox`).
|
||||
|
||||
* Installing on [CoreOS / Linux distros](deployment.md)
|
||||
* Installing on [Container Linux / other distros](deployment.md)
|
||||
* Installing on [Kubernetes](deployment.md#kubernetes)
|
||||
* Running with [rkt](deployment.md#rkt) / [docker](deployment.md#docker)
|
||||
|
||||
@@ -34,7 +34,7 @@ Install [Terraform][terraform-dl] v0.9+ on your system.
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.9.2
|
||||
Terraform v0.9.4
|
||||
```
|
||||
|
||||
Add the `terraform-provider-matchbox` plugin binary on your system.
|
||||
@@ -61,7 +61,7 @@ $ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox/examples/terraform
|
||||
```
|
||||
|
||||
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install CoreOS to `dev/sda`, reboot, and have your SSH key set. Its not much of a cluster, but we'll get to that later.
|
||||
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install Container Linux to `dev/sda`, reboot, and have your SSH key set. Its not much of a cluster, but we'll get to that later.
|
||||
|
||||
```sh
|
||||
$ cd simple-install
|
||||
@@ -122,9 +122,9 @@ resource "matchbox_profile" "coreos-install" {
|
||||
|
||||
#### Groups
|
||||
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and template in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and templates in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
|
||||
```
|
||||
```hcl
|
||||
resource "matchbox_group" "default" {
|
||||
name = "default"
|
||||
profile = "${matchbox_profile.coreos-install.name}"
|
||||
|
||||
@@ -62,5 +62,5 @@ $ sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-r
|
||||
Create a VM to verify the machine network boots.
|
||||
|
||||
```sh
|
||||
$ sudo virt-install --name uefi-test --pxe --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
$ sudo virt-install --name uefi-test --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```
|
||||
|
||||
@@ -4,9 +4,9 @@
|
||||
|
||||
Physical machines [network boot](network-booting.md) in an network boot environment with DHCP/TFTP/DNS services or with [coreos/dnsmasq](../contrib/dnsmasq).
|
||||
|
||||
`matchbox` serves iPXE, GRUB, or Pixiecore boot configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS. The "first boot" Ignition config if fetched and CoreOS is installed.
|
||||
`matchbox` serves iPXE or GRUB configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS Container Linux. The "first boot" Ignition config if fetched and Container Linux is installed.
|
||||
|
||||
CoreOS boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
|
||||
Container Linux boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
|
||||
|
||||
Coordinated auto-updates are enabled. Systems like [fleet](https://coreos.com/docs/#fleet) or [Kubernetes](http://kubernetes.io/docs/) coordinate container services. IPMI, vendor utilities, or first-boot are used to re-provision machines into new roles.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# matchbox
|
||||
|
||||
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use CoreOS Linux's early-boot capabilities to provision CoreOS machines.
|
||||
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use Container Linux's early-boot capabilities to provision Container Linux machines.
|
||||
|
||||
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with rkt, or as a Docker container.
|
||||
|
||||
@@ -59,13 +59,13 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
|
||||
```json
|
||||
{
|
||||
"id": "etcd",
|
||||
"name": "CoreOS with etcd2",
|
||||
"name": "Container Linux with etcd2",
|
||||
"cloud_id": "",
|
||||
"ignition_id": "etcd.yaml",
|
||||
"generic_id": "some-service.cfg",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
@@ -75,7 +75,7 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
|
||||
}
|
||||
```
|
||||
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE, GRUB, or Pixiecore. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE or GRUB. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
|
||||
@@ -173,7 +173,7 @@ matchbox.foo/assets/
|
||||
|
||||
For example, a `Profile` might refer to a local asset `/assets/coreos/VERSION/coreos_production_pxe.vmlinuz` instead of `http://stable.release.core-os.net/amd64-usr/VERSION/coreos_production_pxe.vmlinuz`.
|
||||
|
||||
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place CoreOS assets.
|
||||
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place Container Linux assets.
|
||||
|
||||
## Network
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ The network environment can be set up in a number of ways, which we'll discuss.
|
||||
|
||||
### Network boot programs
|
||||
|
||||
Machines can be booted and configured with CoreOS using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
|
||||
Machines can be booted and configured with CoreOS Container Linux using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
|
||||
|
||||
#### PXELINUX
|
||||
|
||||
@@ -26,7 +26,7 @@ $ mybootdir/pxelinux.cfg/b8945908-d6a6-41a9-611d-74a6ab80b83d
|
||||
$ mybootdir/pxelinux.cfg/default
|
||||
```
|
||||
|
||||
Here is an example PXE config file which boots a CoreOS image hosted on the TFTP server.
|
||||
Here is an example PXE config file which boots a Container Linux image hosted on the TFTP server.
|
||||
|
||||
```
|
||||
default coreos
|
||||
@@ -53,7 +53,7 @@ This approach has a number of drawbacks. TFTP can be slow, managing config files
|
||||
|
||||
A DHCPOFFER to iPXE client firmware specifies an HTTP boot script such as `http://matchbox.foo/boot.ipxe`.
|
||||
|
||||
Here is an example iPXE script for booting the remote CoreOS stable image.
|
||||
Here is an example iPXE script for booting the remote Container Linux stable image.
|
||||
|
||||
```
|
||||
#!ipxe
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Network setup
|
||||
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to work with `matchbox` to boot and provision PXE, iPXE, or GRUB2 client machines.
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to boot and provision BIOS/PXE, iPXE, or UEFI client machines.
|
||||
|
||||
`matchbox` serves iPXE scripts or GRUB configs over HTTP to serve as the entrypoint for CoreOS cluster bring-up. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, you can configure your own network services to point to `matchbox` or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in libvirt demos).
|
||||
Matchbox serves iPXE scripts over HTTP to serve as the entrypoint for provisioning clusters. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, configure your network environment to point to Matchbox or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in local QEMU/KVM setup).
|
||||
|
||||
*Note*: These are just suggestions. Your network administrator or system administrator should choose the right network setup for your company.
|
||||
|
||||
@@ -13,13 +13,14 @@ Client hardware must have a network interface which supports PXE or iPXE.
|
||||
## Goals
|
||||
|
||||
* Add a DNS name which resolves to a `matchbox` deploy.
|
||||
* Chainload PXE firmware to iPXE or GRUB2
|
||||
* Point iPXE clients to `http://matchbox.foo:port/boot.ipxe`
|
||||
* Point GRUB clients to `http://matchbox.foo:port/grub`
|
||||
* Chainload BIOS clients (legacy PXE) to iPXE (undionly.kpxe)
|
||||
* Chainload UEFI clients to iPXE (ipxe.efi)
|
||||
* Point iPXE clients to `http://matchbox.example.com:port/boot.ipxe`
|
||||
* Point GRUB clients to `http://matchbox.example.com:port/grub`
|
||||
|
||||
## Setup
|
||||
|
||||
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.foo:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE or GRUB to be able to fetch subsequent configs via HTTP.
|
||||
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.example.com:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE to be able to fetch subsequent configs via HTTP.
|
||||
|
||||
On simpler networks, such as what a developer might have at home, a relatively inflexible DHCP server may be in place, with no TFTP server. In this case, a proxy DHCP server can be run alongside a non-PXE capable DHCP server.
|
||||
|
||||
@@ -31,17 +32,17 @@ The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wi
|
||||
|
||||
## DNS
|
||||
|
||||
Add a DNS entry (e.g. `matchbox.foo`, `provisoner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
|
||||
Add a DNS entry (e.g. `matchbox.example.com`, `provisoner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
|
||||
|
||||
```sh
|
||||
$ dig matchbox.foo
|
||||
$ dig matchbox.example.com
|
||||
```
|
||||
|
||||
If you deployed `matchbox` to a known IP address (e.g. dedicated host, load balanced endpoint, Kubernetes NodePort) and use `dnsmasq`, a domain name to IPv4/IPv6 address mapping could be added to the `/etc/dnsmasq.conf`.
|
||||
|
||||
```
|
||||
# dnsmasq.conf
|
||||
address=/matchbox.foo/172.18.0.2
|
||||
address=/matchbox.example.com/172.18.0.2
|
||||
```
|
||||
|
||||
## iPXE
|
||||
@@ -50,7 +51,7 @@ Networks which already run DHCP and TFTP services to network boot PXE/iPXE clien
|
||||
|
||||
```
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://matchbox.foo:8080/boot.ipxe
|
||||
chain http://matchbox.example.com:8080/boot.ipxe
|
||||
```
|
||||
|
||||
You can chainload from a menu entry or use other [iPXE commands](http://ipxe.org/cmd) if you need to do more than simple delegation.
|
||||
@@ -67,26 +68,35 @@ dhcp-range=192.168.1.1,192.168.1.254,30m
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
# if request comes from older PXE ROM, chainload to iPXE (via TFTP)
|
||||
dhcp-boot=tag:!ipxe,undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
# Legacy PXE
|
||||
dhcp-match=set:bios,option:client-arch,0
|
||||
dhcp-boot=tag:bios,undionly.kpxe
|
||||
|
||||
# UEFI
|
||||
dhcp-match=set:efi32,option:client-arch,6
|
||||
dhcp-boot=tag:efi32,ipxe.efi
|
||||
dhcp-match=set:efibc,option:client-arch,7
|
||||
dhcp-boot=tag:efibc,ipxe.efi
|
||||
dhcp-match=set:efi64,option:client-arch,9
|
||||
dhcp-boot=tag:efi64,ipxe.efi
|
||||
|
||||
# iPXE - chainload to matchbox ipxe boot script
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
|
||||
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
# verbose
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
# static DNS assignements
|
||||
address=/matchbox.foo/192.168.1.100
|
||||
address=/matchbox.example.com/192.168.1.100
|
||||
|
||||
# (optional) disable DNS and specify alternate
|
||||
# port=0
|
||||
# dhcp-option=6,192.168.1.100
|
||||
```
|
||||
|
||||
Add [unidonly.kpxe](http://boot.ipxe.org/undionly.kpxe) (and undionly.kpxe.0 if using dnsmasq) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
Add [ipxe.efi](http://boot.ipxe.org/ipxe.efi) and [unidonly.kpxe](http://boot.ipxe.org/undionly.kpxe) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
|
||||
```sh
|
||||
$ sudo systemctl start dnsmasq
|
||||
@@ -113,7 +123,7 @@ pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
|
||||
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.foo:8080/boot.ipxe
|
||||
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
# verbose
|
||||
log-queries
|
||||
@@ -141,14 +151,14 @@ timeout 10
|
||||
default iPXE
|
||||
LABEL iPXE
|
||||
KERNEL ipxe.lkrn
|
||||
APPEND dhcp && chain http://matchbox.foo:8080/boot.ipxe
|
||||
APPEND dhcp && chain http://matchbox.example.com:8080/boot.ipxe
|
||||
```
|
||||
|
||||
Add ipxe.lkrn to `/var/lib/tftpboot` (see [iPXE docs](http://ipxe.org/embed)).
|
||||
|
||||
## coreos/dnsmasq
|
||||
|
||||
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `undionly.kpxe` and `grub.efi` for convenience. See [contrib/dnsmasq](contrib/dnsmasq) for details.
|
||||
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `ipxe.efi`, `undionly.kpxe`, and `grub.efi` for convenience. See [contrib/dnsmasq](../contrib/dnsmasq) for details.
|
||||
|
||||
Run DHCP, TFTP, and DNS on the host's network:
|
||||
|
||||
@@ -159,9 +169,16 @@ sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
@@ -171,10 +188,17 @@ sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example/192.168.1.2 \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
@@ -211,20 +235,19 @@ Be sure to allow enabled services in your firewall configuration.
|
||||
$ sudo firewall-cmd --add-service=dhcp --add-service=tftp --add-service=dns
|
||||
```
|
||||
|
||||
## GRUB
|
||||
## UEFI
|
||||
|
||||
Grub can be used to delegate as well.
|
||||
### Development
|
||||
|
||||
`grub-mknetdir --net-directory=/var/lib/tftpboot`
|
||||
Install the dependencies for [QEMU with UEFI](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU). Walk through the [getting-started-with-docker](getting-started-with-docker.md) tutorial. Launch client VMs using `create-uefi`.
|
||||
|
||||
/var/lib/tftpboot/boot/grub/grub.cfg:
|
||||
```ini
|
||||
insmod i386-pc/http.mod
|
||||
set root=http,matchbox.foo:8080
|
||||
configfile /grub
|
||||
Create UEFI QEMU/KVM VMs attached to the `docker0` bridge.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
Make sure to replace variables in the example config files; instead of iPXE variables, use GRUB variables. Check the [GRUB2 manual](https://www.gnu.org/software/grub/manual/grub.html#Network).
|
||||
UEFI clients should chainload `ipxe.efi`, load iPXE and Ignition configs from Matchbox, and Container Linux should boot as usual.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
# Kubernetes (with rkt)
|
||||
|
||||
The `rktnetes` example provisions a 3 node Kubernetes v1.5.5 cluster with [rkt](https://github.com/coreos/rkt) as the container runtime. The cluster has one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) or [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use rkt or Docker to start `matchbox`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`)
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. VMs are setup on the `metal0` CNI bridge for rkt or the `docker0` bridge for Docker. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [rktnetes](../examples/groups/rktnetes) - iPXE boot a Kubernetes cluster
|
||||
* [rktnetes-install](../examples/groups/rktnetes-install) - Install a Kubernetes cluster to disk
|
||||
* [Lab examples](https://github.com/dghubble/metal) - Lab hardware examples
|
||||
|
||||
## Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
```
|
||||
|
||||
Optionally, add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
|
||||
Generate a root CA and Kubernetes TLS assets for components (`admin`, `apiserver`, `worker`) with SANs for `node1.example.com`, etc.
|
||||
|
||||
```sh
|
||||
$ rm -rf examples/assets/tls
|
||||
$ ./scripts/tls/k8s-certgen
|
||||
```
|
||||
|
||||
**Note**: TLS assets are served to any machines which request them, which requires a trusted network. Alternately, provisioning may be tweaked to require TLS assets be securely copied to each host.
|
||||
|
||||
## Containers
|
||||
|
||||
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS in about a 1 minute and the Kubernetes API should be available after 3-4 minutes (each node downloads a ~160MB Hyperkube). If you chose `rktnetes-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision Kubernetes clusters on physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`.
|
||||
|
||||
```sh
|
||||
$ KUBECONFIG=examples/assets/tls/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1.example.com Ready 3m
|
||||
node2.example.com Ready 3m
|
||||
node3.example.com Ready 3m
|
||||
```
|
||||
|
||||
Get all pods.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system heapster-v1.2.0-4088228293-k3yn8 2/2 Running 0 3m
|
||||
kube-system kube-apiserver-node1.example.com 1/1 Running 0 4m
|
||||
kube-system kube-controller-manager-node1.example.com 1/1 Running 0 3m
|
||||
kube-system kube-dns-v19-l2u8r 3/3 Running 0 4m
|
||||
kube-system kube-proxy-node1.example.com 1/1 Running 0 3m
|
||||
kube-system kube-proxy-node2.example.com 1/1 Running 0 3m
|
||||
kube-system kube-proxy-node3.example.com 1/1 Running 0 3m
|
||||
kube-system kube-scheduler-node1.example.com 1/1 Running 0 3m
|
||||
kube-system kubernetes-dashboard-v1.4.1-0iy07 1/1 Running 0 4m
|
||||
```
|
||||
|
||||
## Kubernetes Dashboard
|
||||
|
||||
Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod.
|
||||
|
||||
```sh
|
||||
$ kubectl port-forward kubernetes-dashboard-v1.4.1-SOME-ID 9090 -n=kube-system
|
||||
Forwarding from 127.0.0.1:9090 -> 9090
|
||||
```
|
||||
|
||||
Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/).
|
||||
|
||||
<img src='img/kubernetes-dashboard.png' class="img-center" alt="Kubernetes Dashboard"/>
|
||||
107
Jenkinsfile
vendored
107
Jenkinsfile
vendored
@@ -1,46 +1,63 @@
|
||||
properties([
|
||||
[$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', numToKeepStr: '20']],
|
||||
[$class: 'GithubProjectProperty', projectUrlStr: 'https://github.com/coreos/matchbox'],
|
||||
[$class: 'PipelineTriggersJobProperty', triggers: [
|
||||
[$class: 'GitHubPushTrigger'],
|
||||
]]
|
||||
])
|
||||
parallel (
|
||||
etcd3: {
|
||||
node('fedora && bare-metal') {
|
||||
stage('etcd3') {
|
||||
timeout(time:5, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/etcd3
|
||||
'''
|
||||
}
|
||||
}
|
||||
pipeline {
|
||||
agent none
|
||||
|
||||
options {
|
||||
timeout(time:45, unit:'MINUTES')
|
||||
buildDiscarder(logRotator(numToKeepStr:'20'))
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Cluster Tests') {
|
||||
steps {
|
||||
parallel (
|
||||
etcd3: {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:5, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/etcd3
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
bootkube: {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:60, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
chmod 600 ./tests/smoke/fake_rsa
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/bootkube
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
"etcd3-terraform": {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:10, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/etcd3-terraform
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
"bootkube-terraform": {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:60, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
chmod 600 ./tests/smoke/fake_rsa
|
||||
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/bootkube-terraform
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
},
|
||||
bootkube: {
|
||||
node('fedora && bare-metal') {
|
||||
stage('bootkube') {
|
||||
timeout(time:12, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
chmod 600 ./tests/smoke/fake_rsa
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/bootkube
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"etcd3-terraform": {
|
||||
node('fedora && bare-metal') {
|
||||
stage('etcd3-terraform') {
|
||||
timeout(time:10, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/etcd3-terraform
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
12
Makefile
12
Makefile
@@ -1,6 +1,6 @@
|
||||
export CGO_ENABLED:=0
|
||||
|
||||
VERSION=$(shell ./scripts/git-version)
|
||||
VERSION=$(shell ./scripts/dev/git-version)
|
||||
LD_FLAGS="-w -X github.com/coreos/matchbox/matchbox/version.Version=$(VERSION)"
|
||||
|
||||
REPO=github.com/coreos/matchbox
|
||||
@@ -15,11 +15,11 @@ bin/%:
|
||||
@go build -o bin/$* -v -ldflags $(LD_FLAGS) $(REPO)/cmd/$*
|
||||
|
||||
test:
|
||||
@./scripts/test
|
||||
@./scripts/dev/test
|
||||
|
||||
.PHONY: aci
|
||||
aci: clean build
|
||||
@sudo ./scripts/build-aci
|
||||
@sudo ./scripts/dev/build-aci
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image:
|
||||
@@ -40,13 +40,13 @@ vendor:
|
||||
|
||||
.PHONY: codegen
|
||||
codegen: tools
|
||||
@./scripts/codegen
|
||||
@./scripts/dev/codegen
|
||||
|
||||
.PHONY: tools
|
||||
tools: bin/protoc bin/protoc-gen-go
|
||||
|
||||
bin/protoc:
|
||||
@./scripts/get-protoc
|
||||
@./scripts/dev/get-protoc
|
||||
|
||||
bin/protoc-gen-go:
|
||||
@go build -o bin/protoc-gen-go $(REPO)/vendor/github.com/golang/protobuf/protoc-gen-go
|
||||
@@ -78,7 +78,7 @@ _output/matchbox-%.tar.gz: DEST=_output/$(NAME)
|
||||
_output/matchbox-%.tar.gz: bin/%/matchbox
|
||||
mkdir -p $(DEST)
|
||||
cp bin/$*/matchbox $(DEST)
|
||||
./scripts/release-files $(DEST)
|
||||
./scripts/dev/release-files $(DEST)
|
||||
tar zcvf $(DEST).tar.gz -C _output $(NAME)
|
||||
|
||||
.PHONY: all build clean test release
|
||||
|
||||
61
README.md
61
README.md
@@ -1,16 +1,14 @@
|
||||
# matchbox [](https://travis-ci.org/coreos/matchbox) [](https://godoc.org/github.com/coreos/matchbox) [](https://quay.io/repository/coreos/matchbox) [](https://botbot.me/freenode/coreos)
|
||||
# matchbox [](https://travis-ci.org/coreos/matchbox) [](https://godoc.org/github.com/coreos/matchbox) [](https://quay.io/repository/coreos/matchbox) [](https://botbot.me/freenode/coreos)
|
||||
|
||||
**Announcement**: Matchbox [v0.6.0](https://github.com/coreos/matchbox/releases) is released with a new [Matchbox Terraform Provider][terraform] and [tutorial](Documentation/getting-started.md).
|
||||
|
||||
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles to PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], [Cloud-Config][cloud-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [terraform][terraform].
|
||||
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles that PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [Terraform][terraform].
|
||||
|
||||
* [Documentation][docs]
|
||||
* [matchbox Service](Documentation/matchbox.md)
|
||||
* [Profiles](Documentation/matchbox.md#profiles)
|
||||
* [Groups](Documentation/matchbox.md#groups)
|
||||
* Config Templates
|
||||
* [Container Linux Config][cl-config]
|
||||
* [Cloud-Config][cloud-config]
|
||||
* [Container Linux Config][cl-config]
|
||||
* [Cloud-Config][cloud-config]
|
||||
* [Configuration](Documentation/config.md)
|
||||
* [HTTP API](Documentation/api.md) / [gRPC API](https://godoc.org/github.com/coreos/matchbox/matchbox/client)
|
||||
* [Background: Machine Lifecycle](Documentation/machine-lifecycle.md)
|
||||
@@ -19,51 +17,34 @@
|
||||
### Installation
|
||||
|
||||
* Installation
|
||||
* Installing on [CoreOS / Linux distros](Documentation/deployment.md)
|
||||
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
|
||||
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
|
||||
* Installing on [Container Linux / other distros](Documentation/deployment.md)
|
||||
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
|
||||
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
|
||||
* [Network Setup](Documentation/network-setup.md)
|
||||
|
||||
### Tutorials
|
||||
|
||||
* [Getting Started](Documentation/getting-started.md)
|
||||
* [Getting Started](Documentation/getting-started.md) - provision physical machines with Container Linux
|
||||
* Local QEMU/KVM
|
||||
* [matchbox with Docker](Documentation/getting-started-docker.md)
|
||||
* [matchbox with rkt](Documentation/getting-started-rkt.md)
|
||||
* Clusters
|
||||
* [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node Kubernetes v1.8.5 cluster
|
||||
* Clusters (Terraform-based)
|
||||
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node Kubernetes v1.10.3 cluster
|
||||
|
||||
Local QEMU/KVM
|
||||
### Projects
|
||||
|
||||
* [matchbox with rkt](Documentation/getting-started-rkt.md)
|
||||
* [matchbox with Docker](Documentation/getting-started-docker.md)
|
||||
|
||||
### Example Clusters
|
||||
|
||||
Create [example](examples) clusters on-premise or locally with [QEMU/KVM](scripts/README.md#libvirt).
|
||||
|
||||
**Terraform-based**
|
||||
|
||||
* [simple-install](Documentation/getting-started.md) - Install Container Linux with an SSH key on all machines (beginner)
|
||||
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster
|
||||
* Terraform [Modules](examples/terraform/modules) - Re-usable Terraform Modules
|
||||
|
||||
**Manual**
|
||||
|
||||
* [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster
|
||||
* [Tectonic](https://coreos.com/tectonic/docs/latest/index.html) - enterprise-ready Kubernetes
|
||||
* [Typhoon](https://typhoon.psdn.io/) - minimal and free Kubernetes
|
||||
|
||||
## Contrib
|
||||
|
||||
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services with docker or rkt
|
||||
* [squid](contrib/squid/README.md) - Run a transparent cache proxy
|
||||
* [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) - Terraform plugin which supports "matchbox" provider
|
||||
|
||||
## Enterprise
|
||||
|
||||
[Tectonic](https://coreos.com/tectonic/) is the enterprise-ready Kubernetes offering from CoreOS (free for 10 nodes!). The [Tectonic Installer](https://coreos.com/tectonic/docs/latest/install/bare-metal/#4-tectonic-installer) app integrates directly with `matchbox` through its gRPC API to provide a rich graphical client for populating `matchbox` with machine configs.
|
||||
|
||||
Learn more from our [docs](https://coreos.com/tectonic/docs/latest/) or [blog](https://coreos.com/blog/announcing-tectonic-1.6).
|
||||
|
||||

|
||||
|
||||

|
||||
* [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
|
||||
|
||||
[docs]: https://coreos.com/matchbox/docs/latest
|
||||
[terraform]: https://github.com/coreos/terraform-provider-matchbox
|
||||
|
||||
@@ -8,8 +8,6 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/coreos/pkg/flagutil"
|
||||
|
||||
web "github.com/coreos/matchbox/matchbox/http"
|
||||
"github.com/coreos/matchbox/matchbox/rpc"
|
||||
"github.com/coreos/matchbox/matchbox/server"
|
||||
@@ -17,6 +15,7 @@ import (
|
||||
"github.com/coreos/matchbox/matchbox/storage"
|
||||
"github.com/coreos/matchbox/matchbox/tlsutil"
|
||||
"github.com/coreos/matchbox/matchbox/version"
|
||||
"github.com/coreos/pkg/flagutil"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -26,17 +25,20 @@ var (
|
||||
|
||||
func main() {
|
||||
flags := struct {
|
||||
address string
|
||||
rpcAddress string
|
||||
dataPath string
|
||||
assetsPath string
|
||||
logLevel string
|
||||
certFile string
|
||||
keyFile string
|
||||
caFile string
|
||||
keyRingPath string
|
||||
version bool
|
||||
help bool
|
||||
address string
|
||||
rpcAddress string
|
||||
dataPath string
|
||||
assetsPath string
|
||||
logLevel string
|
||||
grpcCAFile string
|
||||
grpcCertFile string
|
||||
grpcKeyFile string
|
||||
tlsCertFile string
|
||||
tlsKeyFile string
|
||||
tlsEnabled bool
|
||||
keyRingPath string
|
||||
version bool
|
||||
help bool
|
||||
}{}
|
||||
flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
|
||||
flag.StringVar(&flags.rpcAddress, "rpc-address", "", "RPC listen address")
|
||||
@@ -47,14 +49,20 @@ func main() {
|
||||
flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
|
||||
|
||||
// gRPC Server TLS
|
||||
flag.StringVar(&flags.certFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.keyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
|
||||
// TLS Client Authentication
|
||||
flag.StringVar(&flags.caFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
|
||||
flag.StringVar(&flags.grpcCertFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.grpcKeyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
|
||||
|
||||
// gRPC TLS Client Authentication
|
||||
flag.StringVar(&flags.grpcCAFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
|
||||
|
||||
// Signing
|
||||
flag.StringVar(&flags.keyRingPath, "key-ring-path", "", "Path to a private keyring file")
|
||||
|
||||
// SSL flags
|
||||
flag.StringVar(&flags.tlsCertFile, "web-cert-file", "/etc/matchbox/ssl/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.tlsKeyFile, "web-key-file", "/etc/matchbox/ssl/server.key", "Path to the server TLS key file")
|
||||
flag.BoolVar(&flags.tlsEnabled, "web-ssl", false, "True to enable HTTPS")
|
||||
|
||||
// subcommands
|
||||
flag.BoolVar(&flags.version, "version", false, "print version and exit")
|
||||
flag.BoolVar(&flags.help, "help", false, "print usage and exit")
|
||||
@@ -87,16 +95,24 @@ func main() {
|
||||
}
|
||||
}
|
||||
if flags.rpcAddress != "" {
|
||||
if _, err := os.Stat(flags.certFile); err != nil {
|
||||
if _, err := os.Stat(flags.grpcCertFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server certificate with -cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.keyFile); err != nil {
|
||||
if _, err := os.Stat(flags.grpcKeyFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server key with -key-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.caFile); err != nil {
|
||||
if _, err := os.Stat(flags.grpcCAFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS certificate authority for authorizing client certificates: %v", err)
|
||||
}
|
||||
}
|
||||
if flags.tlsEnabled {
|
||||
if _, err := os.Stat(flags.tlsCertFile); err != nil {
|
||||
log.Fatalf("Provide a valid SSL server certificate with -web-cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.tlsKeyFile); err != nil {
|
||||
log.Fatalf("Provide a valid SSL server key with -web-key-file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// logging setup
|
||||
lvl, err := logrus.ParseLevel(flags.logLevel)
|
||||
@@ -130,17 +146,17 @@ func main() {
|
||||
// gRPC Server (feature disabled by default)
|
||||
if flags.rpcAddress != "" {
|
||||
log.Infof("Starting matchbox gRPC server on %s", flags.rpcAddress)
|
||||
log.Infof("Using TLS server certificate: %s", flags.certFile)
|
||||
log.Infof("Using TLS server key: %s", flags.keyFile)
|
||||
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.caFile)
|
||||
log.Infof("Using TLS server certificate: %s", flags.grpcCertFile)
|
||||
log.Infof("Using TLS server key: %s", flags.grpcKeyFile)
|
||||
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.grpcCAFile)
|
||||
lis, err := net.Listen("tcp", flags.rpcAddress)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
tlsinfo := tlsutil.TLSInfo{
|
||||
CertFile: flags.certFile,
|
||||
KeyFile: flags.keyFile,
|
||||
CAFile: flags.caFile,
|
||||
CertFile: flags.grpcCertFile,
|
||||
KeyFile: flags.grpcKeyFile,
|
||||
CAFile: flags.grpcCAFile,
|
||||
}
|
||||
tlscfg, err := tlsinfo.ServerConfig()
|
||||
if err != nil {
|
||||
@@ -151,7 +167,6 @@ func main() {
|
||||
defer grpcServer.Stop()
|
||||
}
|
||||
|
||||
// HTTP Server
|
||||
config := &web.Config{
|
||||
Core: server,
|
||||
Logger: log,
|
||||
@@ -160,9 +175,23 @@ func main() {
|
||||
ArmoredSigner: armoredSigner,
|
||||
}
|
||||
httpServer := web.NewServer(config)
|
||||
log.Infof("Starting matchbox HTTP server on %s", flags.address)
|
||||
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
|
||||
if flags.tlsEnabled {
|
||||
// HTTPS Server
|
||||
log.Infof("Starting matchbox HTTPS server on %s", flags.address)
|
||||
log.Infof("Using SSL server certificate: %s", flags.tlsCertFile)
|
||||
log.Infof("Using SSL server key: %s", flags.tlsKeyFile)
|
||||
err = http.ListenAndServeTLS(flags.address, flags.tlsCertFile, flags.tlsKeyFile, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
} else {
|
||||
// HTTP Server
|
||||
log.Infof("Starting matchbox HTTP server on %s", flags.address)
|
||||
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
61
code-of-conduct.md
Normal file
61
code-of-conduct.md
Normal file
@@ -0,0 +1,61 @@
|
||||
## CoreOS Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of
|
||||
fostering an open and welcoming community, we pledge to respect all people who
|
||||
contribute through reporting issues, posting feature requests, updating
|
||||
documentation, submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free
|
||||
experience for everyone, regardless of level of experience, gender, gender
|
||||
identity and expression, sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently applying these
|
||||
principles to every aspect of managing this project. Project maintainers who do
|
||||
not follow or enforce the Code of Conduct may be permanently removed from the
|
||||
project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting a project maintainer, Brandon Philips
|
||||
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### CoreOS Events Code of Conduct
|
||||
|
||||
CoreOS events are working conferences intended for professional networking and
|
||||
collaboration in the CoreOS community. Attendees are expected to behave
|
||||
according to professional standards and in accordance with their employer’s
|
||||
policies on appropriate workplace behavior.
|
||||
|
||||
While at CoreOS events or related social networking opportunities, attendees
|
||||
should not engage in discriminatory or offensive speech or actions including
|
||||
but not limited to gender, sexuality, race, age, disability, or religion.
|
||||
Speakers should be especially aware of these concerns.
|
||||
|
||||
CoreOS does not condone any statements by speakers contrary to these standards.
|
||||
CoreOS reserves the right to deny entrance and/or eject from an event (without
|
||||
refund) any individual found to be engaging in discriminatory or offensive
|
||||
speech or actions.
|
||||
|
||||
Please bring any concerns to the immediate attention of designated on-site
|
||||
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
Notable changes image releases. The dnsmasq project [upstream](http://www.thekelleys.org.uk/dnsmasq/doc.html) has its own [changelog](http://www.thekelleys.org.uk/dnsmasq/CHANGELOG).
|
||||
|
||||
## v0.4.1
|
||||
|
||||
* Rebuild with alpine:3.6 base image
|
||||
* Add EXPOSE ports 67 and 69 to Dockerfile
|
||||
|
||||
## v0.4.0
|
||||
|
||||
* `dnsmasq` package version 2.76
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM alpine:3.5
|
||||
FROM alpine:3.6
|
||||
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
|
||||
RUN apk -U add dnsmasq curl
|
||||
COPY tftpboot /var/lib/tftpboot
|
||||
EXPOSE 53
|
||||
ENTRYPOINT ["/usr/sbin/dnsmasq"]
|
||||
EXPOSE 53 67 69
|
||||
ENTRYPOINT ["/usr/sbin/dnsmasq"]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
VERSION=v0.4.0
|
||||
VERSION=v0.5.0
|
||||
|
||||
IMAGE_REPO=coreos/dnsmasq
|
||||
QUAY_REPO=quay.io/coreos/dnsmasq
|
||||
@@ -6,12 +6,12 @@ QUAY_REPO=quay.io/coreos/dnsmasq
|
||||
.PHONY: all
|
||||
all: docker-image
|
||||
|
||||
.PHONY: undionly
|
||||
undionly:
|
||||
.PHONY: tftp
|
||||
tftp:
|
||||
@./get-tftp-files
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image: undionly
|
||||
docker-image: tftp
|
||||
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
`dnsmasq` provides a container image for running DHCP, proxy DHCP, DNS, and/or TFTP with [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html). Use it to test different network setups with clusters of network bootable machines.
|
||||
|
||||
The image bundles `undionly.kpxe` which chainloads PXE clients to iPXE and `grub.efi` (experimental) which chainloads UEFI architectures to GRUB2.
|
||||
The image bundles `undionly.kpxe`, `ipxe.efi`, and `grub.efi` (experimental) for chainloading BIOS and UEFI clients to iPXE.
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -15,8 +15,15 @@ sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
@@ -28,8 +35,15 @@ sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example/192.168.1.2 \
|
||||
--log-queries \
|
||||
@@ -53,8 +67,13 @@ Configuration arguments can be provided as flags. Check the dnsmasq [man pages](
|
||||
|
||||
Build a container image locally.
|
||||
|
||||
make docker-image
|
||||
```
|
||||
make docker-image
|
||||
```
|
||||
|
||||
Run the image with Docker on the `docker0` bridge (default).
|
||||
|
||||
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
|
||||
```
|
||||
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
|
||||
```
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# dnsmasq.conf
|
||||
|
||||
no-daemon
|
||||
dhcp-range=172.17.0.50,172.17.0.99
|
||||
dhcp-option=3,172.17.0.1
|
||||
dhcp-host=52:54:00:a1:9c:ae,172.17.0.21,1h
|
||||
@@ -10,15 +11,27 @@ dhcp-host=52:54:00:d7:99:c7,172.17.0.24,1h
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
# Legacy PXE
|
||||
dhcp-match=set:bios,option:client-arch,0
|
||||
dhcp-boot=tag:bios,undionly.kpxe
|
||||
|
||||
# UEFI
|
||||
dhcp-match=set:efi32,option:client-arch,6
|
||||
dhcp-boot=tag:efi32,ipxe.efi
|
||||
|
||||
dhcp-match=set:efibc,option:client-arch,7
|
||||
dhcp-boot=tag:efibc,ipxe.efi
|
||||
|
||||
dhcp-match=set:efi64,option:client-arch,9
|
||||
dhcp-boot=tag:efi64,ipxe.efi
|
||||
|
||||
# iPXE
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:#ipxe,undionly.kpxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
address=/bootcfg.foo/172.18.0.2
|
||||
address=/matchbox.foo/172.17.0.2
|
||||
address=/matchbox.example.com/172.17.0.2
|
||||
address=/node1.example.com/172.17.0.21
|
||||
address=/node2.example.com/172.17.0.22
|
||||
|
||||
@@ -10,6 +10,7 @@ fi
|
||||
|
||||
curl -s -o $DEST/undionly.kpxe http://boot.ipxe.org/undionly.kpxe
|
||||
cp $DEST/undionly.kpxe $DEST/undionly.kpxe.0
|
||||
curl -s -o $DEST/ipxe.efi http://boot.ipxe.org/ipxe.efi
|
||||
|
||||
# Any vaguely recent CoreOS grub.efi is fine
|
||||
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1298.7.0/coreos_production_pxe_grub.efi
|
||||
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1353.7.0/coreos_production_pxe_grub.efi
|
||||
|
||||
@@ -13,13 +13,11 @@ tftp-root=/var/lib/tftpboot
|
||||
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:#ipxe,undionly.kpxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
address=/bootcfg.foo/172.18.0.2
|
||||
address=/matchbox.foo/172.18.0.2
|
||||
address=/matchbox.example.com/172.18.0.2
|
||||
address=/node1.example.com/172.18.0.21
|
||||
address=/node2.example.com/172.18.0.22
|
||||
|
||||
@@ -15,7 +15,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: matchbox
|
||||
image: quay.io/coreos/matchbox:v0.6.0
|
||||
image: quay.io/coreos/matchbox:v0.7.1
|
||||
env:
|
||||
- name: MATCHBOX_ADDRESS
|
||||
value: "0.0.0.0:8080"
|
||||
|
||||
@@ -2,12 +2,7 @@ apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox.example.com
|
||||
http:
|
||||
@@ -16,6 +11,18 @@ spec:
|
||||
backend:
|
||||
serviceName: matchbox
|
||||
servicePort: 8080
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox-rpc.example.com
|
||||
http:
|
||||
paths:
|
||||
|
||||
@@ -65,7 +65,7 @@ iptables -t nat -A PREROUTING -i enp14s0 -p tcp --dport 80 -j REDIRECT --to-port
|
||||
|
||||
Your DHCP server should be configured so the Squid host is the default gateway for PXE, iPXE, or GRUB2 clients. For deployments that run Squid on the same host as dnsmasq, remove any DHCP option 3 settings. For example ```--dhcp-option=3,192.168.10.1"```
|
||||
|
||||
Update Matchbox policies to use the url of the CoreOS kernel/initrd download site:
|
||||
Update Matchbox policies to use the url of the Container Linux kernel/initrd download site:
|
||||
```
|
||||
cat policy/etcd3.json
|
||||
{
|
||||
|
||||
@@ -4,7 +4,7 @@ Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/coreos/matchbox"
|
||||
Environment="VERSION=v0.6.0"
|
||||
Environment="VERSION=v0.7.1"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
Environment="MATCHBOX_LOG_LEVEL=debug"
|
||||
|
||||
@@ -4,7 +4,7 @@ Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/coreos/matchbox"
|
||||
Environment="VERSION=v0.6.0"
|
||||
Environment="VERSION=v0.7.1"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
|
||||
@@ -8,9 +8,9 @@ These examples use [Terraform](https://www.terraform.io/intro/) as a client to M
|
||||
|
||||
| Name | Description |
|
||||
|-------------------------------|-------------------------------|
|
||||
| [simple-install](terraform/simple-install) | Install Container Linux with an SSH key |
|
||||
| [etcd3-install](terraform/etcd3-install) | Install a 3-node etcd3 cluster |
|
||||
| [bootkube-install](terraform/bootkube-install) | Install a 3-node self-hosted Kubernetes v1.6.4 cluster |
|
||||
| [simple-install](terraform/simple-install/) | Install Container Linux with an SSH key |
|
||||
| [etcd3-install](terraform/etcd3-install/) | Install a 3-node etcd3 cluster |
|
||||
| [bootkube-install](terraform/bootkube-install/) | Install a 3-node Kubernetes v1.10.3 cluster |
|
||||
|
||||
### Customization
|
||||
|
||||
@@ -20,15 +20,15 @@ You are encouraged to look through the examples and Terraform modules. Implement
|
||||
|
||||
These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/matchbox/` directory.
|
||||
|
||||
| Name | Description | CoreOS Version | FS | Docs |
|
||||
| Name | Description | CoreOS Container Linux Version | FS | Docs |
|
||||
|------------|-------------|----------------|----|-----------|
|
||||
| simple | CoreOS with autologin, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| simple-install | CoreOS Install, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS via GRUB2 Netboot | stable/1298.7.0 | RAM | NA |
|
||||
| etcd3 | PXE boot 3 node etcd3 cluster with proxies | stable/1298.7.0 | RAM | None |
|
||||
| etcd3-install | Install a 3 node etcd3 cluster to disk | stable/1298.7.0 | Disk | None |
|
||||
| bootkube | PXE boot a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| bootkube-install | Install a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| simple | CoreOS Container Linux with autologin, using iPXE | stable/1576.5.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| simple-install | CoreOS Container Linux Install, using iPXE | stable/1576.5.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS Container Linux via GRUB2 Netboot | stable/1576.5.0 | RAM | NA |
|
||||
| etcd3 | PXE boot a 3-node etcd3 cluster with proxies | stable/1576.5.0 | RAM | None |
|
||||
| etcd3-install | Install a 3-node etcd3 cluster to disk | stable/1576.5.0 | Disk | None |
|
||||
| bootkube | PXE boot a 3-node Kubernetes v1.8.5 cluster | stable/1576.5.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| bootkube-install | Install a 3-node Kubernetes v1.8.5 cluster | stable/1576.5.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
|
||||
### Customization
|
||||
|
||||
|
||||
56
examples/addons/cluo/update-agent.yaml
Normal file
56
examples/addons/cluo/update-agent.yaml
Normal file
@@ -0,0 +1,56 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: container-linux-update-agent
|
||||
namespace: kube-system
|
||||
spec:
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: container-linux-update-agent
|
||||
spec:
|
||||
containers:
|
||||
- name: update-agent
|
||||
image: quay.io/coreos/container-linux-update-operator:v0.3.1
|
||||
command:
|
||||
- "/bin/update-agent"
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/dbus
|
||||
name: var-run-dbus
|
||||
- mountPath: /etc/coreos
|
||||
name: etc-coreos
|
||||
- mountPath: /usr/share/coreos
|
||||
name: usr-share-coreos
|
||||
- mountPath: /etc/os-release
|
||||
name: etc-os-release
|
||||
env:
|
||||
# read by update-agent as the node name to manage reboots for
|
||||
- name: UPDATE_AGENT_NODE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: var-run-dbus
|
||||
hostPath:
|
||||
path: /var/run/dbus
|
||||
- name: etc-coreos
|
||||
hostPath:
|
||||
path: /etc/coreos
|
||||
- name: usr-share-coreos
|
||||
hostPath:
|
||||
path: /usr/share/coreos
|
||||
- name: etc-os-release
|
||||
hostPath:
|
||||
path: /etc/os-release
|
||||
22
examples/addons/cluo/update-operator.yaml
Normal file
22
examples/addons/cluo/update-operator.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: container-linux-update-operator
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: container-linux-update-operator
|
||||
spec:
|
||||
containers:
|
||||
- name: update-operator
|
||||
image: quay.io/coreos/container-linux-update-operator:v0.3.1
|
||||
command:
|
||||
- "/bin/update-operator"
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
@@ -1,44 +0,0 @@
|
||||
|
||||
## gRPC API Credentials
|
||||
|
||||
Create FAKE TLS credentials for running the `matchbox` gRPC API examples.
|
||||
|
||||
**DO NOT** use these certificates for anything other than running `matchbox` examples. Use your organization's production PKI for production deployments.
|
||||
|
||||
Navigate to the example directory which will be mounted as `/etc/matchbox` in examples:
|
||||
|
||||
cd matchbox/examples/etc/matchbox
|
||||
|
||||
Set certificate subject alt names which should be used by exporting `SAN`. Use the DNS name or IP at which `matchbox` is hosted.
|
||||
|
||||
# for examples on metal0 or docker0 bridges
|
||||
export SAN=IP.1:127.0.0.1,IP.2:172.18.0.2
|
||||
|
||||
# production example
|
||||
export SAN=DNS.1:matchbox.example.com
|
||||
|
||||
Create a fake `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`. Type 'Y' when prompted.
|
||||
|
||||
$ ./cert-gen
|
||||
Creating FAKE CA, server cert/key, and client cert/key...
|
||||
...
|
||||
...
|
||||
...
|
||||
******************************************************************
|
||||
WARNING: Generated TLS credentials are ONLY SUITABLE FOR EXAMPLES!
|
||||
Use your organization's production PKI for production deployments!
|
||||
|
||||
## Inpsect
|
||||
|
||||
Inspect the generated FAKE certificates if desired.
|
||||
|
||||
openssl x509 -noout -text -in ca.crt
|
||||
openssl x509 -noout -text -in server.crt
|
||||
openssl x509 -noout -text -in client.crt
|
||||
|
||||
## Verify
|
||||
|
||||
Verify that the FAKE server and client certificates were signed by the fake CA.
|
||||
|
||||
openssl verify -CAfile ca.crt server.crt
|
||||
openssl verify -CAfile ca.crt client.crt
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1298.7.0",
|
||||
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
|
||||
"baseurl": "http://matchbox.foo:8080/assets/coreos"
|
||||
"coreos_version": "1576.5.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,11 +8,12 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,10 +8,9 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,10 +8,9 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1298.7.0",
|
||||
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
|
||||
"baseurl": "http://matchbox.foo:8080/assets/coreos"
|
||||
"coreos_version": "1576.5.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "GRUB CoreOS alpha",
|
||||
"name": "GRUB CoreOS Container Linux alpha",
|
||||
"profile": "grub"
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "install",
|
||||
"name": "Simple CoreOS Alpha Install",
|
||||
"name": "Simple CoreOS Container Linux Install",
|
||||
"profile": "simple-install",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1298.7.0",
|
||||
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
|
||||
"baseurl": "http://matchbox.foo:8080/assets/coreos"
|
||||
"coreos_version": "1576.5.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Alpha",
|
||||
"name": "Simple CoreOS Container Linux Alpha",
|
||||
"profile": "simple",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "Simple CoreOS Alpha with RAM disk",
|
||||
"name": "Simple CoreOS Container Linux Alpha with RAM disk",
|
||||
"profile": "simple"
|
||||
}
|
||||
|
||||
@@ -7,22 +7,27 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
|
||||
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
|
||||
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
|
||||
Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
|
||||
Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
|
||||
Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
|
||||
Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
|
||||
Environment="ETCD_CLIENT_CERT_AUTH=true"
|
||||
Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
|
||||
Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
|
||||
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
|
||||
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
mask: true
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
@@ -51,37 +56,41 @@ systemd:
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
|
||||
--mount volume=opt-cni-bin,target=/opt/cni/bin \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
--anonymous-auth=false \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
--cluster_domain=cluster.local \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--exit-on-lock-contention \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
--require-kubeconfig
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
@@ -117,8 +126,14 @@ storage:
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.8.5
|
||||
- path: /etc/ssl/etcd/.empty
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
empty
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
@@ -143,7 +158,7 @@ storage:
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
|
||||
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.4.4}"
|
||||
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.9.1}"
|
||||
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
|
||||
@@ -1,25 +1,10 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
--endpoints={{.etcd_endpoints}}
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
mask: true
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
@@ -48,36 +33,40 @@ systemd:
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
|
||||
--mount volume=opt-cni-bin,target=/opt/cni/bin \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--anonymous-auth=false \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
--cluster_domain=cluster.local \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--exit-on-lock-contention \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--require-kubeconfig
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
[Install]
|
||||
@@ -106,8 +95,14 @@ storage:
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.8.5
|
||||
- path: /etc/ssl/etcd/.empty
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
empty
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
|
||||
@@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
|
||||
@@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
|
||||
@@ -20,7 +20,7 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -ex
|
||||
curl --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
curl --retry 10 --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}}
|
||||
udevadm settle
|
||||
systemctl reboot
|
||||
|
||||
@@ -2,11 +2,12 @@
|
||||
"id": "bootkube-controller",
|
||||
"name": "bootkube Ready Controller",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"root=/dev/sda1",
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
|
||||
@@ -2,11 +2,12 @@
|
||||
"id": "bootkube-worker",
|
||||
"name": "bootkube Ready Worker",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"root=/dev/sda1",
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
|
||||
@@ -2,10 +2,11 @@
|
||||
"id": "etcd3-gateway",
|
||||
"name": "etcd3-gateway",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
|
||||
@@ -2,10 +2,11 @@
|
||||
"id": "etcd3",
|
||||
"name": "etcd3",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +14,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "etcd3.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "grub",
|
||||
"name": "CoreOS via GRUB2",
|
||||
"name": "CoreOS Container Linux via GRUB2",
|
||||
"boot": {
|
||||
"kernel": "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "(http;matchbox.example.com:8080)/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["(http;matchbox.example.com:8080)/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +13,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "ssh.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
{
|
||||
"id": "install-reboot",
|
||||
"name": "Install CoreOS and Reboot",
|
||||
"name": "Install CoreOS Container Linux and Reboot",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +14,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "install-reboot.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
{
|
||||
"id": "simple-install",
|
||||
"name": "Simple CoreOS Alpha Install",
|
||||
"name": "Simple CoreOS Container Linux Alpha Install",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +14,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "install-reboot.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,19 @@
|
||||
{
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Alpha",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "ssh.yaml"
|
||||
}
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Container Linux Alpha",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": [
|
||||
"/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"
|
||||
],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "ssh.yaml"
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Self-hosted Kubernetes
|
||||
# Kubernetes
|
||||
|
||||
The self-hosted Kubernetes example shows how to use matchbox to network boot and provision a 3 node "self-hosted" Kubernetes v1.6.4 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting.
|
||||
The Kubernetes example shows how to use Matchbox to network boot and provision a 3 node Kubernetes v1.10.3 cluster. This example uses [Terraform](https://www.terraform.io/intro/index.html) and a module provided by [Typhoon](https://github.com/poseidon/typhoon) to describe cluster resources. [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) is run once to bootstrap the Kubernetes control plane.
|
||||
|
||||
## Requirements
|
||||
|
||||
@@ -9,7 +9,7 @@ Follow the getting started [tutorial](../../../Documentation/getting-started.md)
|
||||
* Matchbox v0.6+ [installation](../../../Documentation/deployment.md) with gRPC API enabled
|
||||
* Matchbox provider credentials `client.crt`, `client.key`, and `ca.crt`
|
||||
* PXE [network boot](../../../Documentation/network-setup.md) environment
|
||||
* Terraform v0.9+ and [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) installed locally on your system
|
||||
* Terraform v0.10.x or v0.11.x and [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) installed locally on your system
|
||||
* Machines with known DNS names and MAC addresses
|
||||
|
||||
If you prefer to provision QEMU/KVM VMs on your local Linux machine, set up the matchbox [development environment](../../../Documentation/getting-started-rkt.md).
|
||||
@@ -32,16 +32,16 @@ Copy the `terraform.tfvars.example` file to `terraform.tfvars`. Ensure `provider
|
||||
```hcl
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
ssh_authorized_key = "ADD ME"
|
||||
|
||||
cluster_name = "demo"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_channel = "stable"
|
||||
ssh_authorized_key = "ADD ME"
|
||||
os_channel = "coreos-stable"
|
||||
os_version = "1576.5.0"
|
||||
```
|
||||
|
||||
Provide an ordered list of controller names, MAC addresses, and domain names. Provide an ordered list of worker names, MAC addresses, and domain names.
|
||||
|
||||
```
|
||||
```hcl
|
||||
controller_names = ["node1"]
|
||||
controller_macs = ["52:54:00:a1:9c:ae"]
|
||||
controller_domains = ["node1.example.com"]
|
||||
@@ -50,48 +50,80 @@ worker_macs = ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
|
||||
worker_domains = ["node2.example.com", "node3.example.com"]
|
||||
```
|
||||
|
||||
Finally, provide an `assets_dir` for generated manifests and a DNS name which you've setup to resolves to controller(s) (e.g. round-robin). Worker nodes and your kubeconfig will communicate via this endpoint.
|
||||
Provide an `assets_dir` for generated manifests and a DNS name which you've setup to resolves to controller(s) (e.g. round-robin). Worker nodes and your kubeconfig will communicate via this endpoint.
|
||||
|
||||
```
|
||||
```hcl
|
||||
k8s_domain_name = "cluster.example.com"
|
||||
asset_dir = "assets"
|
||||
```
|
||||
|
||||
You may set `experimental_self_hosted_etcd = "true"` to deploy "self-hosted" etcd atop Kubernetes instead of running etcd on hosts directly. Warning, this is experimental and potentially dangerous.
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
|
||||
## Apply
|
||||
### Optional
|
||||
|
||||
Fetch the [bootkube](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html) for bare-metal, which is maintained in the in the matchbox repo.
|
||||
You may set certain optional variables to override defaults. Set `networking` to either "flannel" or "calico" to set the networking provider. [Check upstream](https://typhoon.psdn.io/bare-metal/) for the full list of options.
|
||||
|
||||
```sh
|
||||
$ terraform get
|
||||
```hcl
|
||||
# Optional (defaults)
|
||||
# cached_install = "false"
|
||||
# install_disk = "/dev/sda"
|
||||
# container_linux_oem = ""
|
||||
# networking = "flannel"
|
||||
```
|
||||
|
||||
Plan and apply to create the resources on Matchbox.
|
||||
The default is to create a Kubernetes cluster with 1 controller and 2 workers as an example, but check `multi-controller.tfvars.example` for an example which defines 3 controllers and 1 worker.
|
||||
|
||||
## Terraform
|
||||
|
||||
Initialize Terraform from the `bootkube-install` directory.
|
||||
|
||||
```sh
|
||||
terraform init
|
||||
```
|
||||
|
||||
Get or update Terraform modules.
|
||||
|
||||
```sh
|
||||
$ terraform get # downloads missing modules
|
||||
$ terraform get --update # updates all modules
|
||||
Get: git::https://github.com/poseidon/typhoon (update)
|
||||
Get: git::https://github.com/poseidon/bootkube-terraform.git?ref=v0.11.0 (update)
|
||||
```
|
||||
|
||||
Plan the resources to be created.
|
||||
|
||||
```sh
|
||||
$ terraform plan
|
||||
Plan: 37 to add, 0 to change, 0 to destroy.
|
||||
Plan: 55 to add, 0 to change, 0 to destroy.
|
||||
```
|
||||
|
||||
Terraform will configure matchbox with profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`) and add groups to match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `noe3` provision themselves as workers.
|
||||
Terraform will configure matchbox with profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`) and add groups to match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `node3` provision themselves as workers.
|
||||
|
||||
The module referenced in `cluster.tf` will also generate bootkube assets to `assets_dir` (exactly like the [bootkube](https://github.com/kubernetes-incubator/bootkube) binary would). These assets include Kubernetes bootstrapping and control plane manifests as well as a kubeconfig you can use to access the cluster.
|
||||
|
||||
### ssh-agent
|
||||
|
||||
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`, otherwise `terraform apply` will hang.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
module.cluster.null_resource.copy-kubeconfig.0: Still creating... (5m0s elapsed)
|
||||
module.cluster.null_resource.copy-kubeconfig.1: Still creating... (5m0s elapsed)
|
||||
module.cluster.null_resource.copy-kubeconfig.2: Still creating... (5m0s elapsed)
|
||||
ssh-add ~/.ssh/id_rsa
|
||||
ssh-add -L
|
||||
```
|
||||
|
||||
### Apply
|
||||
|
||||
Apply the changes.
|
||||
|
||||
```sh
|
||||
module.cluster.null_resource.copy-secrets.0: Still creating... (5m0s elapsed)
|
||||
module.cluster.null_resource.copy-secrets.1: Still creating... (5m0s elapsed)
|
||||
module.cluster.null_resource.copy-secrets.2: Still creating... (5m0s elapsed)
|
||||
...
|
||||
module.cluster.null_resource.bootkube-start: Still creating... (8m40s elapsed)
|
||||
...
|
||||
Apply complete! Resources: 37 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
You can now move on to the "Machines" section. Apply will loop until it can successfully copy the kubeconfig to each node and start the one-time Kubernetes bootstrapping process on a controller. In practice, you may see `apply` fail if it connects before the disk install has completed. Run terraform apply until it reconciles successfully.
|
||||
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
Apply will then loop until it can successfully copy credentials to each machine and start the one-time Kubernetes bootstrap service. Proceed to the next step while this loops.
|
||||
|
||||
## Machines
|
||||
|
||||
@@ -114,32 +146,34 @@ $ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
|
||||
|
||||
```sh
|
||||
$ KUBECONFIG=assets/auth/kubeconfig
|
||||
$ export KUBECONFIG=assets/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1.example.com Ready 3m
|
||||
node2.example.com Ready 3m
|
||||
node3.example.com Ready 3m
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 11m v1.10.0
|
||||
node2.example.com Ready 11m v1.10.0
|
||||
node3.example.com Ready 11m v1.10.0
|
||||
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system checkpoint-installer-p8g8r 1/1 Running 1 13m
|
||||
kube-system kube-apiserver-s5gnx 1/1 Running 1 41s
|
||||
kube-system kube-controller-manager-3438979800-jrlnd 1/1 Running 1 13m
|
||||
kube-system kube-controller-manager-3438979800-tkjx7 1/1 Running 1 13m
|
||||
kube-system kube-dns-4101612645-xt55f 4/4 Running 4 13m
|
||||
kube-system kube-flannel-pl5c2 2/2 Running 0 13m
|
||||
kube-system kube-flannel-r9t5r 2/2 Running 3 13m
|
||||
kube-system kube-flannel-vfb0s 2/2 Running 4 13m
|
||||
kube-system kube-proxy-cvhmj 1/1 Running 0 13m
|
||||
kube-system kube-proxy-hf9mh 1/1 Running 1 13m
|
||||
kube-system kube-proxy-kpl73 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-1l23b 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-fks0b 1/1 Running 1 13m
|
||||
kube-system pod-checkpointer-node1.example.com 1/1 Running 2 10m
|
||||
kube-system kube-flannel-fqp7f 2/2 Running 1 11m
|
||||
kube-system kube-flannel-gnjrm 2/2 Running 0 11m
|
||||
kube-system kube-flannel-llbgt 2/2 Running 0 11m
|
||||
kube-system kube-apiserver-7336w 1/1 Running 0 11m
|
||||
kube-system kube-controller-manager-3271970485-b9chx 1/1 Running 0 11m
|
||||
kube-system kube-controller-manager-3271970485-v30js 1/1 Running 1 11m
|
||||
kube-system kube-dns-1187388186-mx9rt 3/3 Running 0 11m
|
||||
kube-system kube-proxy-50sd4 1/1 Running 0 11m
|
||||
kube-system kube-proxy-bczhp 1/1 Running 0 11m
|
||||
kube-system kube-proxy-mp2fw 1/1 Running 0 11m
|
||||
kube-system kube-scheduler-3895335239-fd3l7 1/1 Running 1 11m
|
||||
kube-system kube-scheduler-3895335239-hfjv0 1/1 Running 0 11m
|
||||
kube-system pod-checkpointer-wf65d 1/1 Running 0 11m
|
||||
kube-system pod-checkpointer-wf65d-node1.example.com 1/1 Running 0 11m
|
||||
```
|
||||
|
||||
Try restarting machines or deleting pods to see that the cluster is resilient to failures.
|
||||
## Addons
|
||||
|
||||
Install **important** cluster [addons](../../../Documentation/cluster-addons.md).
|
||||
|
||||
## Going Further
|
||||
|
||||
|
||||
@@ -1,15 +1,26 @@
|
||||
// Self-hosted Kubernetes cluster
|
||||
// Kubernetes cluster
|
||||
module "cluster" {
|
||||
source = "../modules/bootkube"
|
||||
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.10.3"
|
||||
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
providers = {
|
||||
local = "local.default"
|
||||
null = "null.default"
|
||||
template = "template.default"
|
||||
tls = "tls.default"
|
||||
}
|
||||
|
||||
# bare-metal
|
||||
cluster_name = "${var.cluster_name}"
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
os_channel = "${var.os_channel}"
|
||||
os_version = "${var.os_version}"
|
||||
|
||||
# Machines
|
||||
# configuration
|
||||
k8s_domain_name = "${var.k8s_domain_name}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
asset_dir = "${var.asset_dir}"
|
||||
|
||||
# machines
|
||||
controller_names = "${var.controller_names}"
|
||||
controller_macs = "${var.controller_macs}"
|
||||
controller_domains = "${var.controller_domains}"
|
||||
@@ -17,11 +28,10 @@ module "cluster" {
|
||||
worker_macs = "${var.worker_macs}"
|
||||
worker_domains = "${var.worker_domains}"
|
||||
|
||||
# bootkube assets
|
||||
k8s_domain_name = "${var.k8s_domain_name}"
|
||||
asset_dir = "${var.asset_dir}"
|
||||
|
||||
# Optional
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
|
||||
# optional
|
||||
networking = "${var.networking}"
|
||||
cached_install = "${var.cached_install}"
|
||||
install_disk = "${var.install_disk}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
kernel_args = "${var.kernel_args}"
|
||||
}
|
||||
|
||||
@@ -0,0 +1,22 @@
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
# ssh_authorized_key = "ADD ME"
|
||||
|
||||
cluster_name = "example"
|
||||
os_channel = "coreos-stable"
|
||||
os_version = "1576.5.0"
|
||||
|
||||
# Machines
|
||||
controller_names = ["node1", "node2", "node3"]
|
||||
controller_macs = ["52:54:00:a1:9c:ae", "52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
|
||||
controller_domains = ["node1.example.com", "node2.example.com", "node3.example.com"]
|
||||
worker_names = ["node4"]
|
||||
worker_macs = ["52:54:00:d7:99:c7"]
|
||||
worker_domains = ["node4.example.com"]
|
||||
|
||||
# Bootkube
|
||||
k8s_domain_name = "cluster.example.com"
|
||||
asset_dir = "assets"
|
||||
|
||||
# Optional
|
||||
# container_linux_oem = ""
|
||||
@@ -5,3 +5,23 @@ provider "matchbox" {
|
||||
client_key = "${file("~/.matchbox/client.key")}"
|
||||
ca = "${file("~/.matchbox/ca.crt")}"
|
||||
}
|
||||
|
||||
provider "local" {
|
||||
version = "~> 1.0"
|
||||
alias = "default"
|
||||
}
|
||||
|
||||
provider "null" {
|
||||
version = "~> 1.0"
|
||||
alias = "default"
|
||||
}
|
||||
|
||||
provider "template" {
|
||||
version = "~> 1.0"
|
||||
alias = "default"
|
||||
}
|
||||
|
||||
provider "tls" {
|
||||
version = "~> 1.0"
|
||||
alias = "default"
|
||||
}
|
||||
|
||||
@@ -3,8 +3,8 @@ matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
# ssh_authorized_key = "ADD ME"
|
||||
|
||||
cluster_name = "example"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_channel = "stable"
|
||||
os_channel = "coreos-stable"
|
||||
os_version = "1576.5.0"
|
||||
|
||||
# Machines
|
||||
controller_names = ["node1"]
|
||||
@@ -18,6 +18,8 @@ worker_domains = ["node2.example.com", "node3.example.com"]
|
||||
k8s_domain_name = "cluster.example.com"
|
||||
asset_dir = "assets"
|
||||
|
||||
# Optional
|
||||
# Optional (defaults)
|
||||
cached_install = "true"
|
||||
# install_disk = "/dev/sda"
|
||||
# container_linux_oem = ""
|
||||
# experimental_self_hosted_etcd = "true"
|
||||
# networking = "flannel"
|
||||
|
||||
@@ -8,14 +8,14 @@ variable "matchbox_rpc_endpoint" {
|
||||
description = "Matchbox gRPC API endpoint, without the protocol (e.g. matchbox.example.com:8081)"
|
||||
}
|
||||
|
||||
variable "container_linux_channel" {
|
||||
variable "os_channel" {
|
||||
type = "string"
|
||||
description = "Container Linux channel corresponding to the container_linux_version"
|
||||
description = "Channel for a Container Linux derivative"
|
||||
}
|
||||
|
||||
variable "container_linux_version" {
|
||||
variable "os_version" {
|
||||
type = "string"
|
||||
description = "Container Linux version of the kernel/initrd to PXE or the image to install"
|
||||
description = "Version for a Container Linux to PXE and install"
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
@@ -62,6 +62,12 @@ variable "k8s_domain_name" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "networking" {
|
||||
description = "Choice of networking provider (flannel or calico)"
|
||||
type = "string"
|
||||
default = "flannel"
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = "string"
|
||||
@@ -78,8 +84,23 @@ variable "service_cidr" {
|
||||
CIDR IP range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
|
||||
EOD
|
||||
|
||||
type = "string"
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
# optional
|
||||
|
||||
variable "cached_install" {
|
||||
type = "string"
|
||||
default = "10.3.0.0/16"
|
||||
default = "false"
|
||||
description = "Whether Container Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
|
||||
}
|
||||
|
||||
variable "install_disk" {
|
||||
type = "string"
|
||||
default = "/dev/sda"
|
||||
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
@@ -88,7 +109,8 @@ variable "container_linux_oem" {
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
variable "experimental_self_hosted_etcd" {
|
||||
default = "false"
|
||||
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on-hosts"
|
||||
variable "kernel_args" {
|
||||
description = "Additional kernel arguments to provide at PXE boot."
|
||||
type = "list"
|
||||
default = []
|
||||
}
|
||||
|
||||
@@ -37,6 +37,19 @@ ssh_authorized_key = "ADD ME"
|
||||
|
||||
Configs in `etcd3-install` configure the matchbox provider, define profiles (e.g. `cached-container-linux-install`, `etcd3`), and define 3 groups which match machines by MAC address to a profile. These resources declare that the machines should PXE boot, install Container Linux to disk, and provision themselves into peers in a 3-node etcd3 cluster.
|
||||
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
|
||||
### Optional
|
||||
|
||||
You may set certain optional variables to override defaults.
|
||||
|
||||
```hcl
|
||||
# install_disk = "/dev/sda"
|
||||
# container_linux_oem = ""
|
||||
```
|
||||
|
||||
## Apply
|
||||
|
||||
Fetch the [profiles](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html) which let's you use common machine profiles maintained in the matchbox repo (like `etcd3`).
|
||||
|
||||
```sh
|
||||
@@ -52,8 +65,6 @@ $ terraform apply
|
||||
Apply complete! Resources: 10 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
|
||||
## Machines
|
||||
|
||||
Power on each machine (with PXE boot device on next boot). Machines should network boot, install Container Linux to disk, reboot, and provision themselves as a 3-node etcd3 cluster.
|
||||
@@ -82,7 +93,6 @@ $ systemctl status etcd-member
|
||||
Verify that etcd3 peers are healthy and communicating.
|
||||
|
||||
```sh
|
||||
$ ETCDCTL_API=3
|
||||
$ etcdctl cluster-health
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
module "profiles" {
|
||||
source = "../modules/profiles"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_version = "1576.5.0"
|
||||
container_linux_channel = "stable"
|
||||
install_disk = "${var.install_disk}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
}
|
||||
|
||||
// Install Container Linux to disk before provisioning
|
||||
@@ -12,13 +14,9 @@ resource "matchbox_group" "default" {
|
||||
profile = "${module.profiles.cached-container-linux-install}"
|
||||
|
||||
// No selector, matches all nodes
|
||||
|
||||
metadata {
|
||||
container_linux_channel = "stable"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
|
||||
baseurl = "${var.matchbox_http_endpoint}/assets/coreos"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,5 +2,6 @@ matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
# ssh_authorized_key = "ADD ME"
|
||||
|
||||
# Optional
|
||||
# Optional (defaults)
|
||||
# install_disk = "/dev/sda"
|
||||
# container_linux_oem = ""
|
||||
|
||||
@@ -13,8 +13,16 @@ variable "ssh_authorized_key" {
|
||||
description = "SSH public key to set as an authorized_key on machines"
|
||||
}
|
||||
|
||||
# optional
|
||||
|
||||
variable "install_disk" {
|
||||
type = "string"
|
||||
default = "/dev/sda"
|
||||
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = "string"
|
||||
default = ""
|
||||
type = "string"
|
||||
default = ""
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ Matchbox provides Terraform [modules](https://www.terraform.io/docs/modules/usag
|
||||
|
||||
```hcl
|
||||
module "profiles" {
|
||||
source = "git::https://github.com/coreos/matchbox.git//examples/terraform/modules/profiles?ref=4451425db8f230012c36de6e6628c72aa34e1c10"
|
||||
source = "git::https://github.com/coreos/matchbox.git//examples/terraform/modules/profiles?ref=08f4e9908b167fba608e60169ec6a803df9db37f"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
@@ -27,9 +27,6 @@ Available modules:
|
||||
| | cached-container-linux-install | Install Container Linux to disk from matchbox assets cache |
|
||||
| | etcd3 | Provision an etcd3 peer node |
|
||||
| | etcd3-gateway | Provision an etcd3 gateway node |
|
||||
| | bootkube-controller | Provision a self-hosted Kubernetes controller/master node |
|
||||
| | bootkube-worker | Provisioner a self-hosted Kubernetes worker node |
|
||||
| bootkube | | Creates a multi-controller, multi-worker self-hosted Kubernetes cluster |
|
||||
|
||||
## Customization
|
||||
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
# Self-hosted Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootkube" {
|
||||
source = "git::https://github.com/dghubble/bootkube-terraform.git?ref=3720aff28a465987e079dcd74fe3b6d5046d7010"
|
||||
|
||||
cluster_name = "${var.cluster_name}"
|
||||
api_servers = ["${var.k8s_domain_name}"]
|
||||
etcd_servers = ["http://127.0.0.1:2379"]
|
||||
asset_dir = "${var.asset_dir}"
|
||||
pod_cidr = "${var.pod_cidr}"
|
||||
service_cidr = "${var.service_cidr}"
|
||||
experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
// Install Container Linux to disk
|
||||
resource "matchbox_group" "container-linux-install" {
|
||||
count = "${length(var.controller_names) + length(var.worker_names)}"
|
||||
|
||||
name = "${format("container-linux-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
|
||||
profile = "${module.profiles.cached-container-linux-install}"
|
||||
|
||||
selector {
|
||||
mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
|
||||
}
|
||||
|
||||
metadata {
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
|
||||
baseurl = "${var.matchbox_http_endpoint}/assets/coreos"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "controller" {
|
||||
count = "${length(var.controller_names)}"
|
||||
name = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
|
||||
profile = "${module.profiles.bootkube-controller}"
|
||||
|
||||
selector {
|
||||
mac = "${element(var.controller_macs, count.index)}"
|
||||
os = "installed"
|
||||
}
|
||||
|
||||
metadata {
|
||||
domain_name = "${element(var.controller_domains, count.index)}"
|
||||
etcd_name = "${element(var.controller_names, count.index)}"
|
||||
etcd_initial_cluster = "${join(",", formatlist("%s=http://%s:2380", var.controller_names, var.controller_domains))}"
|
||||
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
|
||||
k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
|
||||
k8s_etcd_service_ip = "${module.bootkube.etcd_service_ip}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "worker" {
|
||||
count = "${length(var.worker_names)}"
|
||||
name = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
|
||||
profile = "${module.profiles.bootkube-worker}"
|
||||
|
||||
selector {
|
||||
mac = "${element(var.worker_macs, count.index)}"
|
||||
os = "installed"
|
||||
}
|
||||
|
||||
metadata {
|
||||
domain_name = "${element(var.worker_domains, count.index)}"
|
||||
etcd_endpoints = "${join(",", formatlist("%s:2379", var.controller_domains))}"
|
||||
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
|
||||
k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
|
||||
k8s_etcd_service_ip = "${module.bootkube.etcd_service_ip}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
// Create common profiles
|
||||
module "profiles" {
|
||||
source = "../profiles"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
# Secure copy kubeconfig to all nodes to activate kubelet.service
|
||||
resource "null_resource" "copy-kubeconfig" {
|
||||
count = "${length(var.controller_names) + length(var.worker_names)}"
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = "${element(concat(var.controller_domains, var.worker_domains), count.index)}"
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = "${module.bootkube.kubeconfig}"
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy bootkube assets to ONE controller and start bootkube to perform
|
||||
# one-time self-hosted cluster bootstrapping.
|
||||
resource "null_resource" "bootkube-start" {
|
||||
# Without depends_on, this remote-exec may start before the kubeconfig copy.
|
||||
# Terraform only does one task at a time, so it would try to bootstrap
|
||||
# Kubernetes and Tectonic while no Kubelets are running. Ensure all nodes
|
||||
# receive a kubeconfig before proceeding with bootkube and tectonic.
|
||||
depends_on = ["null_resource.copy-kubeconfig"]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = "${element(var.controller_domains, 0)}"
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = "${var.asset_dir}"
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv /home/core/assets /opt/bootkube",
|
||||
"sudo systemctl start bootkube",
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
variable "matchbox_http_endpoint" {
|
||||
type = "string"
|
||||
description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
|
||||
}
|
||||
|
||||
variable "container_linux_channel" {
|
||||
type = "string"
|
||||
description = "Container Linux channel corresponding to the container_linux_version"
|
||||
}
|
||||
|
||||
variable "container_linux_version" {
|
||||
type = "string"
|
||||
description = "Container Linux version of the kernel/initrd to PXE or the image to install"
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
type = "string"
|
||||
description = "Cluster name"
|
||||
}
|
||||
|
||||
variable "ssh_authorized_key" {
|
||||
type = "string"
|
||||
description = "SSH public key to set as an authorized_key on machines"
|
||||
}
|
||||
|
||||
# Machines
|
||||
# Terraform's crude "type system" does properly support lists of maps so we do this.
|
||||
|
||||
variable "controller_names" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "controller_macs" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "controller_domains" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_names" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_macs" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_domains" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
# bootkube assets
|
||||
|
||||
variable "k8s_domain_name" {
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IP range to assign Kubernetes pods"
|
||||
type = "string"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
description = <<EOD
|
||||
CIDR IP range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
|
||||
EOD
|
||||
type = "string"
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = "string"
|
||||
default = ""
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
variable "experimental_self_hosted_etcd" {
|
||||
default = "false"
|
||||
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on-hosts"
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
{{ if eq .etcd_on_host "true" }}
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
|
||||
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
|
||||
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
|
||||
{{ end }}
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
{{ if eq .etcd_on_host "false" -}}
|
||||
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
|
||||
{{ end }}
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes control plane with a temp api-server
|
||||
ConditionPathExists=!/opt/bootkube/init_bootkube.done
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
# Move experimental manifests
|
||||
[ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
|
||||
[ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
|
||||
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
|
||||
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.4.4}"
|
||||
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=$BOOTKUBE_ASSETS \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$RKT_OPTS \
|
||||
${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- {{.ssh_authorized_key}}
|
||||
@@ -1,131 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
{{ if eq .etcd_on_host "true" }}
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
--endpoints={{.etcd_endpoints}}
|
||||
{{ end }}
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
{{ if eq .etcd_on_host "false" -}}
|
||||
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
|
||||
{{ end }}
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- {{.ssh_authorized_key}}
|
||||
@@ -20,8 +20,14 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -ex
|
||||
curl "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
coreos-install -d /dev/sda -C {{.container_linux_channel}} -V {{.container_linux_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}} {{if index . "container_linux_oem"}}-o {{.container_linux_oem}}{{end}}
|
||||
curl --retry 10 "${ignition_endpoint}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
coreos-install \
|
||||
-d ${install_disk} \
|
||||
-C ${container_linux_channel} \
|
||||
-V ${container_linux_version} \
|
||||
-o "${container_linux_oem}" \
|
||||
${baseurl_flag} \
|
||||
-i ignition.json
|
||||
udevadm settle
|
||||
systemctl reboot
|
||||
passwd:
|
||||
|
||||
@@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
|
||||
@@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
|
||||
@@ -13,11 +13,3 @@ output "etcd3" {
|
||||
output "etcd3-gateway" {
|
||||
value = "${matchbox_profile.etcd3-gateway.name}"
|
||||
}
|
||||
|
||||
output "bootkube-controller" {
|
||||
value = "${matchbox_profile.bootkube-controller.name}"
|
||||
}
|
||||
|
||||
output "bootkube-worker" {
|
||||
value = "${matchbox_profile.bootkube-worker.name}"
|
||||
}
|
||||
|
||||
@@ -8,13 +8,29 @@ resource "matchbox_profile" "container-linux-install" {
|
||||
]
|
||||
|
||||
args = [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
]
|
||||
|
||||
container_linux_config = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
|
||||
container_linux_config = "${data.template_file.container-linux-install-config.rendered}"
|
||||
}
|
||||
|
||||
data "template_file" "container-linux-install-config" {
|
||||
template = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
|
||||
|
||||
vars {
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
ignition_endpoint = "${format("%s/ignition", var.matchbox_http_endpoint)}"
|
||||
install_disk = "${var.install_disk}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
|
||||
# only cached-container-linux profile adds -b baseurl
|
||||
baseurl_flag = ""
|
||||
}
|
||||
}
|
||||
|
||||
// Container Linux Install profile (from matchbox /assets cache)
|
||||
@@ -28,13 +44,29 @@ resource "matchbox_profile" "cached-container-linux-install" {
|
||||
]
|
||||
|
||||
args = [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
]
|
||||
|
||||
container_linux_config = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
|
||||
container_linux_config = "${data.template_file.cached-container-linux-install-config.rendered}"
|
||||
}
|
||||
|
||||
data "template_file" "cached-container-linux-install-config" {
|
||||
template = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
|
||||
|
||||
vars {
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
ignition_endpoint = "${format("%s/ignition", var.matchbox_http_endpoint)}"
|
||||
install_disk = "${var.install_disk}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
|
||||
# profile uses -b baseurl to install from matchbox cache
|
||||
baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
// etcd3 profile
|
||||
@@ -48,15 +80,3 @@ resource "matchbox_profile" "etcd3-gateway" {
|
||||
name = "etcd3-gateway"
|
||||
container_linux_config = "${file("${path.module}/cl/etcd3-gateway.yaml.tmpl")}"
|
||||
}
|
||||
|
||||
// Self-hosted Kubernetes (bootkube) Controller profile
|
||||
resource "matchbox_profile" "bootkube-controller" {
|
||||
name = "bootkube-controller"
|
||||
container_linux_config = "${file("${path.module}/cl/bootkube-controller.yaml.tmpl")}"
|
||||
}
|
||||
|
||||
// Self-hosted Kubernetes (bootkube) Worker profile
|
||||
resource "matchbox_profile" "bootkube-worker" {
|
||||
name = "bootkube-worker"
|
||||
container_linux_config = "${file("${path.module}/cl/bootkube-worker.yaml.tmpl")}"
|
||||
}
|
||||
|
||||
@@ -12,3 +12,17 @@ variable "container_linux_channel" {
|
||||
type = "string"
|
||||
description = "Container Linux channel corresponding to the container_linux_version"
|
||||
}
|
||||
|
||||
# optional
|
||||
|
||||
variable "install_disk" {
|
||||
type = "string"
|
||||
default = "/dev/sda"
|
||||
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = "string"
|
||||
default = ""
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -ex
|
||||
curl "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
curl --retry 10 "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
coreos-install -d /dev/sda -C stable -V current -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}}
|
||||
udevadm settle
|
||||
systemctl reboot
|
||||
|
||||
@@ -10,7 +10,7 @@ resource "matchbox_group" "default" {
|
||||
}
|
||||
}
|
||||
|
||||
// Match machines which have CoreOS installed
|
||||
// Match machines which have CoreOS Container Linux installed
|
||||
resource "matchbox_group" "node1" {
|
||||
name = "node1"
|
||||
profile = "${matchbox_profile.simple.name}"
|
||||
|
||||
@@ -8,6 +8,7 @@ resource "matchbox_profile" "coreos-install" {
|
||||
]
|
||||
|
||||
args = [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
|
||||
20
glide.lock
generated
20
glide.lock
generated
@@ -1,20 +1,23 @@
|
||||
hash: 205de0b66ed059a1f10d3fb36c7d465439818123940a9aaa68ddc71cc3bbfddd
|
||||
updated: 2017-04-17T17:09:48.864562358-07:00
|
||||
hash: b404b094b7ff5d83fac658393148a51f2b3f74ce1026502524be71772c30e9b2
|
||||
updated: 2017-11-06T13:24:02.819805752-08:00
|
||||
imports:
|
||||
- name: github.com/ajeddeloh/go-json
|
||||
version: 73d058cf8437a1989030afe571eeab9f90eebbbd
|
||||
- name: github.com/ajeddeloh/yaml
|
||||
version: 1072abfea31191db507785e2e0c1b8d1440d35a5
|
||||
version: 6b94386aeefd8c4b8470aee72bfca084c2f91da9
|
||||
- name: github.com/alecthomas/units
|
||||
version: 6b4e7dc5e3143b85ea77909c72caf89416fc2915
|
||||
- name: github.com/camlistore/camlistore
|
||||
version: 9106ce829629773474c689b34aacd7d3aaa99426
|
||||
- name: github.com/coreos/container-linux-config-transpiler
|
||||
version: 12554ca0a5ce8ea4a6c594242ccb23d8b9bff493
|
||||
version: be4cb16b0aaf0f6b4fdf63b8b2a081397276bf0f
|
||||
subpackages:
|
||||
- config
|
||||
- config/astyaml
|
||||
- config/platform
|
||||
- config/templating
|
||||
- config/types
|
||||
- config/types/util
|
||||
- name: github.com/coreos/coreos-cloudinit
|
||||
version: 5be99bf577f2768193c7fb587ef5a8806c1503cf
|
||||
subpackages:
|
||||
@@ -29,7 +32,7 @@ imports:
|
||||
- journal
|
||||
- unit
|
||||
- name: github.com/coreos/ignition
|
||||
version: d75d0aa3bf307f0954ce4ea8cac56dacec8d16ce
|
||||
version: 01c039a5ce59acd39e5741713e59abfcb74d0782
|
||||
subpackages:
|
||||
- config
|
||||
- config/types
|
||||
@@ -37,8 +40,11 @@ imports:
|
||||
- config/v1/types
|
||||
- config/v2_0
|
||||
- config/v2_0/types
|
||||
- config/v2_1
|
||||
- config/v2_1/types
|
||||
- config/validate
|
||||
- config/validate/astjson
|
||||
- config/validate/astnode
|
||||
- config/validate/report
|
||||
- name: github.com/coreos/pkg
|
||||
version: 66fe44ad037ccb80329115cb4db0dbe8e9beb03a
|
||||
@@ -80,7 +86,7 @@ imports:
|
||||
subpackages:
|
||||
- errorutil
|
||||
- name: golang.org/x/crypto
|
||||
version: 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
|
||||
version: 7e9105388ebff089b3f99f0ef676ea55a6da3a7e
|
||||
subpackages:
|
||||
- cast5
|
||||
- openpgp
|
||||
@@ -98,7 +104,7 @@ imports:
|
||||
- internal/timeseries
|
||||
- trace
|
||||
- name: golang.org/x/sys
|
||||
version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03
|
||||
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
|
||||
subpackages:
|
||||
- unix
|
||||
- name: google.golang.org/grpc
|
||||
|
||||
@@ -19,13 +19,13 @@ import:
|
||||
- transport
|
||||
# Container Linux Config Transpiler and Ignition
|
||||
- package: github.com/coreos/container-linux-config-transpiler
|
||||
version: v0.2.2
|
||||
version: v0.5.0
|
||||
subpackages:
|
||||
- config
|
||||
- config/types
|
||||
- config/templating
|
||||
- package: github.com/coreos/ignition
|
||||
version: d75d0aa3bf307f0954ce4ea8cac56dacec8d16ce
|
||||
version: v0.19.0
|
||||
subpackages:
|
||||
- config
|
||||
- config/types
|
||||
@@ -35,7 +35,7 @@ import:
|
||||
- config/validate/astjson
|
||||
- config/validate/report
|
||||
- package: github.com/ajeddeloh/yaml
|
||||
version: 1072abfea31191db507785e2e0c1b8d1440d35a5
|
||||
version: 6b94386aeefd8c4b8470aee72bfca084c2f91da9
|
||||
- package: github.com/vincent-petithory/dataurl
|
||||
version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
|
||||
- package: github.com/alecthomas/units
|
||||
@@ -59,7 +59,7 @@ import:
|
||||
- package: github.com/spf13/cobra
|
||||
version: 65a708cee0a4424f4e353d031ce440643e312f92
|
||||
- package: golang.org/x/crypto
|
||||
version: 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
|
||||
version: 7e9105388ebff089b3f99f0ef676ea55a6da3a7e
|
||||
subpackages:
|
||||
- cast5
|
||||
- openpgp
|
||||
|
||||
16
matchbox/cli/generic.go
Normal file
16
matchbox/cli/generic.go
Normal file
@@ -0,0 +1,16 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// genericCmd represents the generic command
|
||||
var genericCmd = &cobra.Command{
|
||||
Use: "generic",
|
||||
Short: "Manage Generic templates",
|
||||
Long: `Manage Generic templates`,
|
||||
}
|
||||
|
||||
func init() {
|
||||
RootCmd.AddCommand(genericCmd)
|
||||
}
|
||||
48
matchbox/cli/generic_create.go
Normal file
48
matchbox/cli/generic_create.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
|
||||
"context"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
pb "github.com/coreos/matchbox/matchbox/server/serverpb"
|
||||
)
|
||||
|
||||
// genericPutCmd creates and updates Generic templates.
|
||||
var (
|
||||
genericPutCmd = &cobra.Command{
|
||||
Use: "create --file FILENAME",
|
||||
Short: "Create an Generic template",
|
||||
Long: `Create an Generic template`,
|
||||
Run: runGenericPutCmd,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
genericCmd.AddCommand(genericPutCmd)
|
||||
genericPutCmd.Flags().StringVarP(&flagFilename, "filename", "f", "", "filename to use to create an Generic template")
|
||||
genericPutCmd.MarkFlagRequired("filename")
|
||||
}
|
||||
|
||||
func runGenericPutCmd(cmd *cobra.Command, args []string) {
|
||||
if len(flagFilename) == 0 {
|
||||
cmd.Help()
|
||||
return
|
||||
}
|
||||
if err := validateArgs(cmd, args); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
client := mustClientFromCmd(cmd)
|
||||
config, err := ioutil.ReadFile(flagFilename)
|
||||
if err != nil {
|
||||
exitWithError(ExitError, err)
|
||||
}
|
||||
req := &pb.GenericPutRequest{Name: filepath.Base(flagFilename), Config: config}
|
||||
_, err = client.Generic.GenericPut(context.TODO(), req)
|
||||
if err != nil {
|
||||
exitWithError(ExitError, err)
|
||||
}
|
||||
}
|
||||
@@ -31,7 +31,7 @@ func runProfileDescribeCmd(cmd *cobra.Command, args []string) {
|
||||
tw := newTabWriter(os.Stdout)
|
||||
defer tw.Flush()
|
||||
// legend
|
||||
fmt.Fprintf(tw, "ID\tNAME\tIGNITION\tCLOUD\tKERNEL\tINITRD\tCMDLINE\n")
|
||||
fmt.Fprintf(tw, "ID\tNAME\tIGNITION\tCLOUD\tKERNEL\tINITRD\tARGS\n")
|
||||
|
||||
client := mustClientFromCmd(cmd)
|
||||
request := &pb.ProfileGetRequest{
|
||||
@@ -42,5 +42,5 @@ func runProfileDescribeCmd(cmd *cobra.Command, args []string) {
|
||||
return
|
||||
}
|
||||
p := resp.Profile
|
||||
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\t%#v\n", p.Id, p.Name, p.IgnitionId, p.CloudId, p.Boot.Kernel, p.Boot.Initrd, p.Boot.Cmdline)
|
||||
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", p.Id, p.Name, p.IgnitionId, p.CloudId, p.Boot.Kernel, p.Boot.Initrd, p.Boot.Args)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ package client
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
@@ -31,6 +33,8 @@ type Client struct {
|
||||
Groups rpcpb.GroupsClient
|
||||
Profiles rpcpb.ProfilesClient
|
||||
Ignition rpcpb.IgnitionClient
|
||||
Generic rpcpb.GenericClient
|
||||
Select rpcpb.SelectClient
|
||||
conn *grpc.ClientConn
|
||||
}
|
||||
|
||||
@@ -39,6 +43,11 @@ func New(config *Config) (*Client, error) {
|
||||
if len(config.Endpoints) == 0 {
|
||||
return nil, errNoEndpoints
|
||||
}
|
||||
for _, endpoint := range config.Endpoints {
|
||||
if _, _, err := net.SplitHostPort(endpoint); err != nil {
|
||||
return nil, fmt.Errorf("client: invalid host:port endpoint: %v", err)
|
||||
}
|
||||
}
|
||||
return newClient(config)
|
||||
}
|
||||
|
||||
@@ -57,6 +66,8 @@ func newClient(config *Config) (*Client, error) {
|
||||
Groups: rpcpb.NewGroupsClient(conn),
|
||||
Profiles: rpcpb.NewProfilesClient(conn),
|
||||
Ignition: rpcpb.NewIgnitionClient(conn),
|
||||
Generic: rpcpb.NewGenericClient(conn),
|
||||
Select: rpcpb.NewSelectClient(conn),
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
@@ -14,3 +14,20 @@ func TestNew_MissingEndpoints(t *testing.T) {
|
||||
assert.Nil(t, client)
|
||||
assert.Equal(t, errNoEndpoints, err)
|
||||
}
|
||||
|
||||
// gRPC expects host:port with no scheme (e.g. matchbox.example.com:8081)
|
||||
func TestNew_InvalidEndpoints(t *testing.T) {
|
||||
invalid := []string{
|
||||
"matchbox.example.com",
|
||||
"http://matchbox.example.com:8081",
|
||||
"https://matchbox.example.com:8081",
|
||||
}
|
||||
|
||||
for _, endpoint := range invalid {
|
||||
client, err := New(&Config{
|
||||
Endpoints: []string{endpoint},
|
||||
})
|
||||
assert.Nil(t, client)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user