Compare commits
266 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
998eeaa1d0 | ||
|
|
873f053d10 | ||
|
|
82e97ed654 | ||
|
|
e1de1ac938 | ||
|
|
515afcbb1d | ||
|
|
97fb6b302c | ||
|
|
bf04fa30ad | ||
|
|
de0b88839c | ||
|
|
f2f00b6d0e | ||
|
|
117d6d07e2 | ||
|
|
c68f411910 | ||
|
|
a0b24a9596 | ||
|
|
0b56acf884 | ||
|
|
ec55f43cdf | ||
|
|
c07eb9aaba | ||
|
|
a7d19dfdd2 | ||
|
|
908e89c3a1 | ||
|
|
0626163494 | ||
|
|
b4f5e574a2 | ||
|
|
74005e901c | ||
|
|
b97328b6ec | ||
|
|
e1d2d8fcc9 | ||
|
|
cefa31c8e4 | ||
|
|
f4623c508f | ||
|
|
901fda0e3e | ||
|
|
1cf7d6cd8c | ||
|
|
7628807c77 | ||
|
|
fe0b3a1ed3 | ||
|
|
8a4da0cf7b | ||
|
|
5bfcb24003 | ||
|
|
a7c13fbe5b | ||
|
|
da35d4d567 | ||
|
|
94fef772a5 | ||
|
|
c1d4c53c2c | ||
|
|
8e3855bbb3 | ||
|
|
67707d83a5 | ||
|
|
7c9a94c6dc | ||
|
|
a5d3a8a4a7 | ||
|
|
dc0a098bf6 | ||
|
|
d34fbb6da6 | ||
|
|
7fbce93101 | ||
|
|
2eea9c7731 | ||
|
|
9cf216e937 | ||
|
|
cac0881206 | ||
|
|
832a95e53b | ||
|
|
b5a6888f3b | ||
|
|
65eff1203d | ||
|
|
0bc1cb8d77 | ||
|
|
cd57013d5b | ||
|
|
b25a379d19 | ||
|
|
e78150218f | ||
|
|
cccb588855 | ||
|
|
9a177e83d7 | ||
|
|
dfd0457e03 | ||
|
|
9de30aea59 | ||
|
|
910ee6f18c | ||
|
|
0994b860b5 | ||
|
|
78f7e8d492 | ||
|
|
e804ace9e2 | ||
|
|
0012d691f4 | ||
|
|
e170c600b3 | ||
|
|
4f229d5d9a | ||
|
|
3cd8ba0a05 | ||
|
|
74f13a2f86 | ||
|
|
4eee84b17d | ||
|
|
845d1d0adc | ||
|
|
5b1c790d0c | ||
|
|
70400b7dd0 | ||
|
|
c6ebdfeb92 | ||
|
|
99acdf4c6b | ||
|
|
be057ed9c8 | ||
|
|
8bb99143e8 | ||
|
|
c802ce5805 | ||
|
|
c4e82c03a4 | ||
|
|
29c93046ef | ||
|
|
34e981dc7c | ||
|
|
3a88a663c3 | ||
|
|
572c8d26eb | ||
|
|
c22b273548 | ||
|
|
c3ef870ce5 | ||
|
|
e9ce7325ab | ||
|
|
948bdee165 | ||
|
|
50e923730e | ||
|
|
1799c8e23e | ||
|
|
454ae972a1 | ||
|
|
fe0c3438fd | ||
|
|
65b410e20b | ||
|
|
dced573acb | ||
|
|
4888c04dee | ||
|
|
4e9d542a87 | ||
|
|
08f4e9908b | ||
|
|
dd96f58417 | ||
|
|
f5ef2d156b | ||
|
|
f673d48007 | ||
|
|
7a58d944d8 | ||
|
|
5d975ec42a | ||
|
|
2404d34b0e | ||
|
|
c9b9711bca | ||
|
|
ae524f57f2 | ||
|
|
f26224c57d | ||
|
|
2c063a4674 | ||
|
|
7d5656ffe3 | ||
|
|
a683e8261e | ||
|
|
c75fc8f88e | ||
|
|
b10c777729 | ||
|
|
5992ba6ad5 | ||
|
|
ca223f800b | ||
|
|
1246d5a0db | ||
|
|
4f7dd0942c | ||
|
|
3e6aa4ee73 | ||
|
|
9c39221b71 | ||
|
|
4103461778 | ||
|
|
9a6d815978 | ||
|
|
6aa8759bfd | ||
|
|
d5027950e2 | ||
|
|
85a2a6b252 | ||
|
|
4bc5fcdc5e | ||
|
|
2f4d5b95e4 | ||
|
|
257f2fa553 | ||
|
|
7829c14d52 | ||
|
|
ce72fb72a0 | ||
|
|
41d5db4723 | ||
|
|
dfd08e48e5 | ||
|
|
347e142db9 | ||
|
|
b63e9b2589 | ||
|
|
4a32b0cd59 | ||
|
|
b0b8d97539 | ||
|
|
581be69da7 | ||
|
|
dc75fcc869 | ||
|
|
fc3e688c97 | ||
|
|
f07dc758c4 | ||
|
|
d2827d7ed0 | ||
|
|
692bf81df8 | ||
|
|
cfcec6ac03 | ||
|
|
592969134c | ||
|
|
2b605c8d9c | ||
|
|
63a95188be | ||
|
|
5aa301b72d | ||
|
|
7647a5d095 | ||
|
|
06f80fa003 | ||
|
|
01a767ab3e | ||
|
|
6be5c0f59c | ||
|
|
5efc514097 | ||
|
|
757f46e96f | ||
|
|
5aeb2d1d3d | ||
|
|
1119bb22f0 | ||
|
|
6195ae377e | ||
|
|
d7783a94e9 | ||
|
|
4228ccb330 | ||
|
|
e5d5280658 | ||
|
|
46f0477614 | ||
|
|
0e4265b2bc | ||
|
|
18de74e85b | ||
|
|
31040e9729 | ||
|
|
f0a4cfd1cb | ||
|
|
aeca5b08f9 | ||
|
|
7c1b9b17dc | ||
|
|
0e6ce19172 | ||
|
|
281fd5226a | ||
|
|
fb0ee0f05a | ||
|
|
7def0d7e86 | ||
|
|
1c076875c2 | ||
|
|
7ba0f1476b | ||
|
|
ec6844a43a | ||
|
|
6857c1319a | ||
|
|
cb6bb3c90d | ||
|
|
5c5be5ce5b | ||
|
|
4cbf2b7448 | ||
|
|
d781e43212 | ||
|
|
3ca88334d2 | ||
|
|
c7a649c731 | ||
|
|
d03f256976 | ||
|
|
9ecfcac0b9 | ||
|
|
035b01634f | ||
|
|
e8d3e8c70c | ||
|
|
cc490ff55d | ||
|
|
df6354ad45 | ||
|
|
3d8a3777f0 | ||
|
|
dfee550522 | ||
|
|
07e9676457 | ||
|
|
a69f6dd2d8 | ||
|
|
26d8b7d480 | ||
|
|
2c02549cd6 | ||
|
|
3c999d27e9 | ||
|
|
52b317dff9 | ||
|
|
97985b213b | ||
|
|
1ba353e5b6 | ||
|
|
398d12e148 | ||
|
|
be8fd3d488 | ||
|
|
27d1139a07 | ||
|
|
ee3445454e | ||
|
|
170f8c09ec | ||
|
|
e10525ded0 | ||
|
|
4c47adf390 | ||
|
|
ce3154cae9 | ||
|
|
5e54960a92 | ||
|
|
e008b8ea5e | ||
|
|
b636fc7a3d | ||
|
|
30cf06853d | ||
|
|
61377d2955 | ||
|
|
a7ba7714f5 | ||
|
|
ff916686e7 | ||
|
|
fbc4b39c59 | ||
|
|
be46b389bf | ||
|
|
a14e6c8bb9 | ||
|
|
c03b7a9627 | ||
|
|
ac40eeedb5 | ||
|
|
9e23f3a86d | ||
|
|
d1baa3fb65 | ||
|
|
c915fc2b52 | ||
|
|
6f02107448 | ||
|
|
ff06990edb | ||
|
|
9bc6edc65b | ||
|
|
5b8006ae35 | ||
|
|
ff5cd0468e | ||
|
|
4d9bd82c12 | ||
|
|
882793f230 | ||
|
|
858e1bda73 | ||
|
|
cfbb9cebd0 | ||
|
|
edbe5bab20 | ||
|
|
299701e7ea | ||
|
|
a20720a0d4 | ||
|
|
5a9c24ceb3 | ||
|
|
82af3f747d | ||
|
|
e955fecd30 | ||
|
|
0c1e20db27 | ||
|
|
8d6d0397ff | ||
|
|
abc7eb8dfb | ||
|
|
149f441ad8 | ||
|
|
cf43908a72 | ||
|
|
523b15ed13 | ||
|
|
aac270e937 | ||
|
|
1cfdce2970 | ||
|
|
9d3d08a26f | ||
|
|
b176de805e | ||
|
|
009b44b25d | ||
|
|
57e473b6f5 | ||
|
|
66cd8da417 | ||
|
|
50a3d11414 | ||
|
|
6fa13007c8 | ||
|
|
500a7b25e1 | ||
|
|
951e5ec4a3 | ||
|
|
f92743fa57 | ||
|
|
d84bb8e398 | ||
|
|
d54562f429 | ||
|
|
395494c1d9 | ||
|
|
ddbe17cd31 | ||
|
|
b1a866370a | ||
|
|
b8326e6db6 | ||
|
|
7864e64fd2 | ||
|
|
89bb5125b5 | ||
|
|
cff053328d | ||
|
|
698b6f6118 | ||
|
|
23f23c1dcb | ||
|
|
51cf859587 | ||
|
|
8061f57346 | ||
|
|
8000c323b6 | ||
|
|
314a317271 | ||
|
|
d437167ebf | ||
|
|
4067702641 | ||
|
|
86c07da76e | ||
|
|
be00fdbca0 | ||
|
|
abbf7faf56 | ||
|
|
76cc8cb13c | ||
|
|
ed6dde528a | ||
|
|
1e095661ad |
33
.travis.yml
@@ -3,22 +3,27 @@ sudo: required
|
||||
services:
|
||||
- docker
|
||||
go:
|
||||
- 1.7.4
|
||||
- 1.8
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
- "1.11.x"
|
||||
- "1.12.x"
|
||||
- "1.13.x"
|
||||
- "1.13.4"
|
||||
install:
|
||||
- go get github.com/golang/lint/golint
|
||||
- GO111MODULE=off go get golang.org/x/lint/golint
|
||||
script:
|
||||
- make test
|
||||
- make
|
||||
deploy:
|
||||
provider: script
|
||||
script: scripts/travis-docker-push
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: master
|
||||
go: '1.8'
|
||||
- provider: script
|
||||
script: scripts/dev/travis-docker-push
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: master
|
||||
go: '1.13.4'
|
||||
- provider: script
|
||||
script: contrib/dnsmasq/travis-deploy
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: dnsmasq
|
||||
# pick one, so travis deploys once
|
||||
go: '1.11.x'
|
||||
notifications:
|
||||
email: change
|
||||
|
||||
71
CHANGES.md
@@ -1,9 +1,78 @@
|
||||
# matchbox
|
||||
# Matchbox
|
||||
|
||||
Notable changes between releases.
|
||||
|
||||
## Latest
|
||||
|
||||
## v0.8.3
|
||||
|
||||
* Publish docs to [https://matchbox.psdn.io](https://matchbox.psdn.io/) ([#769](https://github.com/poseidon/matchbox/pull/769))
|
||||
* Update Go version from v1.11.7 to v1.13.4 ([#766](https://github.com/poseidon/matchbox/pull/766), [#770](https://github.com/poseidon/matchbox/pull/770))
|
||||
* Update container image base from `alpine:3.9` to `alpine:3.10` ([#761](https://github.com/poseidon/matchbox/pull/761))
|
||||
* Include `get-fedora-coreos` convenience script ([#763](https://github.com/poseidon/matchbox/pull/763))
|
||||
* Remove Kubernetes provisioning examples ([#759](https://github.com/poseidon/matchbox/pull/759))
|
||||
* Remove rkt tutorials and docs ([#765](https://github.com/poseidon/matchbox/pull/765))
|
||||
|
||||
## v0.8.1 - v0.8.2
|
||||
|
||||
Releases `v0.8.1` and `v0.8.2` were not built cleanly
|
||||
|
||||
* Release tags and container images have been removed
|
||||
* Caused by go get golint (module-aware) mutating `go.mod` on Travis (see [#775](https://github.com/poseidon/matchbox/pull/775), [#777](https://github.com/poseidon/matchbox/pull/777))
|
||||
|
||||
## v0.8.0
|
||||
|
||||
* Transfer Matchbox repo from coreos to poseidon GitHub Org
|
||||
* Publish container images at [quay.io/poseidon/matchbox](https://quay.io/repository/poseidon/matchbox)
|
||||
* Build Matchbox with Go v1.11.7 for images and binaries
|
||||
* Update container image base from alpine:3.6 to alpine:3.9
|
||||
* Render Container Linux Configs as Ignition v2.2.0
|
||||
* Validate raw Ignition configs with the v2.2 spec (warn-only)
|
||||
* Fix warnings that v2.2 configs are too new
|
||||
|
||||
Note: Release signing key [has changed](https://github.com/poseidon/matchbox/blob/v0.8.0/Documentation/deployment.md) with the project move.
|
||||
|
||||
### Examples
|
||||
|
||||
* Update Kubernetes example clusters to v1.14.1 (Terraform-based)
|
||||
|
||||
## v0.7.1 (2018-11-01)
|
||||
|
||||
* Add `kernel_args` variable to the terraform bootkube-install cluster definition
|
||||
* Add `get-flatcar` helper script
|
||||
* Add optional TLS support to read-only HTTP API
|
||||
* Build Matchbox with Go 1.11.1 for images and binaries
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade Kubernetes example clusters to v1.10.0 (Terraform-based)
|
||||
* Upgrade Kubernetes example clusters to v1.8.5
|
||||
|
||||
## v0.7.0 (2017-12-12)
|
||||
|
||||
* Add gRPC API endpoints for managing generic (experimental) templates
|
||||
* Update Container Linux config transpiler to v0.5.0
|
||||
* Update Ignition to v0.19.0, render v2.1.0 Ignition configs
|
||||
* Drop support for Container Linux versions below 1465.0.0 (breaking)
|
||||
* Build Matchbox with Go 1.8.5 for images and binaries
|
||||
* Remove Profile `Cmdline` map (deprecated in v0.5.0), use `Args` slice instead
|
||||
* Remove pixiecore support (deprecated in v0.5.0)
|
||||
* Remove `ContextHandler`, `ContextHandlerFunc`, and `NewHandler` from the `matchbox/http` package.
|
||||
|
||||
### Examples / Modules
|
||||
|
||||
* Upgrade Kubernetes example clusters to v1.8.4
|
||||
* Kubernetes examples clusters enable etcd TLS
|
||||
* Deploy the Container Linux Update Operator (CLUO) to coordinate reboots of Container Linux nodes in Kubernetes clusters. See the cluster [addon docs](Documentation/cluster-addons.md).
|
||||
* Kubernetes examples (terraform and non-terraform) mask locksmithd
|
||||
* Terraform modules `bootkube` and `profiles` (Kubernetes) mask locksmithd
|
||||
|
||||
## v0.6.1 (2017-05-25)
|
||||
|
||||
* Improve the installation documentation
|
||||
* Move examples/etc/matchbox/cert-gen to scripts/tls
|
||||
* Build Matchbox with Go 1.8.3 for images and binaries
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade self-hosted Kubernetes cluster examples to v1.6.4
|
||||
|
||||
@@ -1,77 +1,5 @@
|
||||
# How to Contribute
|
||||
# Contributing
|
||||
|
||||
CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
|
||||
GitHub pull requests. This document outlines some of the conventions on
|
||||
development workflow, commit message formatting, contact points and other
|
||||
resources to make it easier to get your contribution accepted.
|
||||
## Developer Certificate of Origin
|
||||
|
||||
# Certificate of Origin
|
||||
|
||||
By contributing to this project you agree to the Developer Certificate of
|
||||
Origin (DCO). This document was created by the Linux Kernel community and is a
|
||||
simple statement that you, as a contributor, have the legal right to make the
|
||||
contribution. See the [DCO](DCO) file for details.
|
||||
|
||||
# Email and Chat
|
||||
|
||||
The project currently uses the general CoreOS email list and IRC channel:
|
||||
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
|
||||
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
|
||||
|
||||
Please avoid emailing maintainers found in the MAINTAINERS file directly. They
|
||||
are very busy and read the mailing lists.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- Fork the repository on GitHub
|
||||
- Read the [README](README.md) for build and test instructions
|
||||
- Play with the project, submit bugs, submit patches!
|
||||
|
||||
## Contribution Flow
|
||||
|
||||
This is a rough outline of what a contributor's workflow looks like:
|
||||
|
||||
- Create a topic branch from where you want to base your work (usually master).
|
||||
- Make commits of logical units.
|
||||
- Make sure your commit messages are in the proper format (see below).
|
||||
- Push your changes to a topic branch in your fork of the repository.
|
||||
- Make sure the tests pass, and add any new tests as appropriate.
|
||||
- Submit a pull request to the original repository.
|
||||
|
||||
Thanks for your contributions!
|
||||
|
||||
### Coding Style
|
||||
|
||||
CoreOS projects written in Go follow a set of style guidelines that we've documented
|
||||
[here](https://github.com/coreos/docs/tree/master/golang). Please follow them when
|
||||
working on your contributions.
|
||||
|
||||
### Format of the Commit Message
|
||||
|
||||
We follow a rough convention for commit messages that is designed to answer two
|
||||
questions: what changed and why. The subject line should feature the what and
|
||||
the body of the commit should describe the why.
|
||||
|
||||
```
|
||||
scripts: add the test-cluster command
|
||||
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
|
||||
Fixes #38
|
||||
```
|
||||
|
||||
The format can be described more formally as follows:
|
||||
|
||||
```
|
||||
<subsystem>: <what changed>
|
||||
<BLANK LINE>
|
||||
<why this change was made>
|
||||
<BLANK LINE>
|
||||
<footer>
|
||||
```
|
||||
|
||||
The first line is the subject and should be no longer than 70 characters, the
|
||||
second line is always blank, and other lines should be wrapped at 80 characters.
|
||||
This allows the message to be easier to read on GitHub as well as in various
|
||||
git tools.
|
||||
By contributing, you agree to the Linux Foundation's Developer Certificate of Origin ([DCO](DCO)). The DCO is a statement that you, the contributor, have the legal right to make your contribution and understand the contribution will be distributed as part of this project.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
FROM alpine:3.5
|
||||
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
|
||||
FROM alpine:3.10
|
||||
LABEL maintainer="Dalton Hubble <dghubble@gmail.com>"
|
||||
COPY bin/matchbox /matchbox
|
||||
EXPOSE 8080
|
||||
ENTRYPOINT ["/matchbox"]
|
||||
|
||||
@@ -1,203 +0,0 @@
|
||||
|
||||
# Upgrading self-hosted Kubernetes
|
||||
|
||||
[Self-hosted](bootkube.md) Kubernetes clusters schedule Kubernetes components such as the apiserver, kubelet, scheduler, and controller-manager as pods like other applications (except with node selectors). This allows Kubernetes level operations to be performed to upgrade clusters in place, rather than by re-provisioning.
|
||||
|
||||
Let's upgrade a self-hosted Kubernetes v1.4.1 cluster to v1.4.3 as an example.
|
||||
|
||||
## Inspect
|
||||
|
||||
Show the control plane daemonsets and deployments which will need to be updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get daemonsets -n=kube-system
|
||||
NAME DESIRED CURRENT NODE-SELECTOR AGE
|
||||
kube-apiserver 1 1 master=true 5m
|
||||
kube-proxy 3 3 <none> 5m
|
||||
kubelet 3 3 <none> 5m
|
||||
|
||||
$ kubectl get deployments -n=kube-system
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
kube-controller-manager 1 1 1 1 5m
|
||||
kube-dns-v20 1 1 1 1 5m
|
||||
kube-scheduler 1 1 1 1 5m
|
||||
```
|
||||
|
||||
Check the current Kubernetes version.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.0", GitCommit:"a16c0a7f71a6f93c7e0f222d961f4675cd97a46b", GitTreeState:"clean", BuildDate:"2016-09-26T18:16:57Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.1+coreos.0", GitCommit:"b7a02f46b972c5211e5c04fdb1d5b86ac16c00eb", GitTreeState:"clean", BuildDate:"2016-10-11T20:13:55Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
In this case, Kubernetes is `v1.4.1+coreos.0` and our goal is to upgrade to `v1.4.3+coreos.0`. First, update the control plane pods. Then the kubelets and proxies on all nodes.
|
||||
|
||||
**Tip**: Follow along with a QEMU/KVM self-hosted Kubernetes cluster the first time, before upgrading your production bare-metal clusters ([tutorial](bootkube.md)).
|
||||
|
||||
## Control Plane
|
||||
|
||||
### kube-apiserver
|
||||
|
||||
Edit the kube-apiserver daemonset. Change the container image name to `quay.io/coreos/hyperkube:v1.4.3_coreos.0`.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kube-apiserver -n=kube-system
|
||||
```
|
||||
|
||||
Since daemonsets don't yet support rolling, manually delete each apiserver one by one and wait for each to be re-scheduled.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
# WARNING: Self-hosted Kubernetes is still new and this may fail
|
||||
$ kubectl delete pod kube-apiserver-s62kb -n=kube-system
|
||||
```
|
||||
|
||||
If you only have one, your cluster will be temporarily unavailable. Remember the Hyperkube image is quite large and this can take a minute.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-node1.example.com 1/1 Running 0 12m
|
||||
kube-apiserver-vyg3t 2/2 Running 0 2m
|
||||
kube-controller-manager-1510822774-qebia 1/1 Running 2 12m
|
||||
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 12m
|
||||
kube-proxy-8jthl 1/1 Running 0 12m
|
||||
kube-proxy-bnvgy 1/1 Running 0 12m
|
||||
kube-proxy-gkyx8 1/1 Running 0 12m
|
||||
kube-scheduler-2099299605-67ezp 1/1 Running 2 12m
|
||||
kubelet-exe5k 1/1 Running 0 12m
|
||||
kubelet-p3g98 1/1 Running 0 12m
|
||||
kubelet-quhhg 1/1 Running 0 12m
|
||||
```
|
||||
|
||||
### kube-scheduler
|
||||
|
||||
Edit the scheduler deployment to rolling update the scheduler. Change the container image name for the hyperkube.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-scheduler -n=kube-system
|
||||
```
|
||||
|
||||
Wait for the schduler to be deployed.
|
||||
|
||||
### kube-controller-manager
|
||||
|
||||
Edit the controller-manager deployment to rolling update the controller manager. Change the container image name for the hyperkube.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-controller-manager -n=kube-system
|
||||
```
|
||||
|
||||
Wait for the controller manager to be deployed.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-node1.example.com 1/1 Running 0 28m
|
||||
kube-apiserver-vyg3t 2/2 Running 0 18m
|
||||
kube-controller-manager-1709527928-zj8c4 1/1 Running 0 4m
|
||||
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 28m
|
||||
kube-proxy-8jthl 1/1 Running 0 28m
|
||||
kube-proxy-bnvgy 1/1 Running 0 28m
|
||||
kube-proxy-gkyx8 1/1 Running 0 28m
|
||||
kube-scheduler-2255275287-hti6w 1/1 Running 0 6m
|
||||
kubelet-exe5k 1/1 Running 0 28m
|
||||
kubelet-p3g98 1/1 Running 0 28m
|
||||
kubelet-quhhg 1/1 Running 0 28m
|
||||
```
|
||||
|
||||
### Verify
|
||||
|
||||
At this point, the control plane components have been upgraded to v1.4.3.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.0", GitCommit:"a16c0a7f71a6f93c7e0f222d961f4675cd97a46b", GitTreeState:"clean", BuildDate:"2016-09-26T18:16:57Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.3+coreos.0", GitCommit:"7819c84f25e8c661321ee80d6b9fa5f4ff06676f", GitTreeState:"clean", BuildDate:"2016-10-17T21:19:17Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
Finally, upgrade the kubelets and kube-proxies.
|
||||
|
||||
## kubelet and kube-proxy
|
||||
|
||||
Show the current kubelet and kube-proxy version on each node.
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
|
||||
kubeProxyVersion: v1.4.1+coreos.0
|
||||
kubeletVersion: v1.4.1+coreos.0
|
||||
kubeProxyVersion: v1.4.1+coreos.0
|
||||
kubeletVersion: v1.4.1+coreos.0
|
||||
kubeProxyVersion: v1.4.1+coreos.0
|
||||
kubeletVersion: v1.4.1+coreos.0
|
||||
```
|
||||
|
||||
Edit the kubelet and kube-proxy daemonsets. Change the container image name for the hyperkube.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kubelet -n=kube-system
|
||||
$ kubectl edit daemonset kube-proxy -n=kube-system
|
||||
```
|
||||
|
||||
Since daemonsets don't yet support rolling, manually delete each kubelet and each kube-proxy. The daemonset controller will create new (upgraded) replics.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods -n=kube-system
|
||||
$ kubectl delete pod kubelet-quhhg
|
||||
...repeat
|
||||
$ kubectl delete pod kube-proxy-8jthl -n=kube-system
|
||||
...repeat
|
||||
|
||||
$ kubectl get pods -n=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-node1.example.com 1/1 Running 0 1h
|
||||
kube-apiserver-vyg3t 2/2 Running 0 1h
|
||||
kube-controller-manager-1709527928-zj8c4 1/1 Running 0 47m
|
||||
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 1h
|
||||
kube-proxy-6dbne 1/1 Running 0 1s
|
||||
kube-proxy-sm4jv 1/1 Running 0 8s
|
||||
kube-proxy-xmuao 1/1 Running 0 14s
|
||||
kube-scheduler-2255275287-hti6w 1/1 Running 0 49m
|
||||
kubelet-hfdwr 1/1 Running 0 38s
|
||||
kubelet-oia47 1/1 Running 0 52s
|
||||
kubelet-s6dab 1/1 Running 0 59s
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
Verify that the kubelet and kube-proxy on each node have been upgraded.
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
|
||||
kubeProxyVersion: v1.4.3+coreos.0
|
||||
kubeletVersion: v1.4.3+coreos.0
|
||||
kubeProxyVersion: v1.4.3+coreos.0
|
||||
kubeletVersion: v1.4.3+coreos.0
|
||||
kubeProxyVersion: v1.4.3+coreos.0
|
||||
kubeletVersion: v1.4.3+coreos.0
|
||||
```
|
||||
|
||||
Now, Kubernetes components have been upgraded to a new version of Kubernetes!
|
||||
|
||||
## Going further
|
||||
|
||||
Bare-metal or virtualized self-hosted Kubernetes clusters can be upgraded in place in 5-10 minutes. Here is a bare-metal example:
|
||||
|
||||
```sh
|
||||
$ kubectl -n=kube-system get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
kube-api-checkpoint-ibm0.lab.dghubble.io 1/1 Running 0 2d
|
||||
kube-apiserver-j6atn 2/2 Running 0 5m
|
||||
kube-controller-manager-1709527928-y05n5 1/1 Running 0 1m
|
||||
kube-dns-v20-3531996453-zwbl8 3/3 Running 0 2d
|
||||
kube-proxy-e49p5 1/1 Running 0 14s
|
||||
kube-proxy-eu5dc 1/1 Running 0 8s
|
||||
kube-proxy-gjrzq 1/1 Running 0 3s
|
||||
kube-scheduler-2255275287-96n56 1/1 Running 0 2m
|
||||
kubelet-9ob0e 1/1 Running 0 19s
|
||||
kubelet-bvwp0 1/1 Running 0 14s
|
||||
kubelet-xlrql 1/1 Running 0 24s
|
||||
```
|
||||
|
||||
Check upstream for updates to addons like `kube-dns` or `kube-dashboard` and update them like any other applications. Some kube-system components use version labels and you may wish to clean those up as well.
|
||||
@@ -1,126 +0,0 @@
|
||||
# Self-hosted Kubernetes
|
||||
|
||||
The self-hosted Kubernetes example provisions a 3 node "self-hosted" Kubernetes v1.6.4 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes and coordinate Container Linux auto-updates (enabled for disk installs).
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) or [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use rkt or Docker to start `matchbox`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`)
|
||||
|
||||
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.4.4 and add it somewhere on your PATH.
|
||||
|
||||
```sh
|
||||
$ bootkube version
|
||||
Version: v0.4.4
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [bootkube](../examples/groups/bootkube) - iPXE boot a self-hosted Kubernetes cluster
|
||||
* [bootkube-install](../examples/groups/bootkube-install) - Install a self-hosted Kubernetes cluster
|
||||
|
||||
## Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
```
|
||||
|
||||
Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
|
||||
```json
|
||||
{
|
||||
"profile": "bootkube-worker",
|
||||
"metadata": {
|
||||
"ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Later, `bootkube` will schedule these manifests during bootstrapping and the credentials will be used to access your cluster.
|
||||
|
||||
```sh
|
||||
$ bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=http://127.0.0.1:2379
|
||||
```
|
||||
|
||||
## Containers
|
||||
|
||||
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## bootkube
|
||||
|
||||
We're ready to use bootkube to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
|
||||
|
||||
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node which will path activate the `kubelet.service`.
|
||||
|
||||
```bash
|
||||
for node in 'node1' 'node2' 'node3'; do
|
||||
scp assets/auth/kubeconfig core@$node.example.com:/home/core/kubeconfig
|
||||
ssh core@$node.example.com 'sudo mv kubeconfig /etc/kubernetes/kubeconfig'
|
||||
done
|
||||
```
|
||||
|
||||
Secure copy the `bootkube` generated assets to any controller node and run `bootkube-start`.
|
||||
|
||||
```sh
|
||||
$ scp -r assets core@node1.example.com:/home/core
|
||||
$ ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
|
||||
```
|
||||
|
||||
Optionally watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
|
||||
|
||||
```sh
|
||||
$ ssh core@node1.example.com 'journalctl -f -u bootkube'
|
||||
[ 299.241291] bootkube[5]: Pod Status: kube-api-checkpoint Running
|
||||
[ 299.241618] bootkube[5]: Pod Status: kube-apiserver Running
|
||||
[ 299.241804] bootkube[5]: Pod Status: kube-scheduler Running
|
||||
[ 299.241993] bootkube[5]: Pod Status: kube-controller-manager Running
|
||||
[ 299.311743] bootkube[5]: All self-hosted control plane components successfully started
|
||||
```
|
||||
|
||||
You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the kubelet, apiserver, scheduler, and controller-manager are running as pods.
|
||||
|
||||
```sh
|
||||
$ KUBECONFIG=assets/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1.example.com Ready 3m
|
||||
node2.example.com Ready 3m
|
||||
node3.example.com Ready 3m
|
||||
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system checkpoint-installer-p8g8r 1/1 Running 1 13m
|
||||
kube-system kube-apiserver-s5gnx 1/1 Running 1 41s
|
||||
kube-system kube-controller-manager-3438979800-jrlnd 1/1 Running 1 13m
|
||||
kube-system kube-controller-manager-3438979800-tkjx7 1/1 Running 1 13m
|
||||
kube-system kube-dns-4101612645-xt55f 4/4 Running 4 13m
|
||||
kube-system kube-flannel-pl5c2 2/2 Running 0 13m
|
||||
kube-system kube-flannel-r9t5r 2/2 Running 3 13m
|
||||
kube-system kube-flannel-vfb0s 2/2 Running 4 13m
|
||||
kube-system kube-proxy-cvhmj 1/1 Running 0 13m
|
||||
kube-system kube-proxy-hf9mh 1/1 Running 1 13m
|
||||
kube-system kube-proxy-kpl73 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-1l23b 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-fks0b 1/1 Running 1 13m
|
||||
kube-system pod-checkpointer-node1.example.com 1/1 Running 2 10m
|
||||
```
|
||||
|
||||
Try deleting pods to see that the cluster is resilient to failures and machine restarts (CoreOS auto-updates).
|
||||
|
||||
## Going further
|
||||
|
||||
[Learn](bootkube-upgrades.md) to upgrade a self-hosted Kubernetes cluster.
|
||||
@@ -1,183 +0,0 @@
|
||||
# Getting started with rkt
|
||||
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of QEMU/KVM CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
## Requirements
|
||||
|
||||
Install [rkt](https://coreos.com/rkt/docs/latest/distributions.html) 1.12.0 or higher ([example script](https://github.com/dghubble/phoenix/blob/master/fedora/sources.sh)) and setup rkt [privilege separation](https://coreos.com/rkt/docs/latest/trying-out-rkt.html).
|
||||
|
||||
Next, install the package dependencies.
|
||||
|
||||
```sh
|
||||
# Fedora
|
||||
$ sudo dnf install virt-install virt-manager
|
||||
|
||||
# Debian/Ubuntu
|
||||
$ sudo apt-get install virt-manager virtinst qemu-kvm systemd-container
|
||||
```
|
||||
|
||||
**Note**: rkt does not yet integrate with SELinux on Fedora. As a workaround, temporarily set enforcement to permissive if you are comfortable (`sudo setenforce Permissive`). Check the rkt [distribution notes](https://github.com/coreos/rkt/blob/master/Documentation/distributions.md) or see the tracking [issue](https://github.com/coreos/rkt/issues/1727).
|
||||
|
||||
Clone the [matchbox](https://github.com/coreos/matchbox) source which contains the examples and scripts.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS image assets referenced by the `etcd` [example](../examples) to `examples/assets`.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
```
|
||||
|
||||
## Network
|
||||
|
||||
Define the `metal0` virtual bridge with [CNI](https://github.com/appc/cni).
|
||||
|
||||
```bash
|
||||
sudo mkdir -p /etc/rkt/net.d
|
||||
sudo bash -c 'cat > /etc/rkt/net.d/20-metal.conf << EOF
|
||||
{
|
||||
"name": "metal0",
|
||||
"type": "bridge",
|
||||
"bridge": "metal0",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "172.18.0.0/24",
|
||||
"routes" : [ { "dst" : "0.0.0.0/0" } ]
|
||||
}
|
||||
}
|
||||
EOF'
|
||||
```
|
||||
|
||||
On Fedora, add the `metal0` interface to the trusted zone in your firewall configuration.
|
||||
|
||||
```sh
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted --permanent
|
||||
```
|
||||
|
||||
For development convenience, you may wish to add `/etc/hosts` entries for nodes to refer to them by name.
|
||||
|
||||
```
|
||||
# /etc/hosts
|
||||
...
|
||||
172.18.0.21 node1.example.com
|
||||
172.18.0.22 node2.example.com
|
||||
172.18.0.23 node3.example.com
|
||||
```
|
||||
|
||||
## Containers
|
||||
|
||||
Run the `matchbox` and `dnsmasq` services on the `metal0` bridge. `dnsmasq` will run DHCP, DNS, and TFTP services to create a suitable network boot environment. `matchbox` will serve provisioning configs to machines on the network which attempt to PXE boot.
|
||||
|
||||
The `devnet` wrapper script runs `matchbox` and `dnsmasq` in rkt pods as systemd transient units. `create` can take the name of any example cluster in [examples](../examples).
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/devnet create etcd3
|
||||
```
|
||||
|
||||
Inspect the journal logs or check the status of the systemd services.
|
||||
|
||||
```
|
||||
$ sudo ./scripts/devnet status
|
||||
$ journalctl -f -u dev-matchbox
|
||||
$ journalctl -f -u dev-dnsmasq
|
||||
```
|
||||
|
||||
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
|
||||
|
||||
* iPXE [http://172.18.0.2:8080/ipxe?mac=52:54:00:a1:9c:ae](http://172.18.0.2:8080/ipxe?mac=52:54:00:a1:9c:ae)
|
||||
* Ignition [http://172.18.0.2:8080/ignition?mac=52:54:00:a1:9c:ae](http://172.18.0.2:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* Metadata [http://172.18.0.2:8080/metadata?mac=52:54:00:a1:9c:ae](http://172.18.0.2:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
|
||||
### Manual
|
||||
|
||||
If you prefer to start the containers yourself, instead of using `devnet`,
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.2 \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--volume data,kind=host,source=$PWD/examples \
|
||||
--mount volume=groups,target=/var/lib/matchbox/groups \
|
||||
--volume groups,kind=host,source=$PWD/examples/groups/etcd3 \
|
||||
quay.io/coreos/matchbox:v0.6.0 -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.3 \
|
||||
--dns=host \
|
||||
--mount volume=config,target=/etc/dnsmasq.conf \
|
||||
--volume config,kind=host,source=$PWD/contrib/dnsmasq/metal0.conf \
|
||||
quay.io/coreos/dnsmasq:v0.4.0 \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW
|
||||
```
|
||||
|
||||
If you get an error about the IP assignment, stop old pods and run garbage collection.
|
||||
|
||||
```sh
|
||||
$ sudo rkt gc --grace-period=0
|
||||
```
|
||||
|
||||
## Client VMs
|
||||
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `metal0` bridge, where your pods run.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create
|
||||
```
|
||||
|
||||
You can connect to the serial console of any node. If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
|
||||
```sh
|
||||
$ sudo virsh console node1
|
||||
```
|
||||
|
||||
You can also use `virt-manager` to watch the console.
|
||||
|
||||
```sh
|
||||
$ sudo virt-manager
|
||||
```
|
||||
|
||||
Use the wrapper script to act on all nodes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
The VMs should network boot and provision themselves into a three node etcd3 cluster, with other nodes behaving as etcd3 gateways.
|
||||
|
||||
The example profile added autologin so you can verify that etcd3 works between nodes.
|
||||
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ export ETCDCTL_API=3
|
||||
$ etcdctl put /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
|
||||
## Clean up
|
||||
|
||||
Clean up the systemd units running `matchbox` and `dnsmasq`.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/devnet destroy
|
||||
```
|
||||
|
||||
Clean up VM machines.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt destroy
|
||||
```
|
||||
|
||||
Press ^] three times to stop any rkt pod.
|
||||
|
||||
## Going further
|
||||
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`.
|
||||
@@ -1,66 +0,0 @@
|
||||
# GRUB2 netboot
|
||||
|
||||
Use GRUB to network boot UEFI hardware.
|
||||
|
||||
## Requirements
|
||||
|
||||
For local development, install the dependencies for libvirt with UEFI.
|
||||
|
||||
* [UEFI with QEMU](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU)
|
||||
|
||||
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) and [matchbox](matchbox.md) guides and understand the basics.
|
||||
|
||||
## Containers
|
||||
|
||||
Run `matchbox` with rkt, but mount the [grub](../examples/groups/grub) group example.
|
||||
|
||||
## Network
|
||||
|
||||
On Fedora, add the `metal0` interface to the trusted zone in your firewall configuration.
|
||||
|
||||
```sh
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
```
|
||||
|
||||
Run the `quay.io/coreos/dnsmasq` container image with rkt or docker.
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.3 quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=172.18.0.50,172.18.0.99 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:efi-bc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efi-bc,grub.efi \
|
||||
--dhcp-userclass=set:grub,GRUB2 \
|
||||
--dhcp-boot=tag:grub,"(http;matchbox.example.com:8080)/grub","172.18.0.2" \
|
||||
--log-queries \
|
||||
--log-dhcp \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:pxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.foo/172.18.0.2
|
||||
```
|
||||
|
||||
## Client VM
|
||||
|
||||
Create UEFI VM nodes which have known hardware attributes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
If you use Docker, run `matchbox` according to [matchbox with Docker](getting-started-docker.md), but mount the [grub](../examples/groups/grub) group example. Then start the `coreos/dnsmasq` Docker image, which bundles a `grub.efi`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;matchbox.foo:8080)/grub","172.17.0.2" --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe --address=/matchbox.foo/172.17.0.2
|
||||
```
|
||||
|
||||
Create a VM to verify the machine network boots.
|
||||
|
||||
```sh
|
||||
$ sudo virt-install --name uefi-test --pxe --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```
|
||||
|
Before Width: | Height: | Size: 116 KiB |
|
Before Width: | Height: | Size: 107 KiB |
@@ -1,15 +0,0 @@
|
||||
# Lifecycle of a physical machine
|
||||
|
||||
## About boot environment
|
||||
|
||||
Physical machines [network boot](network-booting.md) in a network boot environment with DHCP/TFTP/DNS services or with [coreos/dnsmasq](../contrib/dnsmasq).
|
||||
|
||||
`matchbox` serves iPXE, GRUB, or Pixiecore boot configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS. The "first boot" Ignition config is fetched and CoreOS is installed.
|
||||
|
||||
CoreOS boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
|
||||
|
||||
Coordinated auto-updates are enabled. Systems like [fleet](https://coreos.com/docs/#fleet) or [Kubernetes](http://kubernetes.io/docs/) coordinate container services. IPMI, vendor utilities, or first-boot are used to re-provision machines into new roles.
|
||||
|
||||
## Machine lifecycle
|
||||
|
||||

|
||||
@@ -1,87 +0,0 @@
|
||||
# Kubernetes (with rkt)
|
||||
|
||||
The `rktnetes` example provisions a 3 node Kubernetes v1.5.5 cluster with [rkt](https://github.com/coreos/rkt) as the container runtime. The cluster has one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) or [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use rkt or Docker to start `matchbox`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`)
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. VMs are setup on the `metal0` CNI bridge for rkt or the `docker0` bridge for Docker. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [rktnetes](../examples/groups/rktnetes) - iPXE boot a Kubernetes cluster
|
||||
* [rktnetes-install](../examples/groups/rktnetes-install) - Install a Kubernetes cluster to disk
|
||||
* [Lab examples](https://github.com/dghubble/metal) - Lab hardware examples
|
||||
|
||||
## Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
```
|
||||
|
||||
Optionally, add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
|
||||
Generate a root CA and Kubernetes TLS assets for components (`admin`, `apiserver`, `worker`) with SANs for `node1.example.com`, etc.
|
||||
|
||||
```sh
|
||||
$ rm -rf examples/assets/tls
|
||||
$ ./scripts/tls/k8s-certgen
|
||||
```
|
||||
|
||||
**Note**: TLS assets are served to any machines which request them, which requires a trusted network. Alternately, provisioning may be tweaked to require TLS assets be securely copied to each host.
|
||||
|
||||
## Containers
|
||||
|
||||
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS in about 1 minute and the Kubernetes API should be available after 3-4 minutes (each node downloads a ~160MB Hyperkube). If you chose `rktnetes-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision Kubernetes clusters on physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`.
|
||||
|
||||
```sh
|
||||
$ export KUBECONFIG=examples/assets/tls/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1.example.com Ready 3m
|
||||
node2.example.com Ready 3m
|
||||
node3.example.com Ready 3m
|
||||
```
|
||||
|
||||
Get all pods.
|
||||
|
||||
```sh
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system heapster-v1.2.0-4088228293-k3yn8 2/2 Running 0 3m
|
||||
kube-system kube-apiserver-node1.example.com 1/1 Running 0 4m
|
||||
kube-system kube-controller-manager-node1.example.com 1/1 Running 0 3m
|
||||
kube-system kube-dns-v19-l2u8r 3/3 Running 0 4m
|
||||
kube-system kube-proxy-node1.example.com 1/1 Running 0 3m
|
||||
kube-system kube-proxy-node2.example.com 1/1 Running 0 3m
|
||||
kube-system kube-proxy-node3.example.com 1/1 Running 0 3m
|
||||
kube-system kube-scheduler-node1.example.com 1/1 Running 0 3m
|
||||
kube-system kubernetes-dashboard-v1.4.1-0iy07 1/1 Running 0 4m
|
||||
```
|
||||
|
||||
## Kubernetes Dashboard
|
||||
|
||||
Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod.
|
||||
|
||||
```sh
|
||||
$ kubectl port-forward kubernetes-dashboard-v1.4.1-SOME-ID 9090 -n=kube-system
|
||||
Forwarding from 127.0.0.1:9090 -> 9090
|
||||
```
|
||||
|
||||
Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/).
|
||||
|
||||
<img src='img/kubernetes-dashboard.png' class="img-center" alt="Kubernetes Dashboard"/>
|
||||
@@ -1,19 +0,0 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Firewall
|
||||
|
||||
Running DHCP or proxyDHCP with `coreos/dnsmasq` on a host requires that the Firewall allow DHCP and TFTP (for chainloading) services to run.
|
||||
|
||||
## Port collision
|
||||
|
||||
Running DHCP or proxyDHCP can cause port already in use collisions depending on what's running. Fedora runs bootp listening on udp/67 for example. Find the service using the port.
|
||||
|
||||
```sh
|
||||
$ sudo lsof -i :67
|
||||
```
|
||||
|
||||
Evaluate whether you can configure the existing service or whether you'd like to stop it and test with `coreos/dnsmasq`.
|
||||
|
||||
## No boot filename received
|
||||
|
||||
PXE client firmware did not receive a DHCP Offer with PXE-Options after several attempts. If you're using the `coreos/dnsmasq` image with `-d`, each request should log to stdout. Using the wrong `-i` interface is the most common reason DHCP requests are not received. Otherwise, wireshark can be useful for investigating.
|
||||
46
Jenkinsfile
vendored
@@ -1,46 +0,0 @@
|
||||
properties([
|
||||
[$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', numToKeepStr: '20']],
|
||||
[$class: 'GithubProjectProperty', projectUrlStr: 'https://github.com/coreos/matchbox'],
|
||||
[$class: 'PipelineTriggersJobProperty', triggers: [
|
||||
[$class: 'GitHubPushTrigger'],
|
||||
]]
|
||||
])
|
||||
parallel (
|
||||
etcd3: {
|
||||
node('fedora && bare-metal') {
|
||||
stage('etcd3') {
|
||||
timeout(time:5, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/etcd3
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
bootkube: {
|
||||
node('fedora && bare-metal') {
|
||||
stage('bootkube') {
|
||||
timeout(time:12, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
chmod 600 ./tests/smoke/fake_rsa
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/bootkube
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"etcd3-terraform": {
|
||||
node('fedora && bare-metal') {
|
||||
stage('etcd3-terraform') {
|
||||
timeout(time:10, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/etcd3-terraform
|
||||
'''
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
64
Makefile
@@ -1,52 +1,72 @@
|
||||
export CGO_ENABLED:=0
|
||||
export GO111MODULE=on
|
||||
export GOFLAGS=-mod=vendor
|
||||
|
||||
VERSION=$(shell ./scripts/git-version)
|
||||
LD_FLAGS="-w -X github.com/coreos/matchbox/matchbox/version.Version=$(VERSION)"
|
||||
VERSION=$(shell git describe --tags --match=v* --always --dirty)
|
||||
LD_FLAGS="-w -X github.com/poseidon/matchbox/matchbox/version.Version=$(VERSION)"
|
||||
|
||||
REPO=github.com/coreos/matchbox
|
||||
IMAGE_REPO=coreos/matchbox
|
||||
QUAY_REPO=quay.io/coreos/matchbox
|
||||
REPO=github.com/poseidon/matchbox
|
||||
LOCAL_REPO=poseidon/matchbox
|
||||
IMAGE_REPO=quay.io/poseidon/matchbox
|
||||
|
||||
all: build
|
||||
.PHONY: all
|
||||
all: build test vet lint fmt
|
||||
|
||||
.PHONY: build
|
||||
build: clean bin/matchbox
|
||||
|
||||
bin/%:
|
||||
@go build -o bin/$* -v -ldflags $(LD_FLAGS) $(REPO)/cmd/$*
|
||||
git describe --tags --match=v* --always --dirty
|
||||
git status
|
||||
git diff
|
||||
@go build -o bin/$* -ldflags $(LD_FLAGS) $(REPO)/cmd/$*
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@./scripts/test
|
||||
@go test ./... -cover
|
||||
|
||||
.PHONY: aci
|
||||
aci: clean build
|
||||
@sudo ./scripts/build-aci
|
||||
.PHONY: vet
|
||||
vet:
|
||||
@go vet -all ./...
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
@golint -set_exit_status `go list ./... | grep -v pb`
|
||||
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
@test -z $$(go fmt ./...)
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image:
|
||||
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
@sudo docker build --rm=true -t $(LOCAL_REPO):$(VERSION) .
|
||||
@sudo docker tag $(LOCAL_REPO):$(VERSION) $(LOCAL_REPO):latest
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push: docker-image
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):latest
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):$(VERSION)
|
||||
@sudo docker push $(QUAY_REPO):latest
|
||||
@sudo docker push $(QUAY_REPO):$(VERSION)
|
||||
@sudo docker tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
@sudo docker tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):$(VERSION)
|
||||
@sudo docker push $(IMAGE_REPO):latest
|
||||
@sudo docker push $(IMAGE_REPO):$(VERSION)
|
||||
|
||||
.PHONY: update
|
||||
update:
|
||||
@GOFLAGS="" go get -u
|
||||
@go mod tidy
|
||||
|
||||
.PHONY: vendor
|
||||
vendor:
|
||||
@glide update --strip-vendor
|
||||
@glide-vc --use-lock-file --no-tests --only-code
|
||||
@go mod vendor
|
||||
|
||||
.PHONY: codegen
|
||||
codegen: tools
|
||||
@./scripts/codegen
|
||||
@./scripts/dev/codegen
|
||||
|
||||
.PHONY: tools
|
||||
tools: bin/protoc bin/protoc-gen-go
|
||||
|
||||
bin/protoc:
|
||||
@./scripts/get-protoc
|
||||
@./scripts/dev/get-protoc
|
||||
|
||||
bin/protoc-gen-go:
|
||||
@go build -o bin/protoc-gen-go $(REPO)/vendor/github.com/golang/protobuf/protoc-gen-go
|
||||
@@ -78,7 +98,7 @@ _output/matchbox-%.tar.gz: DEST=_output/$(NAME)
|
||||
_output/matchbox-%.tar.gz: bin/%/matchbox
|
||||
mkdir -p $(DEST)
|
||||
cp bin/$*/matchbox $(DEST)
|
||||
./scripts/release-files $(DEST)
|
||||
./scripts/dev/release-files $(DEST)
|
||||
tar zcvf $(DEST).tar.gz -C _output $(NAME)
|
||||
|
||||
.PHONY: all build clean test release
|
||||
|
||||
5
NOTICE
@@ -1,5 +0,0 @@
|
||||
CoreOS Project
|
||||
Copyright 2015 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
||||
80
README.md
@@ -1,71 +1,37 @@
|
||||
# matchbox [](https://travis-ci.org/coreos/matchbox) [](https://godoc.org/github.com/coreos/matchbox) [](https://quay.io/repository/coreos/matchbox) [](https://botbot.me/freenode/coreos)
|
||||
# matchbox [](https://travis-ci.org/poseidon/matchbox) [](https://godoc.org/github.com/poseidon/matchbox) [](https://quay.io/repository/poseidon/matchbox)
|
||||
|
||||
**Announcement**: Matchbox [v0.6.0](https://github.com/coreos/matchbox/releases) is released with a new [Matchbox Terraform Provider][terraform] and [tutorial](Documentation/getting-started.md).
|
||||
`matchbox` is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. Machines are matched by labels like MAC or UUID during PXE and profiles specify a kernel/initrd, iPXE config, and Container Linux or Fedora CoreOS config.
|
||||
|
||||
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles to PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], [Cloud-Config][cloud-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [terraform][terraform].
|
||||
## Features
|
||||
|
||||
* [Documentation][docs]
|
||||
* [matchbox Service](Documentation/matchbox.md)
|
||||
* [Profiles](Documentation/matchbox.md#profiles)
|
||||
* [Groups](Documentation/matchbox.md#groups)
|
||||
* Config Templates
|
||||
* [Container Linux Config][cl-config]
|
||||
* [Cloud-Config][cloud-config]
|
||||
* [Configuration](Documentation/config.md)
|
||||
* [HTTP API](Documentation/api.md) / [gRPC API](https://godoc.org/github.com/coreos/matchbox/matchbox/client)
|
||||
* [Background: Machine Lifecycle](Documentation/machine-lifecycle.md)
|
||||
* [Background: PXE Booting](Documentation/network-booting.md)
|
||||
* Chainload via iPXE and match hardware labels
|
||||
* Provision Container Linux and Fedora CoreOS (powered by [Ignition](https://github.com/coreos/ignition))
|
||||
* Authenticated gRPC API for clients (e.g. Terraform)
|
||||
|
||||
### Installation
|
||||
## Documentation
|
||||
|
||||
* Installation
|
||||
* Installing on [CoreOS / Linux distros](Documentation/deployment.md)
|
||||
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
|
||||
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
|
||||
* [Network Setup](Documentation/network-setup.md)
|
||||
* [Docs](https://matchbox.psdn.io/)
|
||||
* [Configuration](docs/config.md)
|
||||
* [HTTP API](docs/api-http.md) / [gRPC API](docs/grpc-api.md)
|
||||
|
||||
### Tutorials
|
||||
## Installation
|
||||
|
||||
* [Getting Started](Documentation/getting-started.md)
|
||||
Matchbox can be installed from a binary or a container image.
|
||||
|
||||
Local QEMU/KVM
|
||||
* Install Matchbox on [Kubernetes](docs/deployment.md#kubernetes), on a [Linux](docs/deployment.md) host, or as a [container](docs/deployment.md#docker)
|
||||
* Setup a PXE-enabled [network](docs/network-setup.md)
|
||||
|
||||
* [matchbox with rkt](Documentation/getting-started-rkt.md)
|
||||
* [matchbox with Docker](Documentation/getting-started-docker.md)
|
||||
## Tutorials
|
||||
|
||||
### Example Clusters
|
||||
[Getting started](docs/getting-started.md) provisioning machines with Container Linux.
|
||||
|
||||
Create [example](examples) clusters on-premise or locally with [QEMU/KVM](scripts/README.md#libvirt).
|
||||
|
||||
**Terraform-based**
|
||||
|
||||
* [simple-install](Documentation/getting-started.md) - Install Container Linux with an SSH key on all machines (beginner)
|
||||
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster
|
||||
* Terraform [Modules](examples/terraform/modules) - Re-usable Terraform Modules
|
||||
|
||||
**Manual**
|
||||
|
||||
* [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster
|
||||
* Local QEMU/KVM
|
||||
* [matchbox with Docker](docs/getting-started-docker.md)
|
||||
* Clusters
|
||||
* [etcd3](docs/getting-started-docker.md) - Install a 3-node etcd3 cluster
|
||||
* [etcd3](https://github.com/poseidon/matchbox/tree/master/examples/terraform/etcd3-install) - Install a 3-node etcd3 cluster (terraform-based)
|
||||
|
||||
## Contrib
|
||||
|
||||
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services with docker or rkt
|
||||
* [squid](contrib/squid/README.md) - Run a transparent cache proxy
|
||||
* [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) - Terraform plugin which supports "matchbox" provider
|
||||
|
||||
## Enterprise
|
||||
|
||||
[Tectonic](https://coreos.com/tectonic/) is the enterprise-ready Kubernetes offering from CoreOS (free for 10 nodes!). The [Tectonic Installer](https://coreos.com/tectonic/docs/latest/install/bare-metal/#4-tectonic-installer) app integrates directly with `matchbox` through its gRPC API to provide a rich graphical client for populating `matchbox` with machine configs.
|
||||
|
||||
Learn more from our [docs](https://coreos.com/tectonic/docs/latest/) or [blog](https://coreos.com/blog/announcing-tectonic-1.6).
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
[docs]: https://coreos.com/matchbox/docs/latest
|
||||
[terraform]: https://github.com/coreos/terraform-provider-matchbox
|
||||
[cl-config]: Documentation/container-linux-config.md
|
||||
[cloud-config]: Documentation/cloud-config.md
|
||||
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services as a container
|
||||
* [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
package main
|
||||
|
||||
import "github.com/coreos/matchbox/matchbox/cli"
|
||||
import "github.com/poseidon/matchbox/matchbox/cli"
|
||||
|
||||
func main() {
|
||||
cli.Execute()
|
||||
|
||||
@@ -7,16 +7,15 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/coreos/pkg/flagutil"
|
||||
|
||||
web "github.com/coreos/matchbox/matchbox/http"
|
||||
"github.com/coreos/matchbox/matchbox/rpc"
|
||||
"github.com/coreos/matchbox/matchbox/server"
|
||||
"github.com/coreos/matchbox/matchbox/sign"
|
||||
"github.com/coreos/matchbox/matchbox/storage"
|
||||
"github.com/coreos/matchbox/matchbox/tlsutil"
|
||||
"github.com/coreos/matchbox/matchbox/version"
|
||||
web "github.com/poseidon/matchbox/matchbox/http"
|
||||
"github.com/poseidon/matchbox/matchbox/rpc"
|
||||
"github.com/poseidon/matchbox/matchbox/server"
|
||||
"github.com/poseidon/matchbox/matchbox/sign"
|
||||
"github.com/poseidon/matchbox/matchbox/storage"
|
||||
"github.com/poseidon/matchbox/matchbox/tlsutil"
|
||||
"github.com/poseidon/matchbox/matchbox/version"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -26,35 +25,44 @@ var (
|
||||
|
||||
func main() {
|
||||
flags := struct {
|
||||
address string
|
||||
rpcAddress string
|
||||
dataPath string
|
||||
assetsPath string
|
||||
logLevel string
|
||||
certFile string
|
||||
keyFile string
|
||||
caFile string
|
||||
keyRingPath string
|
||||
version bool
|
||||
help bool
|
||||
address string
|
||||
rpcAddress string
|
||||
dataPath string
|
||||
assetsPath string
|
||||
logLevel string
|
||||
grpcCAFile string
|
||||
grpcCertFile string
|
||||
grpcKeyFile string
|
||||
tlsCertFile string
|
||||
tlsKeyFile string
|
||||
tlsEnabled bool
|
||||
keyRingPath string
|
||||
version bool
|
||||
help bool
|
||||
}{}
|
||||
flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
|
||||
flag.StringVar(&flags.rpcAddress, "rpc-address", "", "RPC listen address")
|
||||
flag.StringVar(&flags.dataPath, "data-path", "/var/lib/matchbox", "Path to data directory")
|
||||
flag.StringVar(&flags.assetsPath, "assets-path", "/var/lib/matchbox/assets", "Path to static assets")
|
||||
|
||||
// Log levels https://github.com/Sirupsen/logrus/blob/master/logrus.go#L36
|
||||
// Log levels https://github.com/sirupsen/logrus/blob/master/logrus.go#L36
|
||||
flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
|
||||
|
||||
// gRPC Server TLS
|
||||
flag.StringVar(&flags.certFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.keyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
|
||||
// TLS Client Authentication
|
||||
flag.StringVar(&flags.caFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
|
||||
flag.StringVar(&flags.grpcCertFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.grpcKeyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
|
||||
|
||||
// gRPC TLS Client Authentication
|
||||
flag.StringVar(&flags.grpcCAFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
|
||||
|
||||
// Signing
|
||||
flag.StringVar(&flags.keyRingPath, "key-ring-path", "", "Path to a private keyring file")
|
||||
|
||||
// SSL flags
|
||||
flag.StringVar(&flags.tlsCertFile, "web-cert-file", "/etc/matchbox/ssl/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.tlsKeyFile, "web-key-file", "/etc/matchbox/ssl/server.key", "Path to the server TLS key file")
|
||||
flag.BoolVar(&flags.tlsEnabled, "web-ssl", false, "True to enable HTTPS")
|
||||
|
||||
// subcommands
|
||||
flag.BoolVar(&flags.version, "version", false, "print version and exit")
|
||||
flag.BoolVar(&flags.help, "help", false, "print usage and exit")
|
||||
@@ -87,16 +95,24 @@ func main() {
|
||||
}
|
||||
}
|
||||
if flags.rpcAddress != "" {
|
||||
if _, err := os.Stat(flags.certFile); err != nil {
|
||||
if _, err := os.Stat(flags.grpcCertFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server certificate with -cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.keyFile); err != nil {
|
||||
if _, err := os.Stat(flags.grpcKeyFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server key with -key-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.caFile); err != nil {
|
||||
if _, err := os.Stat(flags.grpcCAFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS certificate authority for authorizing client certificates: %v", err)
|
||||
}
|
||||
}
|
||||
if flags.tlsEnabled {
|
||||
if _, err := os.Stat(flags.tlsCertFile); err != nil {
|
||||
log.Fatalf("Provide a valid SSL server certificate with -web-cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.tlsKeyFile); err != nil {
|
||||
log.Fatalf("Provide a valid SSL server key with -web-key-file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// logging setup
|
||||
lvl, err := logrus.ParseLevel(flags.logLevel)
|
||||
@@ -130,17 +146,17 @@ func main() {
|
||||
// gRPC Server (feature disabled by default)
|
||||
if flags.rpcAddress != "" {
|
||||
log.Infof("Starting matchbox gRPC server on %s", flags.rpcAddress)
|
||||
log.Infof("Using TLS server certificate: %s", flags.certFile)
|
||||
log.Infof("Using TLS server key: %s", flags.keyFile)
|
||||
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.caFile)
|
||||
log.Infof("Using TLS server certificate: %s", flags.grpcCertFile)
|
||||
log.Infof("Using TLS server key: %s", flags.grpcKeyFile)
|
||||
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.grpcCAFile)
|
||||
lis, err := net.Listen("tcp", flags.rpcAddress)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
tlsinfo := tlsutil.TLSInfo{
|
||||
CertFile: flags.certFile,
|
||||
KeyFile: flags.keyFile,
|
||||
CAFile: flags.caFile,
|
||||
CertFile: flags.grpcCertFile,
|
||||
KeyFile: flags.grpcKeyFile,
|
||||
CAFile: flags.grpcCAFile,
|
||||
}
|
||||
tlscfg, err := tlsinfo.ServerConfig()
|
||||
if err != nil {
|
||||
@@ -151,7 +167,6 @@ func main() {
|
||||
defer grpcServer.Stop()
|
||||
}
|
||||
|
||||
// HTTP Server
|
||||
config := &web.Config{
|
||||
Core: server,
|
||||
Logger: log,
|
||||
@@ -160,9 +175,23 @@ func main() {
|
||||
ArmoredSigner: armoredSigner,
|
||||
}
|
||||
httpServer := web.NewServer(config)
|
||||
log.Infof("Starting matchbox HTTP server on %s", flags.address)
|
||||
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
|
||||
if flags.tlsEnabled {
|
||||
// HTTPS Server
|
||||
log.Infof("Starting matchbox HTTPS server on %s", flags.address)
|
||||
log.Infof("Using SSL server certificate: %s", flags.tlsCertFile)
|
||||
log.Infof("Using SSL server key: %s", flags.tlsKeyFile)
|
||||
err = http.ListenAndServeTLS(flags.address, flags.tlsCertFile, flags.tlsKeyFile, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
} else {
|
||||
// HTTP Server
|
||||
log.Infof("Starting matchbox HTTP server on %s", flags.address)
|
||||
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -2,6 +2,11 @@
|
||||
|
||||
Notable changes image releases. The dnsmasq project [upstream](http://www.thekelleys.org.uk/dnsmasq/doc.html) has its own [changelog](http://www.thekelleys.org.uk/dnsmasq/CHANGELOG).
|
||||
|
||||
## v0.4.1
|
||||
|
||||
* Rebuild with alpine:3.6 base image
|
||||
* Add EXPOSE ports 67 and 69 to Dockerfile
|
||||
|
||||
## v0.4.0
|
||||
|
||||
* `dnsmasq` package version 2.76
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM alpine:3.5
|
||||
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
|
||||
FROM alpine:3.10
|
||||
LABEL maintainer="Dalton Hubble <dghubble@gmail.com>"
|
||||
RUN apk -U add dnsmasq curl
|
||||
COPY tftpboot /var/lib/tftpboot
|
||||
EXPOSE 53
|
||||
ENTRYPOINT ["/usr/sbin/dnsmasq"]
|
||||
EXPOSE 53 67 69
|
||||
ENTRYPOINT ["/usr/sbin/dnsmasq"]
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
VERSION=v0.4.0
|
||||
DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
|
||||
VERSION=$(shell git rev-parse HEAD)
|
||||
|
||||
IMAGE_REPO=coreos/dnsmasq
|
||||
QUAY_REPO=quay.io/coreos/dnsmasq
|
||||
IMAGE_REPO=poseidon/dnsmasq
|
||||
QUAY_REPO=quay.io/poseidon/dnsmasq
|
||||
|
||||
.PHONY: all
|
||||
all: docker-image
|
||||
|
||||
.PHONY: undionly
|
||||
undionly:
|
||||
@./get-tftp-files
|
||||
.PHONY: tftp
|
||||
tftp:
|
||||
@$(DIR)/get-tftp-files
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image: undionly
|
||||
docker-image: tftp
|
||||
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
|
||||
|
||||
@@ -1,42 +1,34 @@
|
||||
# dnsmasq [](https://quay.io/repository/coreos/dnsmasq)
|
||||
# dnsmasq [](https://quay.io/repository/poseidon/dnsmasq)
|
||||
|
||||
`dnsmasq` provides a container image for running DHCP, proxy DHCP, DNS, and/or TFTP with [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html). Use it to test different network setups with clusters of network bootable machines.
|
||||
|
||||
The image bundles `undionly.kpxe` which chainloads PXE clients to iPXE and `grub.efi` (experimental) which chainloads UEFI architectures to GRUB2.
|
||||
The image bundles `undionly.kpxe`, `ipxe.efi`, and `grub.efi` (experimental) for chainloading BIOS and UEFI clients to iPXE.
|
||||
|
||||
## Usage
|
||||
|
||||
Run the container image as a DHCP, DNS, and TFTP service.
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/poseidon/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
Press ^] three times to stop the rkt pod. Press ctrl-C to stop the Docker container.
|
||||
Press ctrl-C to stop the Docker container.
|
||||
|
||||
## Configuration Flags
|
||||
|
||||
@@ -53,8 +45,13 @@ Configuration arguments can be provided as flags. Check the dnsmasq [man pages](
|
||||
|
||||
Build a container image locally.
|
||||
|
||||
make docker-image
|
||||
```
|
||||
make docker-image
|
||||
```
|
||||
|
||||
Run the image with Docker on the `docker0` bridge (default).
|
||||
|
||||
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
|
||||
```
|
||||
sudo docker run --rm --cap-add=NET_ADMIN poseidon/dnsmasq -d -q
|
||||
```
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# dnsmasq.conf
|
||||
|
||||
no-daemon
|
||||
dhcp-range=172.17.0.50,172.17.0.99
|
||||
dhcp-option=3,172.17.0.1
|
||||
dhcp-host=52:54:00:a1:9c:ae,172.17.0.21,1h
|
||||
@@ -10,15 +11,27 @@ dhcp-host=52:54:00:d7:99:c7,172.17.0.24,1h
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
# Legacy PXE
|
||||
dhcp-match=set:bios,option:client-arch,0
|
||||
dhcp-boot=tag:bios,undionly.kpxe
|
||||
|
||||
# UEFI
|
||||
dhcp-match=set:efi32,option:client-arch,6
|
||||
dhcp-boot=tag:efi32,ipxe.efi
|
||||
|
||||
dhcp-match=set:efibc,option:client-arch,7
|
||||
dhcp-boot=tag:efibc,ipxe.efi
|
||||
|
||||
dhcp-match=set:efi64,option:client-arch,9
|
||||
dhcp-boot=tag:efi64,ipxe.efi
|
||||
|
||||
# iPXE
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:#ipxe,undionly.kpxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
address=/bootcfg.foo/172.18.0.2
|
||||
address=/matchbox.foo/172.17.0.2
|
||||
address=/matchbox.example.com/172.17.0.2
|
||||
address=/node1.example.com/172.17.0.21
|
||||
address=/node2.example.com/172.17.0.22
|
||||
|
||||
@@ -10,6 +10,7 @@ fi
|
||||
|
||||
curl -s -o $DEST/undionly.kpxe http://boot.ipxe.org/undionly.kpxe
|
||||
cp $DEST/undionly.kpxe $DEST/undionly.kpxe.0
|
||||
curl -s -o $DEST/ipxe.efi http://boot.ipxe.org/ipxe.efi
|
||||
|
||||
# Any vaguely recent CoreOS grub.efi is fine
|
||||
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1298.7.0/coreos_production_pxe_grub.efi
|
||||
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1353.7.0/coreos_production_pxe_grub.efi
|
||||
|
||||
@@ -13,13 +13,11 @@ tftp-root=/var/lib/tftpboot
|
||||
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:#ipxe,undionly.kpxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
address=/bootcfg.foo/172.18.0.2
|
||||
address=/matchbox.foo/172.18.0.2
|
||||
address=/matchbox.example.com/172.18.0.2
|
||||
address=/node1.example.com/172.18.0.21
|
||||
address=/node2.example.com/172.18.0.22
|
||||
@@ -27,6 +25,3 @@ address=/node3.example.com/172.18.0.23
|
||||
address=/node4.example.com/172.18.0.24
|
||||
address=/cluster.example.com/172.18.0.21
|
||||
|
||||
# for a Tectonic test, ignore
|
||||
address=/tectonic.example.com/172.18.0.22
|
||||
address=/tectonic.example.com/172.18.0.23
|
||||
|
||||
11
contrib/dnsmasq/travis-deploy
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# dirty hack
|
||||
cd "$(dirname $0)"
|
||||
|
||||
docker info
|
||||
make docker-image
|
||||
docker login -u="$DOCKER_USERNAME" -p=$DOCKER_PASSWORD quay.io
|
||||
make docker-push
|
||||
|
||||
@@ -15,7 +15,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: matchbox
|
||||
image: quay.io/coreos/matchbox:v0.6.0
|
||||
image: quay.io/poseidon/matchbox:v0.8.0
|
||||
env:
|
||||
- name: MATCHBOX_ADDRESS
|
||||
value: "0.0.0.0:8080"
|
||||
|
||||
@@ -2,12 +2,7 @@ apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox.example.com
|
||||
http:
|
||||
@@ -16,6 +11,18 @@ spec:
|
||||
backend:
|
||||
serviceName: matchbox
|
||||
servicePort: 8080
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox-rpc.example.com
|
||||
http:
|
||||
paths:
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
# Squid Proxy (DRAFT)
|
||||
|
||||
This guide shows how to setup a [Squid](http://www.squid-cache.org/) cache proxy for providing kernel/initrd files to PXE, iPXE, or GRUB2 client machines. This setup runs Squid as a Docker container using the [sameersbn/squid](https://quay.io/repository/sameersbn/squid)
|
||||
image.
|
||||
|
||||
The Squid container requires a squid.conf file to run. Download the example squid.conf file from the [sameersbn/docker-squid](https://github.com/sameersbn/docker-squid) repo:
|
||||
```
|
||||
curl -O https://raw.githubusercontent.com/sameersbn/docker-squid/master/squid.conf
|
||||
```
|
||||
|
||||
Squid [interception caching](http://wiki.squid-cache.org/SquidFaq/InterceptionProxy#Concepts_of_Interception_Caching) is required for proxying PXE, iPXE, or GRUB2 client machines. Set the intercept mode in squid.conf:
|
||||
```
|
||||
sed -ie 's/http_port 3128/http_port 3128 intercept/g' squid.conf
|
||||
```
|
||||
|
||||
By default, Squid caches objects that are 4MB or less. Increase the maximum object size to cache large files such as kernel and initrd images. The following example increases the maximum object size to 300MB:
|
||||
```
|
||||
sed -ie 's/# maximum_object_size 4 MB/maximum_object_size 300 MB/g' squid.conf
|
||||
```
|
||||
|
||||
Squid supports a wide range of cache configurations. Review the Squid [documentation](http://www.squid-cache.org/Doc/) to learn more about configuring Squid.
|
||||
|
||||
This example uses systemd to manage squid. Create the squid service systemd unit file:
|
||||
```
|
||||
cat /etc/systemd/system/squid.service
|
||||
#/etc/systemd/system/squid.service
|
||||
[Unit]
|
||||
Description=squid proxy service
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
TimeoutStartSec=0
|
||||
ExecStart=/usr/bin/docker run --net=host --rm \
|
||||
-v /path/to/squid.conf:/etc/squid3/squid.conf:Z \
|
||||
-v /srv/docker/squid/cache:/var/spool/squid3:Z \
|
||||
quay.io/sameersbn/squid
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
Start Squid:
|
||||
```
|
||||
systemctl start squid
|
||||
```
|
||||
|
||||
If your Squid host is running iptables or firewalld, modify rules to allow the interception and redirection of traffic. In the following example, 192.168.10.1 is the IP address of the interface facing PXE, iPXE, or GRUB2 client machines. The default port number used by squid is 3128.
|
||||
|
||||
For firewalld:
|
||||
```
|
||||
firewall-cmd --permanent --zone=internal --add-forward-port=port=80:proto=tcp:toport=3128:toaddr=192.168.10.1
|
||||
firewall-cmd --permanent --zone=internal --add-port=3128/tcp
|
||||
firewall-cmd --reload
|
||||
firewall-cmd --zone=internal --list-all
|
||||
```
|
||||
|
||||
For iptables:
|
||||
```
|
||||
iptables -t nat -A POSTROUTING -o enp15s0 -j MASQUERADE
|
||||
iptables -t nat -A PREROUTING -i enp14s0 -p tcp --dport 80 -j REDIRECT --to-port 3128
|
||||
```
|
||||
**Note**: enp14s0 faces PXE, iPXE, or GRUB2 clients and enp15s0 faces Internet access.
|
||||
|
||||
Your DHCP server should be configured so the Squid host is the default gateway for PXE, iPXE, or GRUB2 clients. For deployments that run Squid on the same host as dnsmasq, remove any DHCP option 3 settings. For example ```--dhcp-option=3,192.168.10.1"```
|
||||
|
||||
Update Matchbox policies to use the url of the CoreOS kernel/initrd download site:
|
||||
```
|
||||
cat policy/etcd3.json
|
||||
{
|
||||
"id": "etcd3",
|
||||
"name": "etcd3",
|
||||
"boot": {
|
||||
"kernel": "http://stable.release.core-os.net/amd64-usr/1235.9.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["http://stable.release.core-os.net/amd64-usr/1235.9.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "etcd3.yaml"
|
||||
}
|
||||
```
|
||||
|
||||
(Optional) Configure Matchbox to not serve static assets by providing an empty assets-path value.
|
||||
```
|
||||
# /etc/systemd/system/matchbox.service.d/override.conf
|
||||
[Service]
|
||||
Environment="MATCHBOX_ASSETS_PATHS="
|
||||
```
|
||||
|
||||
Boot your PXE, iPXE, or GRUB2 clients.
|
||||
@@ -1,24 +0,0 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/coreos/matchbox"
|
||||
Environment="VERSION=v0.6.0"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
Environment="MATCHBOX_LOG_LEVEL=debug"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--net=host \
|
||||
--inherit-env \
|
||||
--trust-keys-from-https \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--mount volume=config,target=/etc/matchbox \
|
||||
--volume data,kind=host,source=/var/lib/matchbox \
|
||||
--volume config,kind=host,source=/etc/matchbox \
|
||||
${IMAGE}:${VERSION}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -3,8 +3,8 @@ Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/coreos/matchbox"
|
||||
Environment="VERSION=v0.6.0"
|
||||
Environment="IMAGE=quay.io/poseidon/matchbox"
|
||||
Environment="VERSION=v0.8.0"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
|
||||
16
docs/api-grpc.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# gRPC API
|
||||
|
||||
## Protos
|
||||
|
||||
* [rpc.proto](https://github.com/poseidon/matchbox/blob/master/matchbox/rpc/rpcpb/rpc.proto)
|
||||
* [storage.proto](https://github.com/poseidon/matchbox/blob/master/matchbox/storage/storagepb/storage.proto)
|
||||
|
||||
## Client Libraries
|
||||
|
||||
gRPC client libraries
|
||||
|
||||
* [Go](https://godoc.org/github.com/poseidon/matchbox/matchbox/client)
|
||||
|
||||
## Client Plugins
|
||||
|
||||
* [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox)
|
||||
@@ -39,8 +39,8 @@ GET http://matchbox.foo/ipxe?label=value
|
||||
|
||||
```
|
||||
#!ipxe
|
||||
kernel /assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
|
||||
initrd /assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz
|
||||
kernel /assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
|
||||
initrd /assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz
|
||||
boot
|
||||
```
|
||||
|
||||
@@ -67,15 +67,15 @@ default=0
|
||||
timeout=1
|
||||
menuentry "CoreOS" {
|
||||
echo "Loading kernel"
|
||||
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
|
||||
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
|
||||
echo "Loading initrd"
|
||||
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"
|
||||
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"
|
||||
}
|
||||
```
|
||||
|
||||
## Cloud config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
|
||||
DEPRECATED: Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
|
||||
|
||||
```
|
||||
GET http://matchbox.foo/cloud?label=value
|
||||
@@ -101,7 +101,7 @@ coreos:
|
||||
command: start
|
||||
```
|
||||
|
||||
## Ignition Config
|
||||
## Container Linux Config / Ignition Config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding Ignition Config with group metadata, selectors, and query params.
|
||||
|
||||
@@ -231,7 +231,7 @@ If you need to serve static assets (e.g. kernel, initrd), `matchbox` can serve a
|
||||
```
|
||||
matchbox.foo/assets/
|
||||
└── coreos
|
||||
└── 1298.7.0
|
||||
└── 1967.3.0
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
└── 1153.0.0
|
||||
@@ -1,7 +1,7 @@
|
||||
# Cloud Config
|
||||
|
||||
# Cloud config
|
||||
|
||||
**Note:** We recommend migrating to [Container Linux Configs](container-linux-config.md) for hardware provisioning.
|
||||
!!! warning
|
||||
Migrate to [Container Linux Configs](container-linux-config.md). Cloud-Config support will be removed in the future.
|
||||
|
||||
CoreOS Cloud-Config is a system for configuring machines with a Cloud-Config file or executable script from user-data. Cloud-Config runs in userspace on each boot and implements a subset of the [cloud-init spec](http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data). See the cloud-config [docs](https://coreos.com/os/docs/latest/cloud-config.html) for details.
|
||||
|
||||
@@ -18,13 +18,12 @@ Cloud-Config template files can be added in `/var/lib/matchbox/cloud` or in a `c
|
||||
|
||||
## Reference
|
||||
|
||||
Reference a Cloud-Config in a [Profile](matchbox.md#profiles) with `cloud_id`. When PXE booting, use the kernel option `cloud-config-url` to point to `matchbox` [cloud-config endpoint](api.md#cloud-config).
|
||||
Reference a Cloud-Config in a [Profile](matchbox.md#profiles) with `cloud_id`. When PXE booting, use the kernel option `cloud-config-url` to point to `matchbox` [cloud-config endpoint](api-http.md#cloud-config).
|
||||
|
||||
## Examples
|
||||
|
||||
Here is an example Cloud-Config which starts some units and writes a file.
|
||||
|
||||
<!-- {% raw %} -->
|
||||
```yaml
|
||||
#cloud-config
|
||||
coreos:
|
||||
@@ -40,7 +39,6 @@ write_files:
|
||||
content: |
|
||||
{{.greeting}}
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
The Cloud-Config [Validator](https://coreos.com/validate/) is also useful for checking your Cloud-Config files for errors.
|
||||
|
||||
@@ -35,8 +35,7 @@ Configuration arguments can be provided as flags or as environment variables.
|
||||
|
||||
```sh
|
||||
$ ./bin/matchbox -version
|
||||
$ sudo rkt run quay.io/coreos/matchbox:latest -- -version
|
||||
$ sudo docker run quay.io/coreos/matchbox:latest -version
|
||||
$ sudo docker run quay.io/poseidon/matchbox:latest -version
|
||||
```
|
||||
|
||||
## Usage
|
||||
@@ -47,30 +46,18 @@ Run the binary.
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -log-level=debug -data-path=examples -assets-path=examples/assets
|
||||
```
|
||||
|
||||
Run the latest ACI with rkt.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --mount volume=assets,target=/var/lib/matchbox/assets --volume assets,kind=host,source=$PWD/examples/assets quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
Run the latest Docker image.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples/assets:/var/lib/matchbox/assets:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples/assets:/var/lib/matchbox/assets:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
### With examples
|
||||
|
||||
Mount `examples` to pre-load the [example](../examples/README.md) machine groups and profiles. Run the container with rkt,
|
||||
Mount `examples` to pre-load the example machine groups and profiles. Run the container.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
or with Docker.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
### With gRPC API
|
||||
@@ -89,26 +76,12 @@ Clients, such as `bootcmd`, verify the server's certificate with a CA bundle pas
|
||||
$ ./bin/bootcmd profile list --endpoints 127.0.0.1:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
### With rkt
|
||||
|
||||
Run the ACI with rkt and TLS credentials from `examples/etc/matchbox`.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples,readOnly=true --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=$PWD/examples/etc/matchbox --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
A `bootcmd` client can call the gRPC API running at the IP used in the rkt example.
|
||||
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
### With docker
|
||||
|
||||
Run the Docker image with TLS credentials from `examples/etc/matchbox`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 -p 8081:8081 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/etc/matchbox:/etc/matchbox:Z,ro -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
$ sudo docker run -p 8080:8080 -p 8081:8081 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/etc/matchbox:/etc/matchbox:Z,ro -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
A `bootcmd` client can call the gRPC API running at the IP used in the Docker example.
|
||||
@@ -126,14 +99,8 @@ $ export MATCHBOX_PASSPHRASE=test
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -key-ring-path matchbox/sign/fixtures/secring.gpg -data-path=examples -assets-path=examples/assets
|
||||
```
|
||||
|
||||
Run the ACI with a test key.
|
||||
Run the container image with a test key.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --set-env=MATCHBOX_PASSPHRASE=test --mount volume=secrets,target=/secrets --volume secrets,kind=host,source=$PWD/matchbox/sign/fixtures --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -key-ring-path secrets/secring.gpg
|
||||
```
|
||||
|
||||
Run the Docker image with a test key.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm --env MATCHBOX_PASSPHRASE=test -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z -v $PWD/matchbox/sign/fixtures:/secrets:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug -key-ring-path secrets/secring.gpg
|
||||
$ sudo docker run -p 8080:8080 --rm --env MATCHBOX_PASSPHRASE=test -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z -v $PWD/matchbox/sign/fixtures:/secrets:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug -key-ring-path secrets/secring.gpg
|
||||
```
|
||||
@@ -1,6 +1,6 @@
|
||||
# Container Linux Configs
|
||||
|
||||
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk paritions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
|
||||
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk partitions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
|
||||
|
||||
### Ignition
|
||||
|
||||
@@ -25,7 +25,7 @@ Container Linux Config templates can be added to the `/var/lib/matchbox/ignition
|
||||
|
||||
## Referencing in Profiles
|
||||
|
||||
Profiles can include a Container Linux Config for provisioning machines. Specify the Container Linux Config in a [Profile](matchbox.md#profiles) with `ignition_id`. When PXE booting, use the kernel option `coreos.first_boot=1` and `coreos.config.url` to point to the `matchbox` [Ignition endpoint](api.md#ignition-config).
|
||||
Profiles can include a Container Linux Config for provisioning machines. Specify the Container Linux Config in a [Profile](matchbox.md#profiles) with `ignition_id`. When PXE booting, use the kernel option `coreos.first_boot=1` and `coreos.config.url` to point to the `matchbox` [Ignition endpoint](api-http.md#ignition-config).
|
||||
|
||||
## Examples
|
||||
|
||||
@@ -75,7 +75,7 @@ passwd:
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
The Ignition config response (formatted) to a query `/ignition?label=value` for a CoreOS instance supporting Ignition 2.0.0 would be:
|
||||
The Ignition config response (formatted) to a query `/ignition?label=value` for a Container Linux instance supporting Ignition 2.0.0 would be:
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -1,62 +1,55 @@
|
||||
# Installation
|
||||
|
||||
This guide walks through deploying the `matchbox` service on a Linux host (via RPM, rkt, docker, or binary) or on a Kubernetes cluster.
|
||||
This guide walks through deploying the `matchbox` service on a Linux host (via RPM, docker, or binary) or on a Kubernetes cluster.
|
||||
|
||||
## Provisioner
|
||||
|
||||
`matchbox` is a service for network booting and provisioning machines to create Container Linux clusters. `matchbox` should be installed on a provisioner machine (CoreOS or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
|
||||
`matchbox` is a service for network booting and provisioning machines to create CoreOS Container Linux clusters. `matchbox` should be installed on a provisioner machine (Container Linux or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
|
||||
|
||||
Choose one of the supported installation options:
|
||||
|
||||
* [CoreOS (rkt)](#coreos)
|
||||
* [CoreOS Container Linux](#coreos-container-linux)
|
||||
* [RPM-based](#rpm-based-distro)
|
||||
* [Generic Linux (binary)](#generic-linux)
|
||||
* [With rkt](#rkt)
|
||||
* [With docker](#docker)
|
||||
* [Kubernetes Service](#kubernetes)
|
||||
|
||||
## Download
|
||||
|
||||
Download the latest matchbox [release](https://github.com/coreos/matchbox/releases) to the provisioner host.
|
||||
Download the latest matchbox [release](https://github.com/poseidon/matchbox/releases) to the provisioner host.
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.6.0/matchbox-v0.6.0-linux-amd64.tar.gz
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.6.0/matchbox-v0.6.0-linux-amd64.tar.gz.asc
|
||||
$ wget https://github.com/poseidon/matchbox/releases/download/v0.8.0/matchbox-v0.8.0-linux-amd64.tar.gz
|
||||
$ wget https://github.com/poseidon/matchbox/releases/download/v0.8.0/matchbox-v0.8.0-linux-amd64.tar.gz.asc
|
||||
```
|
||||
|
||||
Verify the release has been signed by the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/).
|
||||
Verify the release has been signed by a signing subkey of Dalton Hubble's GPG [key](https://keyserver.ubuntu.com/pks/lookup?search=0x8F515AD1602065C8&op=vindex).
|
||||
|
||||
```sh
|
||||
$ gpg --keyserver pgp.mit.edu --recv-key 18AD5014C99EF7E3BA5F6CE950BDD3E0FC8A365E
|
||||
$ gpg --verify matchbox-v0.6.0-linux-amd64.tar.gz.asc matchbox-v0.6.0-linux-amd64.tar.gz
|
||||
# gpg: Good signature from "CoreOS Application Signing Key <security@coreos.com>"
|
||||
$ gpg --keyserver keyserver.ubuntu.com --recv-key 2E3D92BF07D9DDCCB3BAE4A48F515AD1602065C8
|
||||
$ gpg --verify matchbox-v0.8.0-linux-amd64.tar.gz.asc matchbox-v0.8.0-linux-amd64.tar.gz
|
||||
gpg: Good signature from "Dalton Hubble <dghubble@gmail.com>"
|
||||
```
|
||||
|
||||
Untar the release.
|
||||
|
||||
```sh
|
||||
$ tar xzvf matchbox-v0.6.0-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.6.0-linux-amd64
|
||||
$ tar xzvf matchbox-v0.8.0-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.8.0-linux-amd64
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
### RPM-based distro
|
||||
|
||||
On an RPM-based provisioner, install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf` or `yum`.
|
||||
On an RPM-based provisioner (Fedora 24+), install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf`.
|
||||
|
||||
```sh
|
||||
dnf copr enable @CoreOS/matchbox
|
||||
dnf install matchbox
|
||||
```
|
||||
|
||||
### CoreOS
|
||||
|
||||
On a CoreOS provisioner, rkt run `matchbox` image with the provided systemd unit.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-on-coreos.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
RPMs are not currently available for CentOS and RHEL (due to Go version). CentOS and RHEL users should follow the Generic Linux section below.
|
||||
|
||||
### Generic Linux
|
||||
|
||||
@@ -81,7 +74,7 @@ $ sudo chown -R matchbox:matchbox /var/lib/matchbox
|
||||
Copy the provided `matchbox` systemd unit file.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/
|
||||
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
## Customization
|
||||
@@ -110,12 +103,6 @@ Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
```
|
||||
|
||||
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a CoreOS provisioner can start with an example that enables it.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-for-tectonic.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
Customize `matchbox` to suit your preferences.
|
||||
|
||||
## Firewall
|
||||
@@ -127,31 +114,45 @@ $ sudo firewall-cmd --zone=MYZONE --add-port=8080/tcp --permanent
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8081/tcp --permanent
|
||||
```
|
||||
|
||||
## Generate TLS credentials
|
||||
## Generate TLS Certificates
|
||||
|
||||
*Skip this unless you need to enable the gRPC API*
|
||||
The Matchbox gRPC API allows clients (terraform-provider-matchbox) to create and update Matchbox resources. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
|
||||
The `matchbox` gRPC API allows client apps (terraform-provider-matchbox, Tectonic Installer, etc.) to update how machines are provisioned. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
The `cert-gen` helper script generates a self-signed CA, server certificate, and client certificate. **Prefer your organization's PKI, if possible.**
|
||||
|
||||
If your organization manages public key infrastructure and a certificate authority, create a server certificate and key for the `matchbox` service and a client certificate and key for each client tool.
|
||||
|
||||
Otherwise, generate a self-signed `ca.crt`, a server certificate (`server.crt`, `server.key`), and client credentials (`client.crt`, `client.key`) with the `examples/etc/matchbox/cert-gen` script. Export the DNS name or IP (discouraged) of the provisioner host.
|
||||
Navigate to the `scripts/tls` directory.
|
||||
|
||||
```sh
|
||||
$ cd scripts/tls
|
||||
```
|
||||
|
||||
Export `SAN` to set the Subject Alt Names which should be used in certificates. Provide the fully qualified domain name or IP (discouraged) where Matchbox will be installed.
|
||||
|
||||
```sh
|
||||
# DNS or IP Subject Alt Names where matchbox runs
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:172.18.0.2
|
||||
```
|
||||
|
||||
Generate a `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`.
|
||||
|
||||
```sh
|
||||
$ cd examples/etc/matchbox
|
||||
# DNS or IP Subject Alt Names where matchbox can be reached
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:192.168.1.42
|
||||
$ ./cert-gen
|
||||
```
|
||||
|
||||
Place the TLS credentials in the default location:
|
||||
Move TLS credentials to the matchbox server's default location.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/matchbox
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox/
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox
|
||||
$ sudo chown -R matchbox:matchbox /etc/matchbox
|
||||
```
|
||||
|
||||
Save `client.crt`, `client.key`, and `ca.crt` to use with a client tool later.
|
||||
Save `client.crt`, `client.key`, and `ca.crt` for later use (e.g. `~/.matchbox`).
|
||||
|
||||
```sh
|
||||
$ mkdir -p ~/.matchbox
|
||||
$ cp client.crt client.key ca.crt ~/.matchbox/
|
||||
```
|
||||
|
||||
## Start matchbox
|
||||
|
||||
@@ -182,7 +183,7 @@ matchbox
|
||||
If you enabled the gRPC API,
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert examples/etc/matchbox/client.crt -key examples/etc/matchbox/client.key
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert scripts/tls/client.crt -key scripts/tls/client.key
|
||||
CONNECTED(00000003)
|
||||
depth=1 CN = fake-ca
|
||||
verify return:1
|
||||
@@ -196,14 +197,14 @@ Certificate chain
|
||||
....
|
||||
```
|
||||
|
||||
## Download CoreOS (optional)
|
||||
## Download Container Linux (optional)
|
||||
|
||||
`matchbox` can serve CoreOS images in development or lab environments to reduce bandwidth usage and increase the speed of CoreOS PXE boots and installs to disk.
|
||||
`matchbox` can serve Container Linux images in development or lab environments to reduce bandwidth usage and increase the speed of Container Linux PXE boots and installs to disk.
|
||||
|
||||
Download a recent CoreOS [release](https://coreos.com/releases/) with signatures.
|
||||
Download a recent Container Linux [release](https://coreos.com/releases/) with signatures.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 . # note the "." 3rd argument
|
||||
$ ./scripts/get-coreos stable 1967.3.0 . # note the "." 3rd argument
|
||||
```
|
||||
|
||||
Move the images to `/var/lib/matchbox/assets`,
|
||||
@@ -215,7 +216,7 @@ $ sudo cp -r coreos /var/lib/matchbox/assets
|
||||
```
|
||||
/var/lib/matchbox/assets/
|
||||
├── coreos
|
||||
│ └── 1298.7.0
|
||||
│ └── 1967.3.0
|
||||
│ ├── CoreOS_Image_Signing_Key.asc
|
||||
│ ├── coreos_production_image.bin.bz2
|
||||
│ ├── coreos_production_image.bin.bz2.sig
|
||||
@@ -228,34 +229,21 @@ $ sudo cp -r coreos /var/lib/matchbox/assets
|
||||
and verify the images are accessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080/assets/coreos/1298.7.0/
|
||||
$ curl http://matchbox.example.com:8080/assets/coreos/1967.3.0/
|
||||
<pre>...
|
||||
```
|
||||
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve CoreOS images. See [contrib/squid](../contrib/squid/README.md) for details.
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve Container Linux images.
|
||||
|
||||
## Network
|
||||
|
||||
Review [network setup](https://github.com/coreos/matchbox/blob/master/Documentation/network-setup.md) with your network administrator to set up DHCP, TFTP, and DNS services on your network. At a high level, your goals are to:
|
||||
Review [network setup](https://github.com/poseidon/matchbox/blob/master/docs/network-setup.md) with your network administrator to set up DHCP, TFTP, and DNS services on your network. At a high level, your goals are to:
|
||||
|
||||
* Chainload PXE firmwares to iPXE
|
||||
* Point iPXE client machines to the `matchbox` iPXE HTTP endpoint `http://matchbox.example.com:8080/boot.ipxe`
|
||||
* Ensure `matchbox.example.com` resolves to your `matchbox` deployment
|
||||
|
||||
CoreOS provides [dnsmasq](https://github.com/coreos/matchbox/tree/master/contrib/dnsmasq) as `quay.io/coreos/dnsmasq`, if you wish to use rkt or Docker.
|
||||
|
||||
## rkt
|
||||
|
||||
Run the container image with rkt.
|
||||
|
||||
latest or most recent tagged `matchbox` [release](https://github.com/coreos/matchbox/releases) ACI. Trust the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/) for image signature verification.
|
||||
|
||||
```sh
|
||||
$ mkdir -p /var/lib/matchbox/assets
|
||||
$ sudo rkt run --net=host --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=/var/lib/matchbox quay.io/coreos/matchbox:latest --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=/etc/matchbox,readOnly=true -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Create machine profiles, groups, or Ignition configs by adding files to `/var/lib/matchbox`.
|
||||
Poseidon provides [dnsmasq](https://github.com/poseidon/matchbox/tree/master/contrib/dnsmasq) as `quay.io/poseidon/dnsmasq`.
|
||||
|
||||
## Docker
|
||||
|
||||
@@ -263,7 +251,7 @@ Run the container image with docker.
|
||||
|
||||
```sh
|
||||
$ mkdir -p /var/lib/matchbox/assets
|
||||
$ sudo docker run --net=host --rm -v /var/lib/matchbox:/var/lib/matchbox:Z -v /etc/matchbox:/etc/matchbox:Z,ro quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
$ sudo docker run --net=host --rm -v /var/lib/matchbox:/var/lib/matchbox:Z -v /etc/matchbox:/etc/matchbox:Z,ro quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Create machine profiles, groups, or Ignition configs by adding files to `/var/lib/matchbox`.
|
||||
@@ -292,7 +280,8 @@ Create an Ingress resource to expose the HTTP read-only and gRPC API endpoints.
|
||||
$ kubectl create -f contrib/k8s/matchbox-ingress.yaml
|
||||
$ kubectl get ingress
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
matchbox matchbox.example.com,matchbox-rpc.example.com 10.128.0.3,10... 80, 443 32m
|
||||
matchbox matchbox.example.com 10.128.0.3,10... 80 29m
|
||||
matchbox-rpc matchbox-rpc.example.com 10.128.0.3,10... 80, 443 29m
|
||||
```
|
||||
|
||||
Add DNS records `matchbox.example.com` and `matchbox-rpc.example.com` to route traffic to the Ingress Controller.
|
||||
@@ -304,6 +293,16 @@ $ curl http://matchbox.example.com
|
||||
$ openssl s_client -connect matchbox-rpc.example.com:443 -CAfile ca.crt -cert client.crt -key client.key
|
||||
```
|
||||
|
||||
# HTTPS - The read-only Matchbox API is also available with HTTPS
|
||||
|
||||
To start matchbox in this mode you will need the following flags set:
|
||||
|
||||
| Name | Type | Description |
|
||||
|----------------|--------|---------------------------------------------------------------|
|
||||
| -web-ssl | bool | true/false |
|
||||
| -web-cert-file | string | Path to the server TLS certificate file |
|
||||
| -web-key-file | string | Path to the server TLS key file |
|
||||
|
||||
### Operational notes
|
||||
|
||||
* Secrets: Matchbox **can** be run as a public facing service. However, you **must** follow best practices and avoid writing secret material into machine user-data. Instead, load secret materials from an internal secret store.
|
||||
@@ -18,13 +18,7 @@ $ make test
|
||||
|
||||
## Container image
|
||||
|
||||
Build an ACI `matchbox.aci`.
|
||||
|
||||
```sh
|
||||
$ make aci
|
||||
```
|
||||
|
||||
Alternately, build a Docker image `coreos/matchbox:latest`.
|
||||
Build a container image `coreos/matchbox:latest`.
|
||||
|
||||
```sh
|
||||
$ make docker-image
|
||||
@@ -34,7 +28,6 @@ $ make docker-image
|
||||
|
||||
```sh
|
||||
$ ./bin/matchbox -version
|
||||
$ sudo rkt --insecure-options=image run matchbox.aci -- -version
|
||||
$ sudo docker run coreos/matchbox:latest -version
|
||||
```
|
||||
## Run
|
||||
@@ -45,13 +38,7 @@ Run the binary.
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -log-level=debug -data-path examples -assets-path examples/assets
|
||||
```
|
||||
|
||||
Run the container image with rkt, on `metal0`.
|
||||
|
||||
```sh
|
||||
$ sudo rkt --insecure-options=image run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=$PWD/examples/etc/matchbox --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd matchbox.aci -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Alternately, run the Docker image on `docker0`.
|
||||
Run the Docker image on `docker0`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
@@ -59,7 +46,7 @@ $ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD
|
||||
|
||||
## bootcmd
|
||||
|
||||
Run `bootcmd` against the gRPC API of the service running via rkt.
|
||||
Run `bootcmd` against the gRPC API of the service.
|
||||
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --cacert examples/etc/matchbox/ca.crt
|
||||
@@ -67,10 +54,11 @@ $ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --cacert examples/etc/m
|
||||
|
||||
## Vendor
|
||||
|
||||
Use `glide` and `glide-vc` to manage dependencies committed to the `vendor` directory.
|
||||
Add or update dependencies in `go.mod` and vendor.
|
||||
|
||||
```sh
|
||||
$ make vendor
|
||||
```
|
||||
make update
|
||||
make vendor
|
||||
```
|
||||
|
||||
## Codegen
|
||||
@@ -8,7 +8,7 @@ This guide covers releasing new versions of matchbox.
|
||||
Create a release commit which updates old version references.
|
||||
|
||||
```sh
|
||||
$ export VERSION=v0.6.0
|
||||
$ export VERSION=v0.8.0
|
||||
```
|
||||
|
||||
## Tag
|
||||
@@ -26,8 +26,7 @@ $ git push origin master
|
||||
Travis CI will build the Docker image and push it to Quay.io when the tag is pushed to master. Verify the new image and version.
|
||||
|
||||
```sh
|
||||
$ sudo docker run quay.io/coreos/matchbox:$VERSION -version
|
||||
$ sudo rkt run --no-store quay.io/coreos/matchbox:$VERSION -- -version
|
||||
$ sudo docker run quay.io/poseidon/matchbox:$VERSION -version
|
||||
```
|
||||
|
||||
## Github release
|
||||
@@ -45,19 +44,19 @@ $ make release
|
||||
Verify the reported version.
|
||||
|
||||
```
|
||||
./_output/matchbox-v0.6.0-linux-amd64/matchbox -version
|
||||
./_output/matchbox-v0.8.0-linux-amd64/matchbox -version
|
||||
```
|
||||
|
||||
## Signing
|
||||
|
||||
Sign the release tarballs and ACI with a [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/) subkey.
|
||||
Release tarballs are signed by Dalton Hubble's GPG [Key](/docs/deployment.md#download)
|
||||
|
||||
```sh
|
||||
cd _output
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-arm.tar.gz
|
||||
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
|
||||
gpg2 --armor --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --armor --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
|
||||
gpg2 --armor --detach-sign matchbox-$VERSION-linux-arm.tar.gz
|
||||
gpg2 --armor --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
|
||||
```
|
||||
|
||||
Verify the signatures.
|
||||
@@ -1,7 +1,6 @@
|
||||
|
||||
# Getting started with Docker
|
||||
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
@@ -19,20 +18,20 @@ $ # check Docker's docs to install Docker 1.8+ on Debian/Ubuntu
|
||||
$ sudo apt-get install virt-manager virtinst qemu-kvm
|
||||
```
|
||||
|
||||
Clone the [matchbox](https://github.com/coreos/matchbox) source which contains the examples and scripts.
|
||||
Clone the [matchbox](https://github.com/poseidon/matchbox) source which contains the examples and scripts.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ git clone https://github.com/poseidon/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS image assets referenced by the `etcd-docker` [example](../examples) to `examples/assets`.
|
||||
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
|
||||
$ ./scripts/get-coreos stable 1967.3.0 ./examples/assets
|
||||
```
|
||||
|
||||
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name as you would in production.
|
||||
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name.
|
||||
|
||||
```sh
|
||||
# /etc/hosts
|
||||
@@ -44,11 +43,18 @@ For development convenience, add `/etc/hosts` entries for nodes so they may be r
|
||||
|
||||
## Containers
|
||||
|
||||
Run the latest `matchbox` Docker image from `quay.io/coreos/matchbox` with the `etcd-docker` example. The container should receive the IP address 172.17.0.2 on the `docker0` bridge.
|
||||
Run the `matchbox` and `dnsmasq` services on the `docker0` bridge. `dnsmasq` will run DHCP, DNS and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
|
||||
|
||||
The `devnet` convenience script can start these services and accepts the name of any example cluster in [examples](../examples).
|
||||
|
||||
```sh
|
||||
$ sudo docker pull quay.io/coreos/matchbox:latest
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo ./scripts/devnet create etcd3
|
||||
```
|
||||
|
||||
Inspect the logs.
|
||||
|
||||
```
|
||||
$ sudo ./scripts/devnet status
|
||||
```
|
||||
|
||||
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
|
||||
@@ -57,28 +63,28 @@ Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of ho
|
||||
* Ignition [http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* Metadata [http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
|
||||
## Network
|
||||
### Manual
|
||||
|
||||
Since the virtual network has no network boot services, use the `dnsmasq` image to create an iPXE network boot environment which runs DHCP, DNS, and TFTP.
|
||||
If you prefer to start the containers yourself, instead of using `devnet`,
|
||||
|
||||
```sh
|
||||
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/coreos/dnsmasq -d
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/poseidon/dnsmasq -d
|
||||
```
|
||||
|
||||
In this case, dnsmasq runs a DHCP server allocating IPs to VMs between 172.17.0.43 and 172.17.0.99, resolves `matchbox.foo` to 172.17.0.2 (the IP where `matchbox` runs), and points iPXE clients to `http://matchbox.foo:8080/boot.ipxe`.
|
||||
|
||||
## Client VMs
|
||||
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker's containers run.
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker containers run.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-docker
|
||||
$ sudo ./scripts/libvirt create
|
||||
```
|
||||
|
||||
You can connect to the serial console of any node. If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
|
||||
```sh
|
||||
$ sudo virsh console node1
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
You can also use `virt-manager` to watch the console.
|
||||
@@ -101,7 +107,6 @@ The example profile added autologin so you can verify that etcd3 works between n
|
||||
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ ETCDCTL_API=3
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
@@ -110,11 +115,10 @@ $ etcdctl get /message
|
||||
Clean up the containers and VM machines.
|
||||
|
||||
```sh
|
||||
$ sudo docker rm -f dnsmasq
|
||||
$ sudo ./scripts/libvirt poweroff
|
||||
$ sudo ./scripts/devnet destroy
|
||||
$ sudo ./scripts/libvirt destroy
|
||||
```
|
||||
|
||||
## Going further
|
||||
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`.
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters.
|
||||
@@ -8,9 +8,9 @@ You'll install the `matchbox` service, setup a PXE network boot environment, and
|
||||
|
||||
Install `matchbox` on a dedicated server or Kubernetes cluster. Generate TLS credentials and enable the gRPC API as directed. Save the `ca.crt`, `client.crt`, and `client.key` on your local machine (e.g. `~/.matchbox`).
|
||||
|
||||
* Installing on [CoreOS / Linux distros](deployment.md)
|
||||
* Installing on [Container Linux / other distros](deployment.md)
|
||||
* Installing on [Kubernetes](deployment.md#kubernetes)
|
||||
* Running with [rkt](deployment.md#rkt) / [docker](deployment.md#docker)
|
||||
* Running with [docker](deployment.md#docker)
|
||||
|
||||
Verify the matchbox read-only HTTP endpoints are accessible.
|
||||
|
||||
@@ -30,26 +30,24 @@ $ openssl s_client -connect matchbox.example.com:8081 \
|
||||
|
||||
## Terraform
|
||||
|
||||
Install [Terraform][terraform-dl] v0.9+ on your system.
|
||||
Install [Terraform][terraform-dl] v0.11+ on your system.
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.9.2
|
||||
Terraform v0.11.13
|
||||
```
|
||||
|
||||
Add the `terraform-provider-matchbox` plugin binary on your system.
|
||||
Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/coreos/terraform-provider-matchbox/releases/download/v0.1.0/terraform-provider-matchbox-v0.1.0-linux-amd64.tar.gz
|
||||
$ tar xzf terraform-provider-matchbox-v0.1.0-linux-amd64.tar.gz
|
||||
wget https://github.com/poseidon/terraform-provider-matchbox/releases/download/v0.2.3/terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
mv terraform-provider-matchbox-v0.2.3-linux-amd64/terraform-provider-matchbox ~/.terraform.d/plugins/terraform-provider-matchbox_v0.2.3
|
||||
```
|
||||
|
||||
Add the plugin to your `~/.terraformrc`.
|
||||
|
||||
```hcl
|
||||
providers {
|
||||
matchbox = "/path/to/terraform-provider-matchbox"
|
||||
}
|
||||
```sh
|
||||
$ wget https://github.com/poseidon/terraform-provider-matchbox/releases/download/v0.2.3/terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
$ tar xzf terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
```
|
||||
|
||||
## First cluster
|
||||
@@ -57,11 +55,11 @@ providers {
|
||||
Clone the matchbox source and take a look at the Terraform examples.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ git clone https://github.com/poseidon/matchbox.git
|
||||
$ cd matchbox/examples/terraform
|
||||
```
|
||||
|
||||
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install CoreOS to `/dev/sda`, reboot, and have your SSH key set. It's not much of a cluster, but we'll get to that later.
|
||||
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install Container Linux to `/dev/sda`, reboot, and have your SSH key set. It's not much of a cluster, but we'll get to that later.
|
||||
|
||||
```sh
|
||||
$ cd simple-install
|
||||
@@ -100,7 +98,7 @@ provider "matchbox" {
|
||||
|
||||
#### Profiles
|
||||
|
||||
Machine profiles specify the kernel, initrd, kernel args, Container Linux Config, Cloud-config, or other configs used to network boot and provision a bare-metal machine. This profile will PXE boot machines using the current stable Container Linux kernel and initrd (see [assets](api.md#assets) to learn about caching for speed) and supply a Container Linux Config specifying that a disk install and reboot should be performed. Learn more about [Container Linux configs](https://coreos.com/os/docs/latest/configuration.html).
|
||||
Machine profiles specify the kernel, initrd, kernel args, Container Linux Config, Cloud-config, or other configs used to network boot and provision a bare-metal machine. This profile will PXE boot machines using the current stable Container Linux kernel and initrd (see [assets](api-http.md#assets) to learn about caching for speed) and supply a Container Linux Config specifying that a disk install and reboot should be performed. Learn more about [Container Linux configs](https://coreos.com/os/docs/latest/configuration.html).
|
||||
|
||||
```hcl
|
||||
// Create a CoreOS-install profile
|
||||
@@ -122,9 +120,9 @@ resource "matchbox_profile" "coreos-install" {
|
||||
|
||||
#### Groups
|
||||
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and template in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and templates in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
|
||||
```
|
||||
```hcl
|
||||
resource "matchbox_group" "default" {
|
||||
name = "default"
|
||||
profile = "${matchbox_profile.coreos-install.name}"
|
||||
@@ -169,7 +167,7 @@ Read [network-setup.md](network-setup.md) for the complete range of options. Net
|
||||
* May configure subnets, architectures, or specific machines to delegate to matchbox
|
||||
* May place matchbox behind a menu entry (timeout and default to matchbox)
|
||||
|
||||
If you've never setup a PXE-enabled network before or you're trying to setup a home lab, checkout the [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image [copy-paste examples](https://github.com/coreos/matchbox/blob/master/Documentation/network-setup.md#coreosdnsmasq) and see the section about [proxy-DHCP](https://github.com/coreos/matchbox/blob/master/Documentation/network-setup.md#proxy-dhcp).
|
||||
If you've never setup a PXE-enabled network before or you're trying to setup a home lab, checkout the [quay.io/poseidon/dnsmasq](https://quay.io/repository/poseidon/dnsmasq) container image [copy-paste examples](https://github.com/poseidon/matchbox/blob/master/docs/network-setup.md#poseidondnsmasq) and see the section about [proxy-DHCP](https://github.com/poseidon/matchbox/blob/master/docs/network-setup.md#proxy-dhcp).
|
||||
|
||||
## Boot
|
||||
|
||||
@@ -193,7 +191,7 @@ To re-provision the machine for another purpose, run `terraform apply` and PXE b
|
||||
|
||||
## Going Further
|
||||
|
||||
Matchbox can be used to provision multi-node Container Linux clusters at one or many on-premise sites if deployed in an HA way. Machines can be matched individually by MAC address, UUID, region, or other labels you choose. Installs can be made much faster by caching images in the built-in HTTP [assets](api.md#assets) server.
|
||||
Matchbox can be used to provision multi-node Container Linux clusters at one or many on-premise sites if deployed in an HA way. Machines can be matched individually by MAC address, UUID, region, or other labels you choose. Installs can be made much faster by caching images in the built-in HTTP [assets](api-http.md#assets) server.
|
||||
|
||||
[Container Linux configs](https://coreos.com/os/docs/latest/configuration.html) can be used to partition disks and filesystems, write systemd units, write networkd configs or regular files, and create users. Container Linux nodes can be provisioned into a system that meets your needs. Checkout the examples which create a 3 node [etcd](../examples/terraform/etcd3-install) cluster or a 3 node [Kubernetes](../examples/terraform/bootkube-install) cluster.
|
||||
|
||||
33
docs/grub.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# GRUB2 netboot
|
||||
|
||||
Use GRUB to network boot UEFI hardware.
|
||||
|
||||
## Requirements
|
||||
|
||||
For local development, install the dependencies for libvirt with UEFI.
|
||||
|
||||
* [UEFI with QEMU](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU)
|
||||
|
||||
Ensure that you've gone through the [matchbox with docker](getting-started-docker.md) and [matchbox](matchbox.md) guides and understand the basics.
|
||||
|
||||
## Containers
|
||||
|
||||
Run `matchbox` according to [matchbox with Docker](getting-started-docker.md), but mount the [grub](../examples/groups/grub) group example. Then start the `poseidon/dnsmasq` Docker image, which bundles a `grub.efi`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run --rm --cap-add=NET_ADMIN quay.io/poseidon/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;matchbox.foo:8080)/grub","172.17.0.2" --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe --address=/matchbox.foo/172.17.0.2
|
||||
```
|
||||
|
||||
## Client VM
|
||||
|
||||
Create UEFI VM nodes which have known hardware attributes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
Create a VM to verify the machine network boots.
|
||||
|
||||
```sh
|
||||
$ sudo virt-install --name uefi-test --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```
|
||||
|
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 24 KiB |
|
Before Width: | Height: | Size: 130 KiB After Width: | Height: | Size: 130 KiB |
|
Before Width: | Height: | Size: 67 KiB After Width: | Height: | Size: 67 KiB |
|
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 50 KiB |
|
Before Width: | Height: | Size: 69 KiB After Width: | Height: | Size: 69 KiB |
|
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
|
Before Width: | Height: | Size: 20 KiB After Width: | Height: | Size: 20 KiB |
32
docs/index.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Matchbox
|
||||
|
||||
Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. Machines are matched by labels like MAC or UUID during PXE and profiles specify a kernel/initrd, iPXE config, and Container Linux or Fedora CoreOS config.
|
||||
|
||||
## Features
|
||||
|
||||
* Chainload via iPXE and match hardware labels
|
||||
* Provision Container Linux and Fedora CoreOS (powered by [Ignition](https://github.com/coreos/ignition))
|
||||
* Authenticated gRPC API for clients (e.g. Terraform)
|
||||
|
||||
## Installation
|
||||
|
||||
Matchbox can be installed from a binary or a container image.
|
||||
|
||||
* Install Matchbox on [Kubernetes](deployment.md#kubernetes), on a [Linux](deployment.md) host, or as a [container](deployment.md#docker)
|
||||
* Setup a PXE-enabled [network](network-setup.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
[Getting started](getting-started.md) provisioning machines with Container Linux.
|
||||
|
||||
* Local QEMU/KVM
|
||||
* [matchbox with Docker](getting-started-docker.md)
|
||||
* Clusters
|
||||
* [etcd3](getting-started-docker.md) - Install a 3-node etcd3 cluster
|
||||
* [etcd3](https://github.com/poseidon/matchbox/tree/master/examples/terraform/etcd3-install) - Install a 3-node etcd3 cluster (terraform-based)
|
||||
|
||||
## Related
|
||||
|
||||
* [dnsmasq](https://github.com/poseidon/matchbox/tree/master/contrib/dnsmasq) - container image to run DHCP, TFTP, and DNS services
|
||||
* [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
|
||||
* [Typhoon](https://typhoon.psdn.io/) - minimal and free Kubernetes distribution, supporting bare-metal
|
||||
15
docs/machine-lifecycle.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Lifecycle of a physical machine
|
||||
|
||||
## About boot environment
|
||||
|
||||
Physical machines [network boot](network-booting.md) in a network boot environment with DHCP/TFTP/DNS services or with [poseidon/dnsmasq](../contrib/dnsmasq).
|
||||
|
||||
`matchbox` serves iPXE or GRUB configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS Container Linux. The "first boot" Ignition config is fetched and Container Linux is installed.
|
||||
|
||||
Container Linux boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
|
||||
|
||||
Coordinated auto-updates are enabled. Systems like [fleet](https://coreos.com/docs/#fleet) or [Kubernetes](http://kubernetes.io/docs/) coordinate container services. IPMI, vendor utilities, or first-boot are used to re-provision machines into new roles.
|
||||
|
||||
## Machine lifecycle
|
||||
|
||||

|
||||
@@ -1,16 +1,15 @@
|
||||
# matchbox
|
||||
|
||||
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use CoreOS Linux's early-boot capabilities to provision CoreOS machines.
|
||||
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use Container Linux's early-boot capabilities to provision Container Linux machines.
|
||||
|
||||
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with rkt, or as a Docker container.
|
||||
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be run as a binary or as a container.
|
||||
|
||||

|
||||
|
||||
## Getting started
|
||||
|
||||
Get started running `matchbox` on your Linux machine, with rkt or Docker.
|
||||
Get started running `matchbox` on your Linux machine, with Docker.
|
||||
|
||||
* [matchbox with rkt](getting-started-rkt.md)
|
||||
* [matchbox with Docker](getting-started-docker.md)
|
||||
|
||||
## Flags
|
||||
@@ -19,8 +18,8 @@ See [configuration](config.md) flags and variables.
|
||||
|
||||
## API
|
||||
|
||||
* [HTTP API](api.md)
|
||||
* [gRPC API](https://godoc.org/github.com/coreos/matchbox/matchbox/client)
|
||||
* [HTTP API](api-http.md)
|
||||
* [gRPC API](https://godoc.org/github.com/poseidon/matchbox/matchbox/client)
|
||||
|
||||
## Data
|
||||
|
||||
@@ -59,13 +58,13 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
|
||||
```json
|
||||
{
|
||||
"id": "etcd",
|
||||
"name": "CoreOS with etcd2",
|
||||
"name": "Container Linux with etcd2",
|
||||
"cloud_id": "",
|
||||
"ignition_id": "etcd.yaml",
|
||||
"generic_id": "some-service.cfg",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
@@ -75,11 +74,11 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
|
||||
}
|
||||
```
|
||||
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE, GRUB, or Pixiecore. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE or GRUB. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api-http.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
|
||||
To use cloud-config, set the `cloud-config-url` kernel option to reference the `matchbox` [Cloud-Config endpoint](api.md#cloud-config), which will render the `cloud_id` file.
|
||||
To use cloud-config, set the `cloud-config-url` kernel option to reference the `matchbox` [Cloud-Config endpoint](api-http.md#cloud-config), which will render the `cloud_id` file.
|
||||
|
||||
### Groups
|
||||
|
||||
@@ -173,14 +172,14 @@ matchbox.foo/assets/
|
||||
|
||||
For example, a `Profile` might refer to a local asset `/assets/coreos/VERSION/coreos_production_pxe.vmlinuz` instead of `http://stable.release.core-os.net/amd64-usr/VERSION/coreos_production_pxe.vmlinuz`.
|
||||
|
||||
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place CoreOS assets.
|
||||
See the [get-coreos](https://github.com/poseidon/matchbox/blob/master/scripts/get-coreos) script to quickly download, verify, and place Container Linux assets.
|
||||
|
||||
## Network
|
||||
|
||||
`matchbox` does not implement or exec a DHCP/TFTP server. Read [network setup](network-setup.md) or use the [coreos/dnsmasq](../contrib/dnsmasq) image if you need a quick DHCP, proxyDHCP, TFTP, or DNS setup.
|
||||
`matchbox` does not implement or exec a DHCP/TFTP server. Read [network setup](network-setup.md) or use the [poseidon/dnsmasq](../contrib/dnsmasq) image if you need a quick DHCP, proxyDHCP, TFTP, or DNS setup.
|
||||
|
||||
## Going further
|
||||
|
||||
* [gRPC API Usage](config.md#grpc-api)
|
||||
* [Metadata](api.md#metadata)
|
||||
* OpenPGP [Signing](api.md#openpgp-signatures)
|
||||
* [Metadata](api-http.md#metadata)
|
||||
* OpenPGP [Signing](api-http.md#openpgp-signatures)
|
||||
@@ -15,7 +15,7 @@ The network environment can be set up in a number of ways, which we'll discuss.
|
||||
|
||||
### Network boot programs
|
||||
|
||||
Machines can be booted and configured with CoreOS using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
|
||||
Machines can be booted and configured with CoreOS Container Linux using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
|
||||
|
||||
#### PXELINUX
|
||||
|
||||
@@ -26,7 +26,7 @@ $ mybootdir/pxelinux.cfg/b8945908-d6a6-41a9-611d-74a6ab80b83d
|
||||
$ mybootdir/pxelinux.cfg/default
|
||||
```
|
||||
|
||||
Here is an example PXE config file which boots a CoreOS image hosted on the TFTP server.
|
||||
Here is an example PXE config file which boots a Container Linux image hosted on the TFTP server.
|
||||
|
||||
```
|
||||
default coreos
|
||||
@@ -53,7 +53,7 @@ This approach has a number of drawbacks. TFTP can be slow, managing config files
|
||||
|
||||
A DHCPOFFER to iPXE client firmware specifies an HTTP boot script such as `http://matchbox.foo/boot.ipxe`.
|
||||
|
||||
Here is an example iPXE script for booting the remote CoreOS stable image.
|
||||
Here is an example iPXE script for booting the remote Container Linux stable image.
|
||||
|
||||
```
|
||||
#!ipxe
|
||||
@@ -66,7 +66,7 @@ boot
|
||||
|
||||
A TFTP server is used only to provide the `undionly.kpxe` boot program to older PXE firmware in order to bootstrap into iPXE.
|
||||
|
||||
CoreOS `matchbox` can render signed iPXE scripts to machines based on their hardware attributes. Setup involves configuring your DHCP server to point iPXE clients to the `matchbox` [iPXE endpoint](api.md#ipxe).
|
||||
CoreOS `matchbox` can render signed iPXE scripts to machines based on their hardware attributes. Setup involves configuring your DHCP server to point iPXE clients to the `matchbox` [iPXE endpoint](api-http.md#ipxe).
|
||||
|
||||
## DHCP
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Network setup
|
||||
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to work with `matchbox` to boot and provision PXE, iPXE, or GRUB2 client machines.
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to boot and provision BIOS/PXE, iPXE, or UEFI client machines.
|
||||
|
||||
`matchbox` serves iPXE scripts or GRUB configs over HTTP to serve as the entrypoint for CoreOS cluster bring-up. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, you can configure your own network services to point to `matchbox` or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in libvirt demos).
|
||||
Matchbox serves iPXE scripts over HTTP to serve as the entrypoint for provisioning clusters. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, configure your network environment to point to Matchbox or use the convenient [poseidon/dnsmasq](../contrib/dnsmasq) container image (used in local QEMU/KVM setup).
|
||||
|
||||
*Note*: These are just suggestions. Your network administrator or system administrator should choose the right network setup for your company.
|
||||
|
||||
@@ -13,13 +13,14 @@ Client hardware must have a network interface which supports PXE or iPXE.
|
||||
## Goals
|
||||
|
||||
* Add a DNS name which resolves to a `matchbox` deploy.
|
||||
* Chainload PXE firmware to iPXE or GRUB2
|
||||
* Point iPXE clients to `http://matchbox.foo:port/boot.ipxe`
|
||||
* Point GRUB clients to `http://matchbox.foo:port/grub`
|
||||
* Chainload BIOS clients (legacy PXE) to iPXE (undionly.kpxe)
|
||||
* Chainload UEFI clients to iPXE (ipxe.efi)
|
||||
* Point iPXE clients to `http://matchbox.example.com:port/boot.ipxe`
|
||||
* Point GRUB clients to `http://matchbox.example.com:port/grub`
|
||||
|
||||
## Setup
|
||||
|
||||
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.foo:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE or GRUB to be able to fetch subsequent configs via HTTP.
|
||||
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.example.com:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE to be able to fetch subsequent configs via HTTP.
|
||||
|
||||
On simpler networks, such as what a developer might have at home, a relatively inflexible DHCP server may be in place, with no TFTP server. In this case, a proxy DHCP server can be run alongside a non-PXE capable DHCP server.
|
||||
|
||||
@@ -27,21 +28,21 @@ This diagram can point you to the **right section(s)** of this document.
|
||||
|
||||

|
||||
|
||||
The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wish to use rkt or Docker to quickly run DHCP, proxyDHCP TFTP, or DNS services, use [coreos/dnsmasq](#coreosdnsmasq).
|
||||
The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wish to use Docker to quickly run DHCP, proxyDHCP, TFTP, or DNS services, use [poseidon/dnsmasq](#poseidondnsmasq).
|
||||
|
||||
## DNS
|
||||
|
||||
Add a DNS entry (e.g. `matchbox.foo`, `provisoner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
|
||||
Add a DNS entry (e.g. `matchbox.example.com`, `provisioner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
|
||||
|
||||
```sh
|
||||
$ dig matchbox.foo
|
||||
$ dig matchbox.example.com
|
||||
```
|
||||
|
||||
If you deployed `matchbox` to a known IP address (e.g. dedicated host, load balanced endpoint, Kubernetes NodePort) and use `dnsmasq`, a domain name to IPv4/IPv6 address mapping could be added to the `/etc/dnsmasq.conf`.
|
||||
|
||||
```
|
||||
# dnsmasq.conf
|
||||
address=/matchbox.foo/172.18.0.2
|
||||
address=/matchbox.example.com/172.18.0.2
|
||||
```
|
||||
|
||||
## iPXE
|
||||
@@ -50,7 +51,7 @@ Networks which already run DHCP and TFTP services to network boot PXE/iPXE clien
|
||||
|
||||
```
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://matchbox.foo:8080/boot.ipxe
|
||||
chain http://matchbox.example.com:8080/boot.ipxe
|
||||
```
|
||||
|
||||
You can chainload from a menu entry or use other [iPXE commands](http://ipxe.org/cmd) if you need to do more than simple delegation.
|
||||
@@ -67,26 +68,35 @@ dhcp-range=192.168.1.1,192.168.1.254,30m
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
# if request comes from older PXE ROM, chainload to iPXE (via TFTP)
|
||||
dhcp-boot=tag:!ipxe,undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
# Legacy PXE
|
||||
dhcp-match=set:bios,option:client-arch,0
|
||||
dhcp-boot=tag:bios,undionly.kpxe
|
||||
|
||||
# UEFI
|
||||
dhcp-match=set:efi32,option:client-arch,6
|
||||
dhcp-boot=tag:efi32,ipxe.efi
|
||||
dhcp-match=set:efibc,option:client-arch,7
|
||||
dhcp-boot=tag:efibc,ipxe.efi
|
||||
dhcp-match=set:efi64,option:client-arch,9
|
||||
dhcp-boot=tag:efi64,ipxe.efi
|
||||
|
||||
# iPXE - chainload to matchbox ipxe boot script
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
|
||||
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
# verbose
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
# static DNS assignments
|
||||
address=/matchbox.foo/192.168.1.100
|
||||
address=/matchbox.example.com/192.168.1.100
|
||||
|
||||
# (optional) disable DNS and specify alternate
|
||||
# port=0
|
||||
# dhcp-option=6,192.168.1.100
|
||||
```
|
||||
|
||||
Add [unidonly.kpxe](http://boot.ipxe.org/undionly.kpxe) (and undionly.kpxe.0 if using dnsmasq) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
Add [ipxe.efi](http://boot.ipxe.org/ipxe.efi) and [undionly.kpxe](http://boot.ipxe.org/undionly.kpxe) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
|
||||
```sh
|
||||
$ sudo systemctl start dnsmasq
|
||||
@@ -113,7 +123,7 @@ pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
|
||||
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.foo:8080/boot.ipxe
|
||||
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
# verbose
|
||||
log-queries
|
||||
@@ -141,40 +151,33 @@ timeout 10
|
||||
default iPXE
|
||||
LABEL iPXE
|
||||
KERNEL ipxe.lkrn
|
||||
APPEND dhcp && chain http://matchbox.foo:8080/boot.ipxe
|
||||
APPEND dhcp && chain http://matchbox.example.com:8080/boot.ipxe
|
||||
```
|
||||
|
||||
Add ipxe.lkrn to `/var/lib/tftpboot` (see [iPXE docs](http://ipxe.org/embed)).
|
||||
|
||||
## coreos/dnsmasq
|
||||
## poseidon/dnsmasq
|
||||
|
||||
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `undionly.kpxe` and `grub.efi` for convenience. See [contrib/dnsmasq](contrib/dnsmasq) for details.
|
||||
The [quay.io/poseidon/dnsmasq](https://quay.io/repository/poseidon/dnsmasq) container image can run DHCP, TFTP, and DNS services via docker. The image bundles `ipxe.efi`, `undionly.kpxe`, and `grub.efi` for convenience. See [contrib/dnsmasq](../contrib/dnsmasq) for details.
|
||||
|
||||
Run DHCP, TFTP, and DNS on the host's network:
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/poseidon/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:#ipxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example/192.168.1.2 \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
@@ -182,25 +185,15 @@ sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
Run a proxy-DHCP and TFTP service on the host's network:
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.1,proxy,255.255.255.0 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe \
|
||||
--pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/poseidon/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.1,proxy,255.255.255.0 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe \
|
||||
--pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--pxe-service=tag:#ipxe,X86-64_EFI,"PXE chainload to iPXE UEFI",ipxe.efi \
|
||||
--pxe-service=tag:ipxe,X86-64_EFI,"iPXE UEFI",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
@@ -211,20 +204,19 @@ Be sure to allow enabled services in your firewall configuration.
|
||||
$ sudo firewall-cmd --add-service=dhcp --add-service=tftp --add-service=dns
|
||||
```
|
||||
|
||||
## GRUB
|
||||
## UEFI
|
||||
|
||||
Grub can be used to delegate as well.
|
||||
### Development
|
||||
|
||||
`grub-mknetdir --net-directory=/var/lib/tftpboot`
|
||||
Install the dependencies for [QEMU with UEFI](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU). Walk through the [getting-started-with-docker](getting-started-docker.md) tutorial. Launch client VMs using `create-uefi`.
|
||||
|
||||
/var/lib/tftpboot/boot/grub/grub.cfg:
|
||||
```ini
|
||||
insmod i386-pc/http.mod
|
||||
set root=http,matchbox.foo:8080
|
||||
configfile /grub
|
||||
Create UEFI QEMU/KVM VMs attached to the `docker0` bridge.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
Make sure to replace variables in the example config files; instead of iPXE variables, use GRUB variables. Check the [GRUB2 manual](https://www.gnu.org/software/grub/manual/grub.html#Network).
|
||||
UEFI clients should chainload `ipxe.efi`, load iPXE and Ignition configs from Matchbox, and Container Linux should boot as usual.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
19
docs/troubleshooting.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Firewall
|
||||
|
||||
Running DHCP or proxyDHCP with `poseidon/dnsmasq` on a host requires that the Firewall allow DHCP and TFTP (for chainloading) services to run.
|
||||
|
||||
## Port collision
|
||||
|
||||
Running DHCP or proxyDHCP can cause port already in use collisions depending on what's running. Fedora runs bootp listening on udp/67 for example. Find the service using the port.
|
||||
|
||||
```sh
|
||||
$ sudo lsof -i :67
|
||||
```
|
||||
|
||||
Evaluate whether you can configure the existing service or whether you'd like to stop it and test with `poseidon/dnsmasq`.
|
||||
|
||||
## No boot filename received
|
||||
|
||||
PXE client firmware did not receive a DHCP Offer with PXE-Options after several attempts. If you're using the `poseidon/dnsmasq` image with `-d`, each request should log to stdout. Using the wrong `-i` interface is the most common reason DHCP requests are not received. Otherwise, wireshark can be useful for investigating.
|
||||
@@ -8,27 +8,24 @@ These examples use [Terraform](https://www.terraform.io/intro/) as a client to M
|
||||
|
||||
| Name | Description |
|
||||
|-------------------------------|-------------------------------|
|
||||
| [simple-install](terraform/simple-install) | Install Container Linux with an SSH key |
|
||||
| [etcd3-install](terraform/etcd3-install) | Install a 3-node etcd3 cluster |
|
||||
| [bootkube-install](terraform/bootkube-install) | Install a 3-node self-hosted Kubernetes v1.6.4 cluster |
|
||||
| [simple-install](terraform/simple-install/) | Install Container Linux with an SSH key |
|
||||
| [etcd3-install](terraform/etcd3-install/) | Install a 3-node etcd3 cluster |
|
||||
|
||||
### Customization
|
||||
|
||||
You are encouraged to look through the examples and Terraform modules. Implement your own profiles or package them as modules to meet your needs. We've just provided a starting point. Learn more about [matchbox](../Documentation/matchbox.md) and [Container Linux configs](../Documentation/container-linux-config.md).
|
||||
You are encouraged to look through the examples and Terraform modules. Implement your own profiles or package them as modules to meet your needs. We've just provided a starting point. Learn more about [matchbox](../docs/matchbox.md) and [Container Linux configs](../docs/container-linux-config.md).
|
||||
|
||||
## Manual Examples
|
||||
|
||||
These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/matchbox/` directory.
|
||||
|
||||
| Name | Description | CoreOS Version | FS | Docs |
|
||||
| Name | Description | CoreOS Container Linux Version | FS | Docs |
|
||||
|------------|-------------|----------------|----|-----------|
|
||||
| simple | CoreOS with autologin, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| simple-install | CoreOS Install, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS via GRUB2 Netboot | stable/1298.7.0 | RAM | NA |
|
||||
| etcd3 | PXE boot 3 node etcd3 cluster with proxies | stable/1298.7.0 | RAM | None |
|
||||
| etcd3-install | Install a 3 node etcd3 cluster to disk | stable/1298.7.0 | Disk | None |
|
||||
| bootkube | PXE boot a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| bootkube-install | Install a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| simple | CoreOS Container Linux with autologin, using iPXE | stable/1967.3.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| simple-install | CoreOS Container Linux Install, using iPXE | stable/1967.3.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS Container Linux via GRUB2 Netboot | stable/1967.3.0 | RAM | NA |
|
||||
| etcd3 | PXE boot a 3-node etcd3 cluster with proxies | stable/1967.3.0 | RAM | None |
|
||||
| etcd3-install | Install a 3-node etcd3 cluster to disk | stable/1967.3.0 | Disk | None |
|
||||
|
||||
### Customization
|
||||
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
|
||||
## gRPC API Credentials
|
||||
|
||||
Create FAKE TLS credentials for running the `matchbox` gRPC API examples.
|
||||
|
||||
**DO NOT** use these certificates for anything other than running `matchbox` examples. Use your organization's production PKI for production deployments.
|
||||
|
||||
Navigate to the example directory which will be mounted as `/etc/matchbox` in examples:
|
||||
|
||||
cd matchbox/examples/etc/matchbox
|
||||
|
||||
Set certificate subject alt names which should be used by exporting `SAN`. Use the DNS name or IP at which `matchbox` is hosted.
|
||||
|
||||
# for examples on metal0 or docker0 bridges
|
||||
export SAN=IP.1:127.0.0.1,IP.2:172.18.0.2
|
||||
|
||||
# production example
|
||||
export SAN=DNS.1:matchbox.example.com
|
||||
|
||||
Create a fake `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`. Type 'Y' when prompted.
|
||||
|
||||
$ ./cert-gen
|
||||
Creating FAKE CA, server cert/key, and client cert/key...
|
||||
...
|
||||
...
|
||||
...
|
||||
******************************************************************
|
||||
WARNING: Generated TLS credentials are ONLY SUITABLE FOR EXAMPLES!
|
||||
Use your organization's production PKI for production deployments!
|
||||
|
||||
## Inpsect
|
||||
|
||||
Inspect the generated FAKE certificates if desired.
|
||||
|
||||
openssl x509 -noout -text -in ca.crt
|
||||
openssl x509 -noout -text -in server.crt
|
||||
openssl x509 -noout -text -in client.crt
|
||||
|
||||
## Verify
|
||||
|
||||
Verify that the FAKE server and client certificates were signed by the fake CA.
|
||||
|
||||
openssl verify -CAfile ca.crt server.crt
|
||||
openssl verify -CAfile ca.crt client.crt
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1298.7.0",
|
||||
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
|
||||
"baseurl": "http://matchbox.foo:8080/assets/coreos"
|
||||
"coreos_version": "1967.3.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,11 +8,12 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,10 +8,9 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,10 +8,9 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"etcd_endpoints": "node1.example.com:2379",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1298.7.0",
|
||||
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
|
||||
"baseurl": "http://matchbox.foo:8080/assets/coreos"
|
||||
"coreos_version": "1967.3.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "GRUB CoreOS alpha",
|
||||
"name": "GRUB CoreOS Container Linux alpha",
|
||||
"profile": "grub"
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "install",
|
||||
"name": "Simple CoreOS Alpha Install",
|
||||
"name": "Simple CoreOS Container Linux Install",
|
||||
"profile": "simple-install",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1298.7.0",
|
||||
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
|
||||
"baseurl": "http://matchbox.foo:8080/assets/coreos"
|
||||
"coreos_version": "1967.3.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Alpha",
|
||||
"name": "Simple CoreOS Container Linux Alpha",
|
||||
"profile": "simple",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "Simple CoreOS Alpha with RAM disk",
|
||||
"name": "Simple CoreOS Container Linux Alpha with RAM disk",
|
||||
"profile": "simple"
|
||||
}
|
||||
|
||||
@@ -1,168 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
|
||||
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
|
||||
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes control plane with a temp api-server
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
|
||||
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.4.4}"
|
||||
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=$BOOTKUBE_ASSETS \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$RKT_OPTS \
|
||||
${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
@@ -1,131 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
--endpoints={{.etcd_endpoints}}
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
@@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
|
||||
@@ -7,7 +7,7 @@ systemd:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
|
||||
@@ -20,7 +20,7 @@ storage:
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash -ex
|
||||
curl --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
curl --retry 10 --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}}
|
||||
udevadm settle
|
||||
systemctl reboot
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "bootkube-controller",
|
||||
"name": "bootkube Ready Controller",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"root=/dev/sda1",
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "bootkube-controller.yaml"
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "bootkube-worker",
|
||||
"name": "bootkube Ready Worker",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"root=/dev/sda1",
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "bootkube-worker.yaml"
|
||||
}
|
||||
@@ -2,10 +2,11 @@
|
||||
"id": "etcd3-gateway",
|
||||
"name": "etcd3-gateway",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
|
||||
@@ -2,10 +2,11 @@
|
||||
"id": "etcd3",
|
||||
"name": "etcd3",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +14,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "etcd3.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "grub",
|
||||
"name": "CoreOS via GRUB2",
|
||||
"name": "CoreOS Container Linux via GRUB2",
|
||||
"boot": {
|
||||
"kernel": "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "(http;matchbox.example.com:8080)/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["(http;matchbox.example.com:8080)/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +13,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "ssh.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
{
|
||||
"id": "install-reboot",
|
||||
"name": "Install CoreOS and Reboot",
|
||||
"name": "Install CoreOS Container Linux and Reboot",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +14,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "install-reboot.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
{
|
||||
"id": "simple-install",
|
||||
"name": "Simple CoreOS Alpha Install",
|
||||
"name": "Simple CoreOS Container Linux Alpha Install",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
@@ -13,4 +14,4 @@
|
||||
]
|
||||
},
|
||||
"ignition_id": "install-reboot.yaml"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,19 @@
|
||||
{
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Alpha",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "ssh.yaml"
|
||||
}
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Container Linux Alpha",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": [
|
||||
"/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"
|
||||
],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "ssh.yaml"
|
||||
}
|
||||
|
||||
@@ -1,146 +0,0 @@
|
||||
# Self-hosted Kubernetes
|
||||
|
||||
The self-hosted Kubernetes example shows how to use matchbox to network boot and provision a 3 node "self-hosted" Kubernetes v1.6.4 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting.
|
||||
|
||||
## Requirements
|
||||
|
||||
Follow the getting started [tutorial](../../../Documentation/getting-started.md) to learn about matchbox and set up an environment that meets the requirements:
|
||||
|
||||
* Matchbox v0.6+ [installation](../../../Documentation/deployment.md) with gRPC API enabled
|
||||
* Matchbox provider credentials `client.crt`, `client.key`, and `ca.crt`
|
||||
* PXE [network boot](../../../Documentation/network-setup.md) environment
|
||||
* Terraform v0.9+ and [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) installed locally on your system
|
||||
* Machines with known DNS names and MAC addresses
|
||||
|
||||
If you prefer to provision QEMU/KVM VMs on your local Linux machine, set up the matchbox [development environment](../../../Documentation/getting-started-rkt.md).
|
||||
|
||||
```sh
|
||||
sudo ./scripts/devnet create
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Clone the [matchbox](https://github.com/coreos/matchbox) project and take a look at the cluster examples.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox/examples/terraform/bootkube-install
|
||||
```
|
||||
|
||||
Copy the `terraform.tfvars.example` file to `terraform.tfvars`. Ensure `provider.tf` references your matchbox credentials.
|
||||
|
||||
```hcl
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
|
||||
cluster_name = "demo"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_channel = "stable"
|
||||
ssh_authorized_key = "ADD ME"
|
||||
```
|
||||
|
||||
Provide an ordered list of controller names, MAC addresses, and domain names. Provide an ordered list of worker names, MAC addresses, and domain names.
|
||||
|
||||
```
|
||||
controller_names = ["node1"]
|
||||
controller_macs = ["52:54:00:a1:9c:ae"]
|
||||
controller_domains = ["node1.example.com"]
|
||||
worker_names = ["node2", "node3"]
|
||||
worker_macs = ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
|
||||
worker_domains = ["node2.example.com", "node3.example.com"]
|
||||
```
|
||||
|
||||
Finally, provide an `assets_dir` for generated manifests and a DNS name which you've setup to resolves to controller(s) (e.g. round-robin). Worker nodes and your kubeconfig will communicate via this endpoint.
|
||||
|
||||
```
|
||||
k8s_domain_name = "cluster.example.com"
|
||||
asset_dir = "assets"
|
||||
```
|
||||
|
||||
You may set `experimental_self_hosted_etcd = "true"` to deploy "self-hosted" etcd atop Kubernetes instead of running etcd on hosts directly. Warning, this is experimental and potentially dangerous.
|
||||
|
||||
## Apply
|
||||
|
||||
Fetch the [bootkube](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html) for bare-metal, which is maintained in the in the matchbox repo.
|
||||
|
||||
```sh
|
||||
$ terraform get
|
||||
```
|
||||
|
||||
Plan and apply to create the resources on Matchbox.
|
||||
|
||||
```sh
|
||||
$ terraform plan
|
||||
Plan: 37 to add, 0 to change, 0 to destroy.
|
||||
```
|
||||
|
||||
Terraform will configure matchbox with profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`) and add groups to match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `noe3` provision themselves as workers.
|
||||
|
||||
The module referenced in `cluster.tf` will also generate bootkube assets to `assets_dir` (exactly like the [bootkube](https://github.com/kubernetes-incubator/bootkube) binary would). These assets include Kubernetes bootstrapping and control plane manifests as well as a kubeconfig you can use to access the cluster.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
module.cluster.null_resource.copy-kubeconfig.0: Still creating... (5m0s elapsed)
|
||||
module.cluster.null_resource.copy-kubeconfig.1: Still creating... (5m0s elapsed)
|
||||
module.cluster.null_resource.copy-kubeconfig.2: Still creating... (5m0s elapsed)
|
||||
...
|
||||
module.cluster.null_resource.bootkube-start: Still creating... (8m40s elapsed)
|
||||
...
|
||||
Apply complete! Resources: 37 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
You can now move on to the "Machines" section. Apply will loop until it can successfully copy the kubeconfig to each node and start the one-time Kubernetes bootstrapping process on a controller. In practice, you may see `apply` fail if it connects before the disk install has completed. Run terraform apply until it reconciles successfully.
|
||||
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
|
||||
## Machines
|
||||
|
||||
Power on each machine (with PXE boot device on next boot). Machines should network boot, install Container Linux to disk, reboot, and provision themselves as bootkube controllers or workers.
|
||||
|
||||
```sh
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS chassis bootdev pxe
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power on
|
||||
```
|
||||
|
||||
For local QEMU/KVM development, create the QEMU/KVM VMs.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create
|
||||
$ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
|
||||
|
||||
```sh
|
||||
$ KUBECONFIG=assets/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE
|
||||
node1.example.com Ready 3m
|
||||
node2.example.com Ready 3m
|
||||
node3.example.com Ready 3m
|
||||
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system checkpoint-installer-p8g8r 1/1 Running 1 13m
|
||||
kube-system kube-apiserver-s5gnx 1/1 Running 1 41s
|
||||
kube-system kube-controller-manager-3438979800-jrlnd 1/1 Running 1 13m
|
||||
kube-system kube-controller-manager-3438979800-tkjx7 1/1 Running 1 13m
|
||||
kube-system kube-dns-4101612645-xt55f 4/4 Running 4 13m
|
||||
kube-system kube-flannel-pl5c2 2/2 Running 0 13m
|
||||
kube-system kube-flannel-r9t5r 2/2 Running 3 13m
|
||||
kube-system kube-flannel-vfb0s 2/2 Running 4 13m
|
||||
kube-system kube-proxy-cvhmj 1/1 Running 0 13m
|
||||
kube-system kube-proxy-hf9mh 1/1 Running 1 13m
|
||||
kube-system kube-proxy-kpl73 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-1l23b 1/1 Running 1 13m
|
||||
kube-system kube-scheduler-694795526-fks0b 1/1 Running 1 13m
|
||||
kube-system pod-checkpointer-node1.example.com 1/1 Running 2 10m
|
||||
```
|
||||
|
||||
Try restarting machines or deleting pods to see that the cluster is resilient to failures.
|
||||
|
||||
## Going Further
|
||||
|
||||
Learn more about [matchbox](../../../Documentation/matchbox.md) or explore the other [example](../) clusters.
|
||||
@@ -1,27 +0,0 @@
|
||||
// Self-hosted Kubernetes cluster
|
||||
module "cluster" {
|
||||
source = "../modules/bootkube"
|
||||
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
|
||||
cluster_name = "${var.cluster_name}"
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
|
||||
# Machines
|
||||
controller_names = "${var.controller_names}"
|
||||
controller_macs = "${var.controller_macs}"
|
||||
controller_domains = "${var.controller_domains}"
|
||||
worker_names = "${var.worker_names}"
|
||||
worker_macs = "${var.worker_macs}"
|
||||
worker_domains = "${var.worker_domains}"
|
||||
|
||||
# bootkube assets
|
||||
k8s_domain_name = "${var.k8s_domain_name}"
|
||||
asset_dir = "${var.asset_dir}"
|
||||
|
||||
# Optional
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
// Configure the matchbox provider
|
||||
provider "matchbox" {
|
||||
endpoint = "${var.matchbox_rpc_endpoint}"
|
||||
client_cert = "${file("~/.matchbox/client.crt")}"
|
||||
client_key = "${file("~/.matchbox/client.key")}"
|
||||
ca = "${file("~/.matchbox/ca.crt")}"
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
# ssh_authorized_key = "ADD ME"
|
||||
|
||||
cluster_name = "example"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_channel = "stable"
|
||||
|
||||
# Machines
|
||||
controller_names = ["node1"]
|
||||
controller_macs = ["52:54:00:a1:9c:ae"]
|
||||
controller_domains = ["node1.example.com"]
|
||||
worker_names = ["node2", "node3"]
|
||||
worker_macs = ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
|
||||
worker_domains = ["node2.example.com", "node3.example.com"]
|
||||
|
||||
# Bootkube
|
||||
k8s_domain_name = "cluster.example.com"
|
||||
asset_dir = "assets"
|
||||
|
||||
# Optional
|
||||
# container_linux_oem = ""
|
||||
# experimental_self_hosted_etcd = "true"
|
||||
@@ -1,94 +0,0 @@
|
||||
variable "matchbox_http_endpoint" {
|
||||
type = "string"
|
||||
description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
|
||||
}
|
||||
|
||||
variable "matchbox_rpc_endpoint" {
|
||||
type = "string"
|
||||
description = "Matchbox gRPC API endpoint, without the protocol (e.g. matchbox.example.com:8081)"
|
||||
}
|
||||
|
||||
variable "container_linux_channel" {
|
||||
type = "string"
|
||||
description = "Container Linux channel corresponding to the container_linux_version"
|
||||
}
|
||||
|
||||
variable "container_linux_version" {
|
||||
type = "string"
|
||||
description = "Container Linux version of the kernel/initrd to PXE or the image to install"
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
type = "string"
|
||||
description = "Cluster name"
|
||||
}
|
||||
|
||||
variable "ssh_authorized_key" {
|
||||
type = "string"
|
||||
description = "SSH public key to set as an authorized_key on machines"
|
||||
}
|
||||
|
||||
# Machines
|
||||
# Terraform's crude "type system" does properly support lists of maps so we do this.
|
||||
|
||||
variable "controller_names" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "controller_macs" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "controller_domains" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_names" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_macs" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_domains" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
# bootkube assets
|
||||
|
||||
variable "k8s_domain_name" {
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IP range to assign Kubernetes pods"
|
||||
type = "string"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
description = <<EOD
|
||||
CIDR IP range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
|
||||
EOD
|
||||
type = "string"
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = "string"
|
||||
default = ""
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
variable "experimental_self_hosted_etcd" {
|
||||
default = "false"
|
||||
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on-hosts"
|
||||
}
|
||||
@@ -4,15 +4,15 @@ The `etcd3-install` example shows how to use matchbox to network boot and provis
|
||||
|
||||
## Requirements
|
||||
|
||||
Follow the getting started [tutorial](../../../Documentation/getting-started.md) to learn about matchbox and set up an environment that meets the requirements:
|
||||
Follow the getting started [tutorial](../../../docs/getting-started.md) to learn about matchbox and set up an environment that meets the requirements:
|
||||
|
||||
* Matchbox v0.6+ [installation](../../../Documentation/deployment.md) with gRPC API enabled
|
||||
* Matchbox v0.6+ [installation](../../../docs/deployment.md) with gRPC API enabled
|
||||
* Matchbox provider credentials `client.crt`, `client.key`, and `ca.crt`
|
||||
* PXE [network boot](../../../Documentation/network-setup.md) environment
|
||||
* Terraform v0.9+ and [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) installed locally on your system
|
||||
* PXE [network boot](../../../docs/network-setup.md) environment
|
||||
* Terraform v0.9+ and [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) installed locally on your system
|
||||
* 3 machines with known DNS names and MAC addresses
|
||||
|
||||
If you prefer to provision QEMU/KVM VMs on your local Linux machine, set up the matchbox [development environment](../../../Documentation/getting-started-rkt.md).
|
||||
If you prefer to provision QEMU/KVM VMs on your local Linux machine, set up the matchbox [development environment](../../../docs/getting-started-docker.md).
|
||||
|
||||
```sh
|
||||
sudo ./scripts/devnet create
|
||||
@@ -20,10 +20,10 @@ sudo ./scripts/devnet create
|
||||
|
||||
## Usage
|
||||
|
||||
Clone the [matchbox](https://github.com/coreos/matchbox) project and take a look at the cluster examples.
|
||||
Clone the [matchbox](https://github.com/poseidon/matchbox) project and take a look at the cluster examples.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ git clone https://github.com/poseidon/matchbox.git
|
||||
$ cd matchbox/examples/terraform/etcd3-install
|
||||
```
|
||||
|
||||
@@ -37,6 +37,19 @@ ssh_authorized_key = "ADD ME"
|
||||
|
||||
Configs in `etcd3-install` configure the matchbox provider, define profiles (e.g. `cached-container-linux-install`, `etcd3`), and define 3 groups which match machines by MAC address to a profile. These resources declare that the machines should PXE boot, install Container Linux to disk, and provision themselves into peers in a 3-node etcd3 cluster.
|
||||
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/poseidon/matchbox/blob/master/docs/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
|
||||
### Optional
|
||||
|
||||
You may set certain optional variables to override defaults.
|
||||
|
||||
```hcl
|
||||
# install_disk = "/dev/sda"
|
||||
# container_linux_oem = ""
|
||||
```
|
||||
|
||||
## Apply
|
||||
|
||||
Fetch the [profiles](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html) which let's you use common machine profiles maintained in the matchbox repo (like `etcd3`).
|
||||
|
||||
```sh
|
||||
@@ -52,8 +65,6 @@ $ terraform apply
|
||||
Apply complete! Resources: 10 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
|
||||
|
||||
## Machines
|
||||
|
||||
Power on each machine (with PXE boot device on next boot). Machines should network boot, install Container Linux to disk, reboot, and provision themselves as a 3-node etcd3 cluster.
|
||||
@@ -82,7 +93,6 @@ $ systemctl status etcd-member
|
||||
Verify that etcd3 peers are healthy and communicating.
|
||||
|
||||
```sh
|
||||
$ ETCDCTL_API=3
|
||||
$ etcdctl cluster-health
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
@@ -90,4 +100,4 @@ $ etcdctl get /message
|
||||
|
||||
## Going Further
|
||||
|
||||
Learn more about [matchbox](../../../Documentation/matchbox.md) or explore the other [example](../) clusters.
|
||||
Learn more about [matchbox](../../../docs/matchbox.md) or explore the other [example](../) clusters.
|
||||
|
||||
@@ -2,8 +2,10 @@
|
||||
module "profiles" {
|
||||
source = "../modules/profiles"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_version = "1967.3.0"
|
||||
container_linux_channel = "stable"
|
||||
install_disk = "${var.install_disk}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
}
|
||||
|
||||
// Install Container Linux to disk before provisioning
|
||||
@@ -12,13 +14,9 @@ resource "matchbox_group" "default" {
|
||||
profile = "${module.profiles.cached-container-linux-install}"
|
||||
|
||||
// No selector, matches all nodes
|
||||
|
||||
metadata {
|
||||
container_linux_channel = "stable"
|
||||
container_linux_version = "1298.7.0"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
|
||||
baseurl = "${var.matchbox_http_endpoint}/assets/coreos"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,5 +2,6 @@ matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
# ssh_authorized_key = "ADD ME"
|
||||
|
||||
# Optional
|
||||
# Optional (defaults)
|
||||
# install_disk = "/dev/sda"
|
||||
# container_linux_oem = ""
|
||||
|
||||
@@ -13,8 +13,16 @@ variable "ssh_authorized_key" {
|
||||
description = "SSH public key to set as an authorized_key on machines"
|
||||
}
|
||||
|
||||
# optional
|
||||
|
||||
variable "install_disk" {
|
||||
type = "string"
|
||||
default = "/dev/sda"
|
||||
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = "string"
|
||||
default = ""
|
||||
type = "string"
|
||||
default = ""
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ Matchbox provides Terraform [modules](https://www.terraform.io/docs/modules/usag
|
||||
|
||||
```hcl
|
||||
module "profiles" {
|
||||
source = "git::https://github.com/coreos/matchbox.git//examples/terraform/modules/profiles?ref=4451425db8f230012c36de6e6628c72aa34e1c10"
|
||||
source = "git::https://github.com/poseidon/matchbox.git//examples/terraform/modules/profiles?ref=08f4e9908b167fba608e60169ec6a803df9db37f"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
@@ -27,10 +27,7 @@ Available modules:
|
||||
| | cached-container-linux-install | Install Container Linux to disk from matchbox assets cache |
|
||||
| | etcd3 | Provision an etcd3 peer node |
|
||||
| | etcd3-gateway | Provision an etcd3 gateway node |
|
||||
| | bootkube-controller | Provision a self-hosted Kubernetes controller/master node |
|
||||
| | bootkube-worker | Provisioner a self-hosted Kubernetes worker node |
|
||||
| bootkube | | Creates a multi-controller, multi-worker self-hosted Kubernetes cluster |
|
||||
|
||||
## Customization
|
||||
|
||||
You are encouraged to look through the examples and modules. Implement your own profiles or package them as modules to meet your needs. We've just provided a starting point. Learn more about [matchbox](../../Documentation/matchbox.md) and [Container Linux configs](../../Documentation/container-linux-config.md).
|
||||
You are encouraged to look through the examples and modules. Implement your own profiles or package them as modules to meet your needs. We've just provided a starting point. Learn more about [matchbox](../../docs/matchbox.md) and [Container Linux configs](../../docs/container-linux-config.md).
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
# Self-hosted Kubernetes assets (kubeconfig, manifests)
|
||||
module "bootkube" {
|
||||
source = "git::https://github.com/dghubble/bootkube-terraform.git?ref=3720aff28a465987e079dcd74fe3b6d5046d7010"
|
||||
|
||||
cluster_name = "${var.cluster_name}"
|
||||
api_servers = ["${var.k8s_domain_name}"]
|
||||
etcd_servers = ["http://127.0.0.1:2379"]
|
||||
asset_dir = "${var.asset_dir}"
|
||||
pod_cidr = "${var.pod_cidr}"
|
||||
service_cidr = "${var.service_cidr}"
|
||||
experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
// Install Container Linux to disk
|
||||
resource "matchbox_group" "container-linux-install" {
|
||||
count = "${length(var.controller_names) + length(var.worker_names)}"
|
||||
|
||||
name = "${format("container-linux-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
|
||||
profile = "${module.profiles.cached-container-linux-install}"
|
||||
|
||||
selector {
|
||||
mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
|
||||
}
|
||||
|
||||
metadata {
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
container_linux_oem = "${var.container_linux_oem}"
|
||||
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
|
||||
baseurl = "${var.matchbox_http_endpoint}/assets/coreos"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "controller" {
|
||||
count = "${length(var.controller_names)}"
|
||||
name = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
|
||||
profile = "${module.profiles.bootkube-controller}"
|
||||
|
||||
selector {
|
||||
mac = "${element(var.controller_macs, count.index)}"
|
||||
os = "installed"
|
||||
}
|
||||
|
||||
metadata {
|
||||
domain_name = "${element(var.controller_domains, count.index)}"
|
||||
etcd_name = "${element(var.controller_names, count.index)}"
|
||||
etcd_initial_cluster = "${join(",", formatlist("%s=http://%s:2380", var.controller_names, var.controller_domains))}"
|
||||
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
|
||||
k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
|
||||
k8s_etcd_service_ip = "${module.bootkube.etcd_service_ip}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "matchbox_group" "worker" {
|
||||
count = "${length(var.worker_names)}"
|
||||
name = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
|
||||
profile = "${module.profiles.bootkube-worker}"
|
||||
|
||||
selector {
|
||||
mac = "${element(var.worker_macs, count.index)}"
|
||||
os = "installed"
|
||||
}
|
||||
|
||||
metadata {
|
||||
domain_name = "${element(var.worker_domains, count.index)}"
|
||||
etcd_endpoints = "${join(",", formatlist("%s:2379", var.controller_domains))}"
|
||||
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
|
||||
k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
|
||||
k8s_etcd_service_ip = "${module.bootkube.etcd_service_ip}"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
// Create common profiles
|
||||
module "profiles" {
|
||||
source = "../profiles"
|
||||
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
|
||||
container_linux_version = "${var.container_linux_version}"
|
||||
container_linux_channel = "${var.container_linux_channel}"
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
# Secure copy kubeconfig to all nodes to activate kubelet.service
|
||||
resource "null_resource" "copy-kubeconfig" {
|
||||
count = "${length(var.controller_names) + length(var.worker_names)}"
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = "${element(concat(var.controller_domains, var.worker_domains), count.index)}"
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
content = "${module.bootkube.kubeconfig}"
|
||||
destination = "$HOME/kubeconfig"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
# Secure copy bootkube assets to ONE controller and start bootkube to perform
|
||||
# one-time self-hosted cluster bootstrapping.
|
||||
resource "null_resource" "bootkube-start" {
|
||||
# Without depends_on, this remote-exec may start before the kubeconfig copy.
|
||||
# Terraform only does one task at a time, so it would try to bootstrap
|
||||
# Kubernetes and Tectonic while no Kubelets are running. Ensure all nodes
|
||||
# receive a kubeconfig before proceeding with bootkube and tectonic.
|
||||
depends_on = ["null_resource.copy-kubeconfig"]
|
||||
|
||||
connection {
|
||||
type = "ssh"
|
||||
host = "${element(var.controller_domains, 0)}"
|
||||
user = "core"
|
||||
timeout = "60m"
|
||||
}
|
||||
|
||||
provisioner "file" {
|
||||
source = "${var.asset_dir}"
|
||||
destination = "$HOME/assets"
|
||||
}
|
||||
|
||||
provisioner "remote-exec" {
|
||||
inline = [
|
||||
"sudo mv /home/core/assets /opt/bootkube",
|
||||
"sudo systemctl start bootkube",
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,89 +0,0 @@
|
||||
variable "matchbox_http_endpoint" {
|
||||
type = "string"
|
||||
description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
|
||||
}
|
||||
|
||||
variable "container_linux_channel" {
|
||||
type = "string"
|
||||
description = "Container Linux channel corresponding to the container_linux_version"
|
||||
}
|
||||
|
||||
variable "container_linux_version" {
|
||||
type = "string"
|
||||
description = "Container Linux version of the kernel/initrd to PXE or the image to install"
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
type = "string"
|
||||
description = "Cluster name"
|
||||
}
|
||||
|
||||
variable "ssh_authorized_key" {
|
||||
type = "string"
|
||||
description = "SSH public key to set as an authorized_key on machines"
|
||||
}
|
||||
|
||||
# Machines
|
||||
# Terraform's crude "type system" does properly support lists of maps so we do this.
|
||||
|
||||
variable "controller_names" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "controller_macs" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "controller_domains" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_names" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_macs" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "worker_domains" {
|
||||
type = "list"
|
||||
}
|
||||
|
||||
# bootkube assets
|
||||
|
||||
variable "k8s_domain_name" {
|
||||
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "asset_dir" {
|
||||
description = "Path to a directory where generated assets should be placed (contains secrets)"
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "pod_cidr" {
|
||||
description = "CIDR IP range to assign Kubernetes pods"
|
||||
type = "string"
|
||||
default = "10.2.0.0/16"
|
||||
}
|
||||
|
||||
variable "service_cidr" {
|
||||
description = <<EOD
|
||||
CIDR IP range to assign Kubernetes services.
|
||||
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
|
||||
EOD
|
||||
type = "string"
|
||||
default = "10.3.0.0/16"
|
||||
}
|
||||
|
||||
variable "container_linux_oem" {
|
||||
type = "string"
|
||||
default = ""
|
||||
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
|
||||
}
|
||||
|
||||
variable "experimental_self_hosted_etcd" {
|
||||
default = "false"
|
||||
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on-hosts"
|
||||
}
|
||||
@@ -1,174 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
{{ if eq .etcd_on_host "true" }}
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.1.6"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
|
||||
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
|
||||
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
|
||||
{{ end }}
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
{{ if eq .etcd_on_host "false" -}}
|
||||
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
|
||||
{{ end }}
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log"
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--require-kubeconfig \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--anonymous-auth=false \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--network-plugin=cni \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--exit-on-lock-contention \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--allow-privileged \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes control plane with a temp api-server
|
||||
ConditionPathExists=!/opt/bootkube/init_bootkube.done
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
# Move experimental manifests
|
||||
[ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
|
||||
[ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
|
||||
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
|
||||
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.4.4}"
|
||||
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=$BOOTKUBE_ASSETS \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$RKT_OPTS \
|
||||
${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- {{.ssh_authorized_key}}
|
||||