Compare commits
55 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ff7112e9d3 | ||
|
|
04f1c32ba2 | ||
|
|
da0df01763 | ||
|
|
dadca25978 | ||
|
|
2b04912307 | ||
|
|
4fc5703558 | ||
|
|
e6b10e13dc | ||
|
|
44db881f05 | ||
|
|
5a1fa28d82 | ||
|
|
c91baffe0e | ||
|
|
5b61bc9c93 | ||
|
|
cf17df0aea | ||
|
|
31b1ab20b7 | ||
|
|
705d3402b6 | ||
|
|
3f6cde1cd5 | ||
|
|
48d61a02e6 | ||
|
|
8c5b9dd6c6 | ||
|
|
d007c64a5f | ||
|
|
ec1baf0aef | ||
|
|
b557654995 | ||
|
|
496e8e725d | ||
|
|
2de7d5af6c | ||
|
|
7ba809adf4 | ||
|
|
be10339429 | ||
|
|
81341e4923 | ||
|
|
90a91f7306 | ||
|
|
3767ef93cb | ||
|
|
dcf099591e | ||
|
|
fa499ec1b8 | ||
|
|
97eda57303 | ||
|
|
1fdae1d073 | ||
|
|
8458357f35 | ||
|
|
6593da7ef8 | ||
|
|
26b5055f2f | ||
|
|
991c841046 | ||
|
|
998eeaa1d0 | ||
|
|
873f053d10 | ||
|
|
82e97ed654 | ||
|
|
e1de1ac938 | ||
|
|
515afcbb1d | ||
|
|
97fb6b302c | ||
|
|
bf04fa30ad | ||
|
|
de0b88839c | ||
|
|
f2f00b6d0e | ||
|
|
117d6d07e2 | ||
|
|
c68f411910 | ||
|
|
a0b24a9596 | ||
|
|
0b56acf884 | ||
|
|
ec55f43cdf | ||
|
|
c07eb9aaba | ||
|
|
a7d19dfdd2 | ||
|
|
908e89c3a1 | ||
|
|
0626163494 | ||
|
|
b4f5e574a2 | ||
|
|
74005e901c |
36
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Report a bug to improve the project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- READ: Issues are used to receive focused bug reports from users and to track planned future enhancements by the authors. Topics like support, debugging help, advice, and operation are out of scope and should not use issues-->
|
||||
|
||||
**Description**
|
||||
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**Steps to Reproduce**
|
||||
|
||||
Provide clear steps to reproduce the bug.
|
||||
|
||||
- [ ] Relevant error messages if appropriate (concise, not a dump of everything).
|
||||
|
||||
**Expected behavior**
|
||||
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Environment**
|
||||
|
||||
* OS: fedora-coreos, flatcar-linux (include release version)
|
||||
* Release: Matchbox version or Git SHA (reporting latest is **not** helpful)
|
||||
|
||||
**Possible Solution**
|
||||
|
||||
<!-- Most bug reports should have some inkling about solutions. Otherwise, your report may be less of a bug and more of a support request (see top).-->
|
||||
|
||||
Link to a PR or description.
|
||||
|
||||
5
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Security
|
||||
url: https://typhoon.psdn.io/topics/security/
|
||||
about: Report security vulnerabilities
|
||||
31
.github/workflows/test.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: test
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
jobs:
|
||||
build:
|
||||
name: go
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
go: ['1.13', '1.14', '1.15']
|
||||
steps:
|
||||
- name: setup
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{matrix.go}}
|
||||
|
||||
- name: checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: tools
|
||||
run: GO111MODULE=off go get golang.org/x/lint/golint
|
||||
|
||||
- name: test
|
||||
run: make
|
||||
|
||||
29
.travis.yml
@@ -1,29 +0,0 @@
|
||||
language: go
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
go:
|
||||
- "1.10.x"
|
||||
- "1.11.x"
|
||||
- "1.11.7"
|
||||
- "1.12.x"
|
||||
install:
|
||||
- go get golang.org/x/lint/golint
|
||||
script:
|
||||
- make
|
||||
deploy:
|
||||
- provider: script
|
||||
script: scripts/dev/travis-docker-push
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: master
|
||||
go: '1.11.7'
|
||||
- provider: script
|
||||
script: contrib/dnsmasq/travis-deploy
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: dnsmasq
|
||||
# pick one, so travis deploys once
|
||||
go: '1.10.x'
|
||||
notifications:
|
||||
email: change
|
||||
29
CHANGES.md
@@ -4,6 +4,33 @@ Notable changes between releases.
|
||||
|
||||
## Latest
|
||||
|
||||
## v0.9.0
|
||||
|
||||
* Refresh docs and examples for Fedora CoreOS and Flatcar Linux ([#815](https://github.com/poseidon/matchbox/pull/815), [#816](https://github.com/poseidon/matchbox/pull/816))
|
||||
* Update Kubernetes manifest examples ([#791](https://github.com/poseidon/matchbox/pull/791), [#817](https://github.com/poseidon/matchbox/pull/817))
|
||||
* Update Matchbox container image publishing ([#795](https://github.com/poseidon/matchbox/pull/795))
|
||||
* Publish Matchbox images from internal infra to Quay (`quay.io/poseidon/matchbox`)
|
||||
* Update Go version from v1.13.4 to v1.14.9
|
||||
* Update base image from `alpine:3.10` to `alpine:3.12` ([#784](https://github.com/poseidon/matchbox/pull/784))
|
||||
* Include `contrib/k8s` in release tarballs ([#788](https://github.com/poseidon/matchbox/pull/788))
|
||||
* Remove outdated systemd units ([#817](https://github.com/poseidon/matchbox/pull/817))
|
||||
* Remove RPM spec file (Copr publishing stopped in v0.6)
|
||||
|
||||
## v0.8.3
|
||||
|
||||
* Publish docs to [https://matchbox.psdn.io](https://matchbox.psdn.io/) ([#769](https://github.com/poseidon/matchbox/pull/769))
|
||||
* Update Go version from v1.11.7 to v1.13.4 ([#766](https://github.com/poseidon/matchbox/pull/766), [#770](https://github.com/poseidon/matchbox/pull/770))
|
||||
* Update container image base from `alpine:3.9` to `alpine:3.10` ([#761](https://github.com/poseidon/matchbox/pull/761))
|
||||
* Include `get-fedora-coreos` convenience script ([#763](https://github.com/poseidon/matchbox/pull/763))
|
||||
* Remove Kubernetes provisioning examples ([#759](https://github.com/poseidon/matchbox/pull/759))
|
||||
* Remove rkt tutorials and docs ([#765](https://github.com/poseidon/matchbox/pull/765))
|
||||
|
||||
## v0.8.1 - v0.8.2
|
||||
|
||||
Releases `v0.8.1` and `v0.8.2` were not built cleanly
|
||||
|
||||
* Release tags and container images have been removed
|
||||
* Caused by go get golint (module-aware) mutating `go.mod` on Travis (see [#775](https://github.com/poseidon/matchbox/pull/775), [#777](https://github.com/poseidon/matchbox/pull/777))
|
||||
|
||||
## v0.8.0
|
||||
|
||||
@@ -82,7 +109,7 @@ Note: Release signing key [has changed](https://github.com/poseidon/matchbox/blo
|
||||
* Use etcd3 by default in all clusters (remove etcd2 clusters)
|
||||
* Add Terraform examples for etcd3 and self-hosted Kubernetes 1.6.1
|
||||
|
||||
## v0.5.0 (2017-01-23)
|
||||
## v0.5.0 (2017-01-23)
|
||||
|
||||
* Rename project to CoreOS `matchbox`!
|
||||
* Add Profile `args` field to list kernel args
|
||||
|
||||
@@ -1,77 +1,5 @@
|
||||
# How to Contribute
|
||||
# Contributing
|
||||
|
||||
CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via
|
||||
GitHub pull requests. This document outlines some of the conventions on
|
||||
development workflow, commit message formatting, contact points and other
|
||||
resources to make it easier to get your contribution accepted.
|
||||
## Developer Certificate of Origin
|
||||
|
||||
# Certificate of Origin
|
||||
|
||||
By contributing to this project you agree to the Developer Certificate of
|
||||
Origin (DCO). This document was created by the Linux Kernel community and is a
|
||||
simple statement that you, as a contributor, have the legal right to make the
|
||||
contribution. See the [DCO](DCO) file for details.
|
||||
|
||||
# Email and Chat
|
||||
|
||||
The project currently uses the general CoreOS email list and IRC channel:
|
||||
- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev)
|
||||
- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org
|
||||
|
||||
Please avoid emailing maintainers found in the MAINTAINERS file directly. They
|
||||
are very busy and read the mailing lists.
|
||||
|
||||
## Getting Started
|
||||
|
||||
- Fork the repository on GitHub
|
||||
- Read the [README](README.md) for build and test instructions
|
||||
- Play with the project, submit bugs, submit patches!
|
||||
|
||||
## Contribution Flow
|
||||
|
||||
This is a rough outline of what a contributor's workflow looks like:
|
||||
|
||||
- Create a topic branch from where you want to base your work (usually master).
|
||||
- Make commits of logical units.
|
||||
- Make sure your commit messages are in the proper format (see below).
|
||||
- Push your changes to a topic branch in your fork of the repository.
|
||||
- Make sure the tests pass, and add any new tests as appropriate.
|
||||
- Submit a pull request to the original repository.
|
||||
|
||||
Thanks for your contributions!
|
||||
|
||||
### Coding Style
|
||||
|
||||
CoreOS projects written in Go follow a set of style guidelines that we've documented
|
||||
[here](https://github.com/coreos/docs/tree/master/golang). Please follow them when
|
||||
working on your contributions.
|
||||
|
||||
### Format of the Commit Message
|
||||
|
||||
We follow a rough convention for commit messages that is designed to answer two
|
||||
questions: what changed and why. The subject line should feature the what and
|
||||
the body of the commit should describe the why.
|
||||
|
||||
```
|
||||
scripts: add the test-cluster command
|
||||
|
||||
this uses tmux to setup a test cluster that you can easily kill and
|
||||
start for debugging.
|
||||
|
||||
Fixes #38
|
||||
```
|
||||
|
||||
The format can be described more formally as follows:
|
||||
|
||||
```
|
||||
<subsystem>: <what changed>
|
||||
<BLANK LINE>
|
||||
<why this change was made>
|
||||
<BLANK LINE>
|
||||
<footer>
|
||||
```
|
||||
|
||||
The first line is the subject and should be no longer than 70 characters, the
|
||||
second line is always blank, and other lines should be wrapped at 80 characters.
|
||||
This allows the message to be easier to read on GitHub as well as in various
|
||||
git tools.
|
||||
By contributing, you agree to the Linux Foundation's Developer Certificate of Origin ([DCO](DCO)). The DCO is a statement that you, the contributor, have the legal right to make your contribution and understand the contribution will be distributed as part of this project.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine:3.9
|
||||
FROM docker.io/alpine:3.12
|
||||
LABEL maintainer="Dalton Hubble <dghubble@gmail.com>"
|
||||
COPY bin/matchbox /matchbox
|
||||
EXPOSE 8080
|
||||
|
||||
@@ -1,147 +0,0 @@
|
||||
# Upgrading self-hosted Kubernetes
|
||||
|
||||
CoreOS Kubernetes clusters "self-host" the apiserver, scheduler, controller-manager, flannel, kube-dns, and kube-proxy as Kubernetes pods, like ordinary applications (except with taint tolerations). This allows upgrades to be performed in-place using (mostly) `kubectl` as an alternative to re-provisioning.
|
||||
|
||||
Let's upgrade a Kubernetes v1.6.6 cluster to v1.6.7 as an example.
|
||||
|
||||
## Stability
|
||||
|
||||
This guide shows how to attempt an in-place upgrade of a Kubernetes cluster set up via the [examples](../examples). It does not provide exact diffs, migrations between breaking changes, the stability of a fresh re-provision, or any guarantees. Evaluate whether in-place updates are appropriate for your Kubernetes cluster and be prepared to perform a fresh re-provision if something goes wrong, especially between Kubernetes minor releases (e.g. 1.6 to 1.7).
|
||||
|
||||
Matchbox Kubernetes examples provide a vanilla Kubernetes cluster with only free (as in freedom and cost) software components. If you require curated updates, migrations, or guarantees for production, consider [Tectonic](https://coreos.com/tectonic/) by CoreOS.
|
||||
|
||||
**Note: Tectonic users should NOT manually upgrade. Follow the [Tectonic docs](https://coreos.com/tectonic/docs/latest/admin/upgrade.html)**
|
||||
|
||||
## Inspect
|
||||
|
||||
Show the control plane daemonsets and deployments which will need to be updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get daemonsets -n=kube-system
|
||||
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE-SELECTOR AGE
|
||||
kube-apiserver 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
kube-etcd-network-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
kube-flannel 4 4 4 4 4 <none> 21d
|
||||
kube-proxy 4 4 4 4 4 <none> 21d
|
||||
pod-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
|
||||
$ kubectl get deployments -n=kube-system
|
||||
kube-controller-manager 2 2 2 2 21d
|
||||
kube-dns 1 1 1 1 21d
|
||||
kube-scheduler 2 2 2 2 21d
|
||||
```
|
||||
|
||||
Check the current Kubernetes version.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.6+coreos.1", GitCommit:"42a5c8b99c994a51d9ceaed5d0254f177e97d419", GitTreeState:"clean", BuildDate:"2017-06-21T01:10:07Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 21d v1.6.6+coreos.1
|
||||
node2.example.com Ready 21d v1.6.6+coreos.1
|
||||
node3.example.com Ready 21d v1.6.6+coreos.1
|
||||
node4.example.com Ready 21d v1.6.6+coreos.1
|
||||
```
|
||||
|
||||
## Strategy
|
||||
|
||||
Update control plane components with `kubectl`. Then update the `kubelet` systemd unit on each host.
|
||||
|
||||
Prepare the changes to the Kubernetes manifests by generating assets for a target Kubernetes cluster (e.g. bootkube `v0.5.0` produces Kubernetes 1.6.6 and bootkube `v0.5.1` produces Kubernetes 1.6.7). Choose the tool used during creation of the cluster:
|
||||
|
||||
* [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) - install the `bootkube` binary for the target version and render assets
|
||||
* [poseidon/bootkube-terraform](https://github.com/poseidon/bootkube-terraform) - checkout the tag for the target version and `terraform apply` to render assets
|
||||
|
||||
Diff the generated assets against the assets used when originally creating the cluster. In simple cases, you may only need to bump the hyperkube image. In more complex cases, some manifests may have new flags or configuration.
|
||||
|
||||
## Control Plane
|
||||
|
||||
### kube-apiserver
|
||||
|
||||
Edit the `kube-apiserver` daemonset to rolling update the apiserver.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kube-apiserver -n=kube-system
|
||||
```
|
||||
|
||||
If you only have one apiserver, the cluster may be momentarily unavailable.
|
||||
|
||||
### kube-scheduler
|
||||
|
||||
Edit the `kube-scheduler` deployment to rolling update the scheduler.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-scheduler -n=kube-system
|
||||
```
|
||||
|
||||
### kube-controller-manager
|
||||
|
||||
Edit the `kube-controller-manager` deployment to rolling update the controller manager.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-controller-manager -n=kube-system
|
||||
```
|
||||
|
||||
### kube-proxy
|
||||
|
||||
Edit the `kube-proxy` daemonset to rolling update the proxy.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kube-proxy -n=kube-system
|
||||
```
|
||||
|
||||
### Others
|
||||
|
||||
If there are changes between the prior version and target version manifests, update the `kube-dns` deployment, `kube-flannel` daemonset, or `pod-checkpointer` daemonset.
|
||||
|
||||
### Verify
|
||||
|
||||
Verify the control plane components updated.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.7+coreos.0", GitCommit:"c8c505ee26ac3ab4d1dff506c46bc5538bc66733", GitTreeState:"clean", BuildDate:"2017-07-06T17:38:33Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 21d v1.6.7+coreos.0
|
||||
node2.example.com Ready 21d v1.6.7+coreos.0
|
||||
node3.example.com Ready 21d v1.6.7+coreos.0
|
||||
node4.example.com Ready 21d v1.6.7+coreos.0
|
||||
```
|
||||
|
||||
## kubelet
|
||||
|
||||
SSH to each node and update `/etc/kubernetes/kubelet.env`. Restart the `kubelet.service`.
|
||||
|
||||
```sh
|
||||
ssh core@node1.example.com
|
||||
sudo vim /etc/kubernetes/kubelet.env
|
||||
sudo systemctl restart kubelet
|
||||
```
|
||||
|
||||
### Verify
|
||||
|
||||
Verify the kubelet and kube-proxy of each node updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
```
|
||||
|
||||
Kubernetes control plane components have been successfully updated!
|
||||
@@ -1,139 +0,0 @@
|
||||
# Kubernetes
|
||||
|
||||
The Kubernetes example provisions a 3 node Kubernetes v1.8.5 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes.
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use Docker to start `matchbox`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* `/etc/hosts` entries for `node[1-3].example.com`
|
||||
|
||||
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.9.1 and add it on your $PATH.
|
||||
|
||||
```sh
|
||||
$ bootkube version
|
||||
Version: v0.9.1
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [bootkube](../examples/groups/bootkube) - iPXE boot a self-hosted Kubernetes cluster
|
||||
* [bootkube-install](../examples/groups/bootkube-install) - Install a self-hosted Kubernetes cluster
|
||||
|
||||
## Assets
|
||||
|
||||
Download the CoreOS Container Linux image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1967.3.0 ./examples/assets
|
||||
```
|
||||
|
||||
Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
|
||||
```json
|
||||
{
|
||||
"profile": "bootkube-worker",
|
||||
"metadata": {
|
||||
"ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Set the `--network-provider` to `flannel` (default) or `experimental-calico` if desired.
|
||||
|
||||
```sh
|
||||
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379
|
||||
```
|
||||
|
||||
Later, a controller will use `bootkube` to bootstrap these manifests and the credentials will be used to access your cluster.
|
||||
|
||||
## Containers
|
||||
|
||||
Use docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with Docker](getting-started-docker.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot Container Linux and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install Container Linux and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## bootkube
|
||||
|
||||
We're ready to use bootkube to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
|
||||
|
||||
Secure copy the etcd TLS assets to `/etc/ssl/etcd/*` on **every controller** node.
|
||||
|
||||
```sh
|
||||
for node in 'node1'; do
|
||||
scp -r assets/tls/etcd-* assets/tls/etcd core@$node.example.com:/home/core/
|
||||
ssh core@$node.example.com 'sudo mkdir -p /etc/ssl/etcd && sudo mv etcd-* etcd /etc/ssl/etcd/ && sudo chown -R etcd:etcd /etc/ssl/etcd && sudo chmod -R 500 /etc/ssl/etcd/'
|
||||
done
|
||||
```
|
||||
|
||||
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every node** to path activate the `kubelet.service`.
|
||||
|
||||
```sh
|
||||
for node in 'node1' 'node2' 'node3'; do
|
||||
scp assets/auth/kubeconfig core@$node.example.com:/home/core/kubeconfig
|
||||
ssh core@$node.example.com 'sudo mv kubeconfig /etc/kubernetes/kubeconfig'
|
||||
done
|
||||
```
|
||||
|
||||
Secure copy the `bootkube` generated assets to **any controller** node and run `bootkube-start` (takes ~10 minutes).
|
||||
|
||||
```sh
|
||||
scp -r assets core@node1.example.com:/home/core
|
||||
ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
|
||||
```
|
||||
|
||||
Watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
|
||||
|
||||
```sh
|
||||
$ ssh core@node1.example.com 'journalctl -f -u bootkube'
|
||||
[ 299.241291] bootkube[5]: Pod Status: kube-api-checkpoint Running
|
||||
[ 299.241618] bootkube[5]: Pod Status: kube-apiserver Running
|
||||
[ 299.241804] bootkube[5]: Pod Status: kube-scheduler Running
|
||||
[ 299.241993] bootkube[5]: Pod Status: kube-controller-manager Running
|
||||
[ 299.311743] bootkube[5]: All self-hosted control plane components successfully started
|
||||
```
|
||||
|
||||
[Verify](#verify) the Kubernetes cluster is accessible once complete. Then install **important** cluster [addons](cluster-addons.md). You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
|
||||
|
||||
```sh
|
||||
$ export KUBECONFIG=assets/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 11m v1.8.5
|
||||
node2.example.com Ready 11m v1.8.5
|
||||
node3.example.com Ready 11m v1.8.5
|
||||
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system kube-apiserver-zd1k3 1/1 Running 0 7m
|
||||
kube-system kube-controller-manager-762207937-2ztxb 1/1 Running 0 7m
|
||||
kube-system kube-controller-manager-762207937-vf6bk 1/1 Running 1 7m
|
||||
kube-system kube-dns-2431531914-qc752 3/3 Running 0 7m
|
||||
kube-system kube-flannel-180mz 2/2 Running 1 7m
|
||||
kube-system kube-flannel-jjr0x 2/2 Running 0 7m
|
||||
kube-system kube-flannel-mlr9w 2/2 Running 0 7m
|
||||
kube-system kube-proxy-0jlq7 1/1 Running 0 7m
|
||||
kube-system kube-proxy-k4mjl 1/1 Running 0 7m
|
||||
kube-system kube-proxy-l4xrd 1/1 Running 0 7m
|
||||
kube-system kube-scheduler-1873228005-5d2mk 1/1 Running 0 7m
|
||||
kube-system kube-scheduler-1873228005-s4w27 1/1 Running 0 7m
|
||||
kube-system pod-checkpointer-hb960 1/1 Running 0 7m
|
||||
kube-system pod-checkpointer-hb960-node1.example.com 1/1 Running 0 6m
|
||||
```
|
||||
|
||||
## Addons
|
||||
|
||||
Install **important** cluster [addons](cluster-addons.md).
|
||||
|
||||
## Going further
|
||||
|
||||
[Learn](bootkube-upgrades.md) to upgrade a self-hosted Kubernetes cluster.
|
||||
@@ -1,30 +0,0 @@
|
||||
## Cluster Addons
|
||||
|
||||
Kubernetes clusters run cluster addons atop Kubernetes itself. Addons may be considered essential for bootstrapping (non-optional), important (highly recommended), or optional.
|
||||
|
||||
## Essential
|
||||
|
||||
Several addons are considered essential. CoreOS cluster creation tools ensure these addons are included. Kubernetes clusters deployed via the Matchbox examples or using our Terraform Modules include these addons as well.
|
||||
|
||||
### kube-proxy
|
||||
|
||||
`kube-proxy` is deployed as a DaemonSet.
|
||||
|
||||
### kube-dns
|
||||
|
||||
`kube-dns` is deployed as a Deployment.
|
||||
|
||||
## Important
|
||||
|
||||
### Container Linux Update Operator
|
||||
|
||||
The [Container Linux Update Operator](https://github.com/coreos/container-linux-update-operator) (i.e. CLUO) coordinates reboots of auto-updating Container Linux nodes so that one node reboots at a time and nodes are drained before reboot. CLUO enables the auto-update behavior Container Linux clusters are known for, but does it in a Kubernetes native way. Deploying CLUO is strongly recommended.
|
||||
|
||||
Create the `update-operator` deployment and `update-agent` DaemonSet.
|
||||
|
||||
```
|
||||
kubectl apply -f examples/addons/cluo/update-operator.yaml
|
||||
kubectl apply -f examples/addons/cluo/update-agent.yaml
|
||||
```
|
||||
|
||||
*Note, CLUO replaces `locksmithd` reboot coordination. The `update_engine` systemd unit on hosts still performs the Container Linux update check, download, and install to the inactive partition.*
|
||||
@@ -1,198 +0,0 @@
|
||||
# Getting started
|
||||
|
||||
In this tutorial, we'll show how to use terraform with `matchbox` to provision Container Linux machines.
|
||||
|
||||
You'll install the `matchbox` service, setup a PXE network boot environment, and then use terraform configs to describe your infrastructure and the terraform CLI to create those resources on `matchbox`.
|
||||
|
||||
## matchbox
|
||||
|
||||
Install `matchbox` on a dedicated server or Kubernetes cluster. Generate TLS credentials and enable the gRPC API as directed. Save the `ca.crt`, `client.crt`, and `client.key` on your local machine (e.g. `~/.matchbox`).
|
||||
|
||||
* Installing on [Container Linux / other distros](deployment.md)
|
||||
* Installing on [Kubernetes](deployment.md#kubernetes)
|
||||
* Running with [rkt](deployment.md#rkt) / [docker](deployment.md#docker)
|
||||
|
||||
Verify the matchbox read-only HTTP endpoints are accessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080
|
||||
matchbox
|
||||
```
|
||||
|
||||
Verify your TLS client certificate and key can be used to access the gRPC API.
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 \
|
||||
-CAfile ~/.matchbox/ca.crt \
|
||||
-cert ~/.matchbox/client.crt \
|
||||
-key ~/.matchbox/client.key
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Install [Terraform][terraform-dl] v0.11+ on your system.
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.11.13
|
||||
```
|
||||
|
||||
Add the [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) plugin binary for your system to `~/.terraform.d/plugins/`, noting the final name.
|
||||
|
||||
```sh
|
||||
wget https://github.com/poseidon/terraform-provider-matchbox/releases/download/v0.2.3/terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
tar xzf terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
mv terraform-provider-matchbox-v0.2.3-linux-amd64/terraform-provider-matchbox ~/.terraform.d/plugins/terraform-provider-matchbox_v0.2.3
|
||||
```
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/poseidon/terraform-provider-matchbox/releases/download/v0.2.3/terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
$ tar xzf terraform-provider-matchbox-v0.2.3-linux-amd64.tar.gz
|
||||
```
|
||||
|
||||
## First cluster
|
||||
|
||||
Clone the matchbox source and take a look at the Terraform examples.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/poseidon/matchbox.git
|
||||
$ cd matchbox/examples/terraform
|
||||
```
|
||||
|
||||
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install Container Linux to `/dev/sda`, reboot, and have your SSH key set. It's not much of a cluster, but we'll get to that later.
|
||||
|
||||
```sh
|
||||
$ cd simple-install
|
||||
```
|
||||
|
||||
Configure the variables in `variables.tf` by creating a `terraform.tfvars` file.
|
||||
|
||||
```hcl
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
ssh_authorized_key = "YOUR_SSH_KEY"
|
||||
```
|
||||
|
||||
Terraform can now interact with the matchbox service and create resources.
|
||||
|
||||
```sh
|
||||
$ terraform plan
|
||||
Plan: 4 to add, 0 to change, 0 to destroy.
|
||||
```
|
||||
|
||||
Let's review the terraform config and learn a bit about matchbox.
|
||||
|
||||
#### Provider
|
||||
|
||||
Matchbox is configured as a provider platform for bare-metal resources.
|
||||
|
||||
```hcl
|
||||
// Configure the matchbox provider
|
||||
provider "matchbox" {
|
||||
endpoint = "${var.matchbox_rpc_endpoint}"
|
||||
client_cert = "${file("~/.matchbox/client.crt")}"
|
||||
client_key = "${file("~/.matchbox/client.key")}"
|
||||
ca = "${file("~/.matchbox/ca.crt")}"
|
||||
}
|
||||
```
|
||||
|
||||
#### Profiles
|
||||
|
||||
Machine profiles specify the kernel, initrd, kernel args, Container Linux Config, Cloud-config, or other configs used to network boot and provision a bare-metal machine. This profile will PXE boot machines using the current stable Container Linux kernel and initrd (see [assets](api.md#assets) to learn about caching for speed) and supply a Container Linux Config specifying that a disk install and reboot should be performed. Learn more about [Container Linux configs](https://coreos.com/os/docs/latest/configuration.html).
|
||||
|
||||
```hcl
|
||||
// Create a CoreOS-install profile
|
||||
resource "matchbox_profile" "coreos-install" {
|
||||
name = "coreos-install"
|
||||
kernel = "https://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe.vmlinuz"
|
||||
initrd = [
|
||||
"https://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe_image.cpio.gz"
|
||||
]
|
||||
args = [
|
||||
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
]
|
||||
container_linux_config = "${file("./cl/coreos-install.yaml.tmpl")}"
|
||||
}
|
||||
```
|
||||
|
||||
#### Groups
|
||||
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and templates in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
|
||||
```hcl
|
||||
resource "matchbox_group" "default" {
|
||||
name = "default"
|
||||
profile = "${matchbox_profile.coreos-install.name}"
|
||||
# no selector means all machines can be matched
|
||||
metadata {
|
||||
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Apply
|
||||
|
||||
Apply the terraform configuration.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
Matchbox serves configs to machines and respects query parameters, if you're interested:
|
||||
|
||||
* iPXE default - [/ipxe](http://matchbox.example.com:8080/ipxe)
|
||||
* Ignition default - [/ignition](http://matchbox.example.com:8080/ignition)
|
||||
* Ignition post-install - [/ignition?os=installed](http://matchbox.example.com:8080/ignition?os=installed)
|
||||
* GRUB default - [/grub](http://matchbox.example.com:8080/grub)
|
||||
|
||||
## Network
|
||||
|
||||
Matchbox can integrate with many on-premise network setups. It does not seek to be the DHCP server, TFTP server, or DNS server for the network. Instead, matchbox serves iPXE scripts and GRUB configs as the entrypoint for provisioning network booted machines. PXE clients are supported by chainloading iPXE firmware.
|
||||
|
||||
In the simplest case, an iPXE-enabled network can chain to matchbox,
|
||||
|
||||
```
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://matchbox.foo:8080/boot.ipxe
|
||||
```
|
||||
|
||||
Read [network-setup.md](network-setup.md) for the complete range of options. Network admins have a great amount of flexibility:
|
||||
|
||||
* May keep using existing DHCP, TFTP, and DNS services
|
||||
* May configure subnets, architectures, or specific machines to delegate to matchbox
|
||||
* May place matchbox behind a menu entry (timeout and default to matchbox)
|
||||
|
||||
If you've never setup a PXE-enabled network before or you're trying to setup a home lab, checkout the [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image [copy-paste examples](https://github.com/poseidon/matchbox/blob/master/Documentation/network-setup.md#coreosdnsmasq) and see the section about [proxy-DHCP](https://github.com/poseidon/matchbox/blob/master/Documentation/network-setup.md#proxy-dhcp).
|
||||
|
||||
## Boot
|
||||
|
||||
Its time to network boot your machines. Use the BMC's remote management capablities (may be vendor-specific) to set the boot device (on the next boot only) to PXE and power on each machine.
|
||||
|
||||
```sh
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power off
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS chassis bootdev pxe
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power on
|
||||
```
|
||||
|
||||
Each machine should chainload iPXE, delegate to `matchbox`, receive its iPXE config (or other supported configs) and begin the provisioning process. The `simple-install` example assumes your machines are configured to boot from disk first and PXE only when requested, but you can write profiles for different cases.
|
||||
|
||||
Once the Container Linux install completes and the machine reboots you can SSH,
|
||||
|
||||
```ssh
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
To re-provision the machine for another purpose, run `terraform apply` and PXE boot it again.
|
||||
|
||||
## Going Further
|
||||
|
||||
Matchbox can be used to provision multi-node Container Linux clusters at one or many on-premise sites if deployed in an HA way. Machines can be matched individually by MAC address, UUID, region, or other labels you choose. Installs can be made much faster by caching images in the built-in HTTP [assets](api.md#assets) server.
|
||||
|
||||
[Container Linux configs](https://coreos.com/os/docs/latest/configuration.html) can be used to partition disks and filesystems, write systemd units, write networkd configs or regular files, and create users. Container Linux nodes can be provisioned into a system that meets your needs. Checkout the examples which create a 3 node [etcd](../examples/terraform/etcd3-install) cluster or a 3 node [Kubernetes](../examples/terraform/bootkube-install) cluster.
|
||||
|
||||
[terraform-dl]: https://www.terraform.io/downloads.html
|
||||
@@ -1,66 +0,0 @@
|
||||
# GRUB2 netboot
|
||||
|
||||
Use GRUB to network boot UEFI hardware.
|
||||
|
||||
## Requirements
|
||||
|
||||
For local development, install the dependencies for libvirt with UEFI.
|
||||
|
||||
* [UEFI with QEMU](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU)
|
||||
|
||||
Ensure that you've gone through the [matchbox with docker](getting-started-docker.md) and [matchbox](matchbox.md) guides and understand the basics.
|
||||
|
||||
## Containers
|
||||
|
||||
Run `matchbox` with rkt, but mount the [grub](../examples/groups/grub) group example.
|
||||
|
||||
## Network
|
||||
|
||||
On Fedora, add the `metal0` interface to the trusted zone in your firewall configuration.
|
||||
|
||||
```sh
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
```
|
||||
|
||||
Run the `quay.io/coreos/dnsmasq` container image with rkt or docker.
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.3 quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=172.18.0.50,172.18.0.99 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:efi-bc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efi-bc,grub.efi \
|
||||
--dhcp-userclass=set:grub,GRUB2 \
|
||||
--dhcp-boot=tag:grub,"(http;matchbox.example.com:8080)/grub","172.18.0.2" \
|
||||
--log-queries \
|
||||
--log-dhcp \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:pxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.foo/172.18.0.2
|
||||
```
|
||||
|
||||
## Client VM
|
||||
|
||||
Create UEFI VM nodes which have known hardware attributes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
If you use Docker, run `matchbox` according to [matchbox with Docker](getting-started-docker.md), but mount the [grub](../examples/groups/grub) group example. Then start the `coreos/dnsmasq` Docker image, which bundles a `grub.efi`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;matchbox.foo:8080)/grub","172.17.0.2" --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe --address=/matchbox.foo/172.17.0.2
|
||||
```
|
||||
|
||||
Create a VM to verify the machine network boots.
|
||||
|
||||
```sh
|
||||
$ sudo virt-install --name uefi-test --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```
|
||||
|
Before Width: | Height: | Size: 116 KiB |
|
Before Width: | Height: | Size: 107 KiB |
@@ -1,19 +0,0 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Firewall
|
||||
|
||||
Running DHCP or proxyDHCP with `coreos/dnsmasq` on a host requires that the Firewall allow DHCP and TFTP (for chainloading) services to run.
|
||||
|
||||
## Port collision
|
||||
|
||||
Running DHCP or proxyDHCP can cause port already in use collisions depending on what's running. Fedora runs bootp listening on udp/67 for example. Find the service using the port.
|
||||
|
||||
```sh
|
||||
$ sudo lsof -i :67
|
||||
```
|
||||
|
||||
Evaluate whether you can configure the existing service or whether you'd like to stop it and test with `coreos/dnsmasq`.
|
||||
|
||||
## No boot filename received
|
||||
|
||||
PXE client firmware did not receive a DHCP Offer with PXE-Options after several attempts. If you're using the `coreos/dnsmasq` image with `-d`, each request should log to stdout. Using the wrong `-i` interface is the most common reason DHCP requests are not received. Otherwise, wireshark can be useful for investigating.
|
||||
@@ -1 +0,0 @@
|
||||
Dalton Hubble <dghubble@gmail.com> (@dghubble)
|
||||
43
Makefile
@@ -2,6 +2,7 @@ export CGO_ENABLED:=0
|
||||
export GO111MODULE=on
|
||||
export GOFLAGS=-mod=vendor
|
||||
|
||||
DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
|
||||
VERSION=$(shell git describe --tags --match=v* --always --dirty)
|
||||
LD_FLAGS="-w -X github.com/poseidon/matchbox/matchbox/version.Version=$(VERSION)"
|
||||
|
||||
@@ -34,17 +35,17 @@ lint:
|
||||
fmt:
|
||||
@test -z $$(go fmt ./...)
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image:
|
||||
@sudo docker build --rm=true -t $(LOCAL_REPO):$(VERSION) .
|
||||
@sudo docker tag $(LOCAL_REPO):$(VERSION) $(LOCAL_REPO):latest
|
||||
.PHONY: image
|
||||
image:
|
||||
@buildah bud -t $(LOCAL_REPO):$(VERSION) .
|
||||
@buildah tag $(LOCAL_REPO):$(VERSION) $(LOCAL_REPO):latest
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push: docker-image
|
||||
@sudo docker tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
@sudo docker tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):$(VERSION)
|
||||
@sudo docker push $(IMAGE_REPO):latest
|
||||
@sudo docker push $(IMAGE_REPO):$(VERSION)
|
||||
.PHONY: push
|
||||
push:
|
||||
@buildah tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):$(VERSION)
|
||||
@buildah tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
@buildah push docker://$(IMAGE_REPO):$(VERSION)
|
||||
@buildah push docker://$(IMAGE_REPO):latest
|
||||
|
||||
.PHONY: update
|
||||
update:
|
||||
@@ -55,18 +56,17 @@ update:
|
||||
vendor:
|
||||
@go mod vendor
|
||||
|
||||
.PHONY: codegen
|
||||
codegen: tools
|
||||
@./scripts/dev/codegen
|
||||
protoc/%:
|
||||
podman run --security-opt label=disable \
|
||||
-u root \
|
||||
--mount type=bind,src=$(DIR),target=/mnt/code \
|
||||
quay.io/dghubble/protoc:v3.10.1 \
|
||||
--go_out=plugins=grpc,paths=source_relative:. $*
|
||||
|
||||
.PHONY: tools
|
||||
tools: bin/protoc bin/protoc-gen-go
|
||||
|
||||
bin/protoc:
|
||||
@./scripts/dev/get-protoc
|
||||
|
||||
bin/protoc-gen-go:
|
||||
@go build -o bin/protoc-gen-go $(REPO)/vendor/github.com/golang/protobuf/protoc-gen-go
|
||||
codegen: \
|
||||
protoc/matchbox/storage/storagepb/*.proto \
|
||||
protoc/matchbox/server/serverpb/*.proto \
|
||||
protoc/matchbox/rpc/rpcpb/*.proto
|
||||
|
||||
clean:
|
||||
@rm -rf bin
|
||||
@@ -86,6 +86,7 @@ bin/linux-amd64/matchbox: GOARGS = GOOS=linux GOARCH=amd64
|
||||
bin/linux-arm/matchbox: GOARGS = GOOS=linux GOARCH=arm GOARM=6
|
||||
bin/linux-arm64/matchbox: GOARGS = GOOS=linux GOARCH=arm64
|
||||
bin/darwin-amd64/matchbox: GOARGS = GOOS=darwin GOARCH=amd64
|
||||
bin/linux-ppc64le/matchbox: GOARGS = GOOS=linux GOARCH=ppc64le
|
||||
|
||||
bin/%/matchbox:
|
||||
$(GOARGS) go build -o $@ -ldflags $(LD_FLAGS) -a $(REPO)/cmd/matchbox
|
||||
|
||||
5
NOTICE
@@ -1,5 +0,0 @@
|
||||
CoreOS Project
|
||||
Copyright 2015 CoreOS, Inc
|
||||
|
||||
This product includes software developed at CoreOS, Inc.
|
||||
(http://www.coreos.com/).
|
||||
63
README.md
@@ -1,45 +1,38 @@
|
||||
# matchbox [](https://travis-ci.org/poseidon/matchbox) [](https://godoc.org/github.com/poseidon/matchbox) [](https://quay.io/repository/poseidon/matchbox)
|
||||
# matchbox [](https://github.com/poseidon/matchbox/actions?query=workflow%3Atest+branch%3Amaster) [](https://godoc.org/github.com/poseidon/matchbox) [](https://quay.io/repository/poseidon/matchbox)
|
||||
|
||||
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles that PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [Terraform][terraform].
|
||||
`matchbox` is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. Machines are matched by labels like MAC or UUID during PXE and profiles specify a kernel/initrd, iPXE config, and Ignition config.
|
||||
|
||||
* [Documentation][docs]
|
||||
* [matchbox Service](Documentation/matchbox.md)
|
||||
* [Profiles](Documentation/matchbox.md#profiles)
|
||||
* [Groups](Documentation/matchbox.md#groups)
|
||||
* Config Templates
|
||||
* [Container Linux Config][cl-config]
|
||||
* [Cloud-Config][cloud-config]
|
||||
* [Configuration](Documentation/config.md)
|
||||
* [HTTP API](Documentation/api.md) / [gRPC API](https://godoc.org/github.com/poseidon/matchbox/matchbox/client)
|
||||
* [Background: Machine Lifecycle](Documentation/machine-lifecycle.md)
|
||||
* [Background: PXE Booting](Documentation/network-booting.md)
|
||||
## Features
|
||||
|
||||
### Installation
|
||||
* Chainload via iPXE and match hardware labels
|
||||
* Provision Fedora CoreOS or Flatcar Linux (powered by [Ignition](https://github.com/coreos/ignition))
|
||||
* Authenticated gRPC API for clients (e.g. Terraform)
|
||||
|
||||
* Installation
|
||||
* Installing on [Container Linux / other distros](Documentation/deployment.md)
|
||||
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
|
||||
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
|
||||
* [Network Setup](Documentation/network-setup.md)
|
||||
## Documentation
|
||||
|
||||
### Tutorials
|
||||
* [Docs](https://matchbox.psdn.io/)
|
||||
* [Configuration](docs/config.md)
|
||||
* [HTTP API](docs/api-http.md) / [gRPC API](docs/grpc-api.md)
|
||||
|
||||
* [Getting Started](Documentation/getting-started.md) - provision physical machines with Container Linux
|
||||
* Local QEMU/KVM
|
||||
* [matchbox with Docker](Documentation/getting-started-docker.md)
|
||||
* Clusters
|
||||
* [etcd3](Documentation/getting-started-docker.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node Kubernetes v1.8.5 cluster
|
||||
* Clusters (Terraform-based)
|
||||
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node Kubernetes v1.14.1 cluster
|
||||
## Installation
|
||||
|
||||
Matchbox can be installed from a binary or a container image.
|
||||
|
||||
* Install Matchbox as a [binary](docs/deployment.md#matchbox-binary), as a [container image](docs/deployment.md#container-image), or on [Kubernetes](docs/deployment.md#kubernetes)
|
||||
* Setup a PXE-enabled [network](docs/network-setup.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
Start provisioning machines with Fedora CoreOS or Flatcar Linux.
|
||||
|
||||
* [Terraform Usage](docs/getting-started.md)
|
||||
* Fedora CoreOS (PXE install to disk)
|
||||
* Flatcar Linux (PXE install to disk)
|
||||
* [Local QEMU/KVM](docs/getting-started-docker.md)
|
||||
* Fedora CoreOS (live PXE or PXE install to disk)
|
||||
* Flatcar Linux (live PXE or PXE install to disk)
|
||||
|
||||
## Contrib
|
||||
|
||||
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services with docker or rkt
|
||||
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services as a container
|
||||
* [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
|
||||
|
||||
[docs]: https://coreos.com/matchbox/docs/latest
|
||||
[terraform]: https://github.com/poseidon/terraform-provider-matchbox
|
||||
[cl-config]: Documentation/container-linux-config.md
|
||||
[cloud-config]: Documentation/cloud-config.md
|
||||
|
||||
@@ -1,61 +0,0 @@
|
||||
## CoreOS Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of
|
||||
fostering an open and welcoming community, we pledge to respect all people who
|
||||
contribute through reporting issues, posting feature requests, updating
|
||||
documentation, submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free
|
||||
experience for everyone, regardless of level of experience, gender, gender
|
||||
identity and expression, sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently applying these
|
||||
principles to every aspect of managing this project. Project maintainers who do
|
||||
not follow or enforce the Code of Conduct may be permanently removed from the
|
||||
project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting a project maintainer, Brandon Philips
|
||||
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### CoreOS Events Code of Conduct
|
||||
|
||||
CoreOS events are working conferences intended for professional networking and
|
||||
collaboration in the CoreOS community. Attendees are expected to behave
|
||||
according to professional standards and in accordance with their employer’s
|
||||
policies on appropriate workplace behavior.
|
||||
|
||||
While at CoreOS events or related social networking opportunities, attendees
|
||||
should not engage in discriminatory or offensive speech or actions including
|
||||
but not limited to gender, sexuality, race, age, disability, or religion.
|
||||
Speakers should be especially aware of these concerns.
|
||||
|
||||
CoreOS does not condone any statements by speakers contrary to these standards.
|
||||
CoreOS reserves the right to deny entrance and/or eject from an event (without
|
||||
refund) any individual found to be engaging in discriminatory or offensive
|
||||
speech or actions.
|
||||
|
||||
Please bring any concerns to the immediate attention of designated on-site
|
||||
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine:3.9
|
||||
FROM docker.io/alpine:3.12
|
||||
LABEL maintainer="Dalton Hubble <dghubble@gmail.com>"
|
||||
RUN apk -U add dnsmasq curl
|
||||
COPY tftpboot /var/lib/tftpboot
|
||||
|
||||
@@ -1,24 +1,24 @@
|
||||
DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST))))
|
||||
VERSION=$(shell git rev-parse HEAD)
|
||||
|
||||
IMAGE_REPO=poseidon/dnsmasq
|
||||
QUAY_REPO=quay.io/poseidon/dnsmasq
|
||||
LOCAL_REPO=poseidon/dnsmasq
|
||||
IMAGE_REPO=quay.io/poseidon/dnsmasq
|
||||
|
||||
.PHONY: all
|
||||
all: docker-image
|
||||
all: image
|
||||
|
||||
.PHONY: tftp
|
||||
tftp:
|
||||
@$(DIR)/get-tftp-files
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image: tftp
|
||||
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
.PHONY: image
|
||||
image: tftp
|
||||
@buildah bud -t $(LOCAL_REPO):$(VERSION) .
|
||||
@buildah tag $(LOCAL_REPO):$(VERSION) $(LOCAL_REPO):latest
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push:
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):latest
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):$(VERSION)
|
||||
@sudo docker push $(QUAY_REPO):latest
|
||||
@sudo docker push $(QUAY_REPO):$(VERSION)
|
||||
.PHONY: push
|
||||
push:
|
||||
@buildah tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):$(VERSION)
|
||||
@buildah tag $(LOCAL_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
@buildah push docker://$(IMAGE_REPO):$(VERSION)
|
||||
@buildah push docker://$(IMAGE_REPO):latest
|
||||
|
||||
@@ -9,12 +9,10 @@ The image bundles `undionly.kpxe`, `ipxe.efi`, and `grub.efi` (experimental) for
|
||||
Run the container image as a DHCP, DNS, and TFTP service.
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/poseidon/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
@@ -30,27 +28,7 @@ sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
Press ^] three times to stop the rkt pod. Press ctrl-C to stop the Docker container.
|
||||
Press ctrl-C to stop the Docker container.
|
||||
|
||||
## Configuration Flags
|
||||
|
||||
@@ -74,6 +52,6 @@ make docker-image
|
||||
Run the image with Docker on the `docker0` bridge (default).
|
||||
|
||||
```
|
||||
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
|
||||
sudo docker run --rm --cap-add=NET_ADMIN poseidon/dnsmasq -d -q
|
||||
```
|
||||
|
||||
|
||||
@@ -11,6 +11,3 @@ fi
|
||||
curl -s -o $DEST/undionly.kpxe http://boot.ipxe.org/undionly.kpxe
|
||||
cp $DEST/undionly.kpxe $DEST/undionly.kpxe.0
|
||||
curl -s -o $DEST/ipxe.efi http://boot.ipxe.org/ipxe.efi
|
||||
|
||||
# Any vaguely recent CoreOS grub.efi is fine
|
||||
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1353.7.0/coreos_production_pxe_grub.efi
|
||||
|
||||
@@ -25,6 +25,3 @@ address=/node3.example.com/172.18.0.23
|
||||
address=/node4.example.com/172.18.0.24
|
||||
address=/cluster.example.com/172.18.0.21
|
||||
|
||||
# for a Tectonic test, ignore
|
||||
address=/tectonic.example.com/172.18.0.22
|
||||
address=/tectonic.example.com/172.18.0.23
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
# dirty hack
|
||||
cd "$(dirname $0)"
|
||||
|
||||
docker info
|
||||
make docker-image
|
||||
docker login -u="$DOCKER_USERNAME" -p=$DOCKER_PASSWORD quay.io
|
||||
make docker-push
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: matchbox
|
||||
@@ -7,15 +7,20 @@ spec:
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: matchbox
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: matchbox
|
||||
phase: prod
|
||||
spec:
|
||||
securityContext:
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
containers:
|
||||
- name: matchbox
|
||||
image: quay.io/poseidon/matchbox:v0.8.0
|
||||
image: quay.io/poseidon/matchbox:v0.9.0
|
||||
env:
|
||||
- name: MATCHBOX_ADDRESS
|
||||
value: "0.0.0.0:8080"
|
||||
@@ -28,10 +33,18 @@ spec:
|
||||
containerPort: 8080
|
||||
- name: https
|
||||
containerPort: 8081
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 5
|
||||
httpGet:
|
||||
path: /
|
||||
port: 8080
|
||||
resources:
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "50Mi"
|
||||
cpu: 30m
|
||||
memory: 20Mi
|
||||
limits:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/matchbox
|
||||
@@ -39,9 +52,6 @@ spec:
|
||||
mountPath: /var/lib/matchbox
|
||||
- name: assets
|
||||
mountPath: /var/lib/matchbox/assets
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: config
|
||||
secret:
|
||||
43
contrib/k8s/ingress.yaml
Normal file
@@ -0,0 +1,43 @@
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-redirect: "false"
|
||||
spec:
|
||||
ingressClassName: public
|
||||
# tls ... optional
|
||||
rules:
|
||||
- host: matchbox.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: matchbox
|
||||
port:
|
||||
number: 8080
|
||||
---
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox-rpc
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
ingressClassName: public
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox-rpc.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: matchbox
|
||||
port:
|
||||
number: 8081
|
||||
@@ -1,32 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
spec:
|
||||
rules:
|
||||
- host: matchbox.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: matchbox
|
||||
servicePort: 8080
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox-rpc.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: matchbox
|
||||
servicePort: 8081
|
||||
@@ -6,7 +6,6 @@ spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
name: matchbox
|
||||
phase: prod
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
@@ -1,86 +0,0 @@
|
||||
%global import_path github.com/coreos/matchbox
|
||||
%global repo matchbox
|
||||
%global debug_package %{nil}
|
||||
|
||||
Name: matchbox
|
||||
Version: 0.6.0
|
||||
Release: 2%{?dist}
|
||||
Summary: Network boot and provision CoreOS machines
|
||||
License: ASL 2.0
|
||||
URL: https://%{import_path}
|
||||
Source0: https://%{import_path}/archive/v%{version}/%{name}-%{version}.tar.gz
|
||||
|
||||
|
||||
BuildRequires: golang
|
||||
BuildRequires: systemd
|
||||
%{?systemd_requires}
|
||||
|
||||
Requires(pre): shadow-utils
|
||||
|
||||
%description
|
||||
matchbox is a service that matches machines to profiles to PXE boot and provision
|
||||
clusters. Profiles specify the kernel/initrd, kernel args, iPXE config, GRUB
|
||||
config, Container Linux config, Cloud-config, or other configs. matchbox provides
|
||||
a read-only HTTP API for machines and an authenticated gRPC API for clients.
|
||||
|
||||
# Limit to architectures supported by golang or gcc-go compilers
|
||||
ExclusiveArch: %{go_arches}
|
||||
# Use golang or gcc-go compiler depending on architecture
|
||||
BuildRequires: compiler(golang)
|
||||
|
||||
%prep
|
||||
%setup -q -n %{repo}-%{version}
|
||||
|
||||
%build
|
||||
# create a Go workspace with a symlink to builddir source
|
||||
mkdir -p src/github.com/coreos
|
||||
ln -s ../../../ src/github.com/coreos/matchbox
|
||||
export GOPATH=$(pwd):%{gopath}
|
||||
export GO15VENDOREXPERIMENT=1
|
||||
function gobuild { go build -a -ldflags "-w -X github.com/coreos/matchbox/matchbox/version.Version=v%{version}" "$@"; }
|
||||
gobuild -o bin/matchbox %{import_path}/cmd/matchbox
|
||||
|
||||
%install
|
||||
install -d %{buildroot}/%{_bindir}
|
||||
install -d %{buildroot}%{_sharedstatedir}/%{name}
|
||||
install -p -m 0755 bin/matchbox %{buildroot}/%{_bindir}
|
||||
# systemd service unit
|
||||
mkdir -p %{buildroot}%{_unitdir}
|
||||
cp contrib/systemd/%{name}.service %{buildroot}%{_unitdir}/
|
||||
|
||||
%files
|
||||
%doc README.md CHANGES.md CONTRIBUTING.md LICENSE NOTICE DCO
|
||||
%{_bindir}/matchbox
|
||||
%{_sharedstatedir}/%{name}
|
||||
%{_unitdir}/%{name}.service
|
||||
|
||||
%pre
|
||||
getent group matchbox >/dev/null || groupadd -r matchbox
|
||||
getent passwd matchbox >/dev/null || \
|
||||
useradd -r -g matchbox -s /sbin/nologin matchbox
|
||||
|
||||
%post
|
||||
%systemd_post matchbox.service
|
||||
|
||||
%preun
|
||||
%systemd_preun matchbox.service
|
||||
|
||||
%postun
|
||||
%systemd_postun_with_restart matchbox.service
|
||||
|
||||
%changelog
|
||||
* Mon Apr 24 2017 <dalton.hubble@coreos.com> - 0.6.0-1
|
||||
- New support for terraform-provider-matchbox plugin
|
||||
- Add ProfileDelete, GroupDelete, IgnitionGet and IgnitionDelete gRPC endpoints
|
||||
- Generate code with gRPC v1.2.1 and matching Go protoc-gen-go plugin
|
||||
- Update Ignition to v0.14.0 and coreos-cloudinit to v1.13.0
|
||||
- New documentation at https://coreos.com/matchbox/docs/latest
|
||||
* Wed Jan 25 2017 <dalton.hubble@coreos.com> - 0.5.0-1
|
||||
- Rename project from bootcfg to matchbox
|
||||
* Sat Dec 3 2016 <dalton.hubble@coreos.com> - 0.4.1-3
|
||||
- Add missing ldflags which caused bootcfg -version to report wrong version
|
||||
* Fri Dec 2 2016 <dalton.hubble@coreos.com> - 0.4.1-2
|
||||
- Fix bootcfg user creation
|
||||
* Fri Dec 2 2016 <dalton.hubble@coreos.com> - 0.4.1-1
|
||||
- Initial package
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/poseidon/matchbox"
|
||||
Environment="VERSION=v0.8.0"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
Environment="MATCHBOX_LOG_LEVEL=debug"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--net=host \
|
||||
--inherit-env \
|
||||
--trust-keys-from-https \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--mount volume=config,target=/etc/matchbox \
|
||||
--volume data,kind=host,source=/var/lib/matchbox \
|
||||
--volume config,kind=host,source=/etc/matchbox \
|
||||
${IMAGE}:${VERSION}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,16 +0,0 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
User=matchbox
|
||||
Group=matchbox
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStart=/usr/local/bin/matchbox
|
||||
|
||||
# systemd.exec
|
||||
ProtectHome=yes
|
||||
ProtectSystem=full
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,22 +0,0 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/poseidon/matchbox"
|
||||
Environment="VERSION=v0.8.0"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--net=host \
|
||||
--inherit-env \
|
||||
--trust-keys-from-https \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--mount volume=config,target=/etc/matchbox \
|
||||
--volume data,kind=host,source=/var/lib/matchbox \
|
||||
--volume config,kind=host,source=/etc/matchbox \
|
||||
${IMAGE}:${VERSION}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,16 +1,16 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
Description=Matchbox Server
|
||||
Documentation=https://github.com/poseidon/matchbox
|
||||
|
||||
[Service]
|
||||
User=matchbox
|
||||
Group=matchbox
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStart=/usr/bin/matchbox
|
||||
ExecStart=/usr/local/bin/matchbox
|
||||
|
||||
# systemd.exec
|
||||
ProtectHome=yes
|
||||
ProtectSystem=full
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
WantedBy=multi-user.target
|
||||
|
||||
16
docs/api-grpc.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# gRPC API
|
||||
|
||||
## Protos
|
||||
|
||||
* [rpc.proto](https://github.com/poseidon/matchbox/blob/master/matchbox/rpc/rpcpb/rpc.proto)
|
||||
* [storage.proto](https://github.com/poseidon/matchbox/blob/master/matchbox/storage/storagepb/storage.proto)
|
||||
|
||||
## Client Libraries
|
||||
|
||||
gRPC client libraries
|
||||
|
||||
* [Go](https://godoc.org/github.com/poseidon/matchbox/matchbox/client)
|
||||
|
||||
## Client Plugins
|
||||
|
||||
* [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox)
|
||||
@@ -1,7 +1,7 @@
|
||||
# Cloud Config
|
||||
|
||||
# Cloud config
|
||||
|
||||
**Note:** Please migrate to [Container Linux Configs](container-linux-config.md). Cloud-Config support will be removed in the future.
|
||||
!!! warning
|
||||
Migrate to [Container Linux Configs](container-linux-config.md). Cloud-Config support will be removed in the future.
|
||||
|
||||
CoreOS Cloud-Config is a system for configuring machines with a Cloud-Config file or executable script from user-data. Cloud-Config runs in userspace on each boot and implements a subset of the [cloud-init spec](http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data). See the cloud-config [docs](https://coreos.com/os/docs/latest/cloud-config.html) for details.
|
||||
|
||||
@@ -18,13 +18,12 @@ Cloud-Config template files can be added in `/var/lib/matchbox/cloud` or in a `c
|
||||
|
||||
## Reference
|
||||
|
||||
Reference a Cloud-Config in a [Profile](matchbox.md#profiles) with `cloud_id`. When PXE booting, use the kernel option `cloud-config-url` to point to `matchbox` [cloud-config endpoint](api.md#cloud-config).
|
||||
Reference a Cloud-Config in a [Profile](matchbox.md#profiles) with `cloud_id`. When PXE booting, use the kernel option `cloud-config-url` to point to `matchbox` [cloud-config endpoint](api-http.md#cloud-config).
|
||||
|
||||
## Examples
|
||||
|
||||
Here is an example Cloud-Config which starts some units and writes a file.
|
||||
|
||||
<!-- {% raw %} -->
|
||||
```yaml
|
||||
#cloud-config
|
||||
coreos:
|
||||
@@ -40,7 +39,6 @@ write_files:
|
||||
content: |
|
||||
{{.greeting}}
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
The Cloud-Config [Validator](https://coreos.com/validate/) is also useful for checking your Cloud-Config files for errors.
|
||||
|
||||
@@ -35,7 +35,6 @@ Configuration arguments can be provided as flags or as environment variables.
|
||||
|
||||
```sh
|
||||
$ ./bin/matchbox -version
|
||||
$ sudo rkt run quay.io/poseidon/matchbox:latest -- -version
|
||||
$ sudo docker run quay.io/poseidon/matchbox:latest -version
|
||||
```
|
||||
|
||||
@@ -47,12 +46,6 @@ Run the binary.
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -log-level=debug -data-path=examples -assets-path=examples/assets
|
||||
```
|
||||
|
||||
Run the latest ACI with rkt.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --mount volume=assets,target=/var/lib/matchbox/assets --volume assets,kind=host,source=$PWD/examples/assets quay.io/poseidon/matchbox:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
Run the latest Docker image.
|
||||
|
||||
```sh
|
||||
@@ -61,13 +54,7 @@ $ sudo docker run -p 8080:8080 --rm -v $PWD/examples/assets:/var/lib/matchbox/as
|
||||
|
||||
### With examples
|
||||
|
||||
Mount `examples` to pre-load the [example](../examples/README.md) machine groups and profiles. Run the container with rkt,
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/poseidon/matchbox:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
or with Docker.
|
||||
Mount `examples` to pre-load the example machine groups and profiles. Run the container.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
@@ -89,20 +76,6 @@ Clients, such as `bootcmd`, verify the server's certificate with a CA bundle pas
|
||||
$ ./bin/bootcmd profile list --endpoints 127.0.0.1:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
### With rkt
|
||||
|
||||
Run the ACI with rkt and TLS credentials from `examples/etc/matchbox`.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples,readOnly=true --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=$PWD/examples/etc/matchbox --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/poseidon/matchbox:latest -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
A `bootcmd` client can call the gRPC API running at the IP used in the rkt example.
|
||||
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
### With docker
|
||||
|
||||
Run the Docker image with TLS credentials from `examples/etc/matchbox`.
|
||||
@@ -126,13 +99,7 @@ $ export MATCHBOX_PASSPHRASE=test
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -key-ring-path matchbox/sign/fixtures/secring.gpg -data-path=examples -assets-path=examples/assets
|
||||
```
|
||||
|
||||
Run the ACI with a test key.
|
||||
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --set-env=MATCHBOX_PASSPHRASE=test --mount volume=secrets,target=/secrets --volume secrets,kind=host,source=$PWD/matchbox/sign/fixtures --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/poseidon/matchbox:latest -- -address=0.0.0.0:8080 -key-ring-path secrets/secring.gpg
|
||||
```
|
||||
|
||||
Run the Docker image with a test key.
|
||||
Run the container image with a test key.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm --env MATCHBOX_PASSPHRASE=test -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z -v $PWD/matchbox/sign/fixtures:/secrets:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug -key-ring-path secrets/secring.gpg
|
||||
@@ -25,7 +25,7 @@ Container Linux Config templates can be added to the `/var/lib/matchbox/ignition
|
||||
|
||||
## Referencing in Profiles
|
||||
|
||||
Profiles can include a Container Linux Config for provisioning machines. Specify the Container Linux Config in a [Profile](matchbox.md#profiles) with `ignition_id`. When PXE booting, use the kernel option `coreos.first_boot=1` and `coreos.config.url` to point to the `matchbox` [Ignition endpoint](api.md#ignition-config).
|
||||
Profiles can include a Container Linux Config for provisioning machines. Specify the Container Linux Config in a [Profile](matchbox.md#profiles) with `ignition_id`. When PXE booting, use the kernel option `coreos.first_boot=1` and `coreos.config.url` to point to the `matchbox` [Ignition endpoint](api-http.md#ignition-config).
|
||||
|
||||
## Examples
|
||||
|
||||
@@ -1,66 +1,46 @@
|
||||
# Installation
|
||||
|
||||
This guide walks through deploying the `matchbox` service on a Linux host (via RPM, rkt, docker, or binary) or on a Kubernetes cluster.
|
||||
This guide walks through deploying the `matchbox` service on a Linux host (as a binary or container image) or on a Kubernetes cluster.
|
||||
|
||||
## Provisoner
|
||||
|
||||
`matchbox` is a service for network booting and provisioning machines to create CoreOS Container Linux clusters. `matchbox` should be installed on a provisioner machine (Container Linux or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
|
||||
Matchbox is a service for network booting and provisioning machines to create Fedora CoreOS or Flatcar Linux clusters. Matchbox may installed on a host server or Kubernetes cluster that can serve configs to client machines in a lab or datacenter.
|
||||
|
||||
Choose one of the supported installation options:
|
||||
|
||||
* [CoreOS Container Linux (rkt)](#coreos-container-linux)
|
||||
* [RPM-based](#rpm-based-distro)
|
||||
* [Generic Linux (binary)](#generic-linux)
|
||||
* [With rkt](#rkt)
|
||||
* [With docker](#docker)
|
||||
* [Kubernetes Service](#kubernetes)
|
||||
* [Matchbox binary](#matchbox-binary)
|
||||
* [Container image](#container-image)
|
||||
* [Kubernetes manifests](#kubernetes)
|
||||
|
||||
## Download
|
||||
|
||||
Download the latest matchbox [release](https://github.com/poseidon/matchbox/releases) to the provisioner host.
|
||||
Download the latest Matchbox [release](https://github.com/poseidon/matchbox/releases).
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/poseidon/matchbox/releases/download/v0.8.0/matchbox-v0.8.0-linux-amd64.tar.gz
|
||||
$ wget https://github.com/poseidon/matchbox/releases/download/v0.8.0/matchbox-v0.8.0-linux-amd64.tar.gz.asc
|
||||
$ wget https://github.com/poseidon/matchbox/releases/download/v0.9.0/matchbox-v0.9.0-linux-amd64.tar.gz
|
||||
$ wget https://github.com/poseidon/matchbox/releases/download/v0.9.0/matchbox-v0.9.0-linux-amd64.tar.gz.asc
|
||||
```
|
||||
|
||||
Verify the release has been signed by Dalton Hubble's [GPG Key 8F515AD1602065C8](https://keyserver.ubuntu.com/pks/lookup?search=0x8F515AD1602065C8&op=vindex).
|
||||
Verify the release has been signed by Dalton Hubble's GPG [Key](https://keyserver.ubuntu.com/pks/lookup?search=0x8F515AD1602065C8&op=vindex)'s signing subkey.
|
||||
|
||||
```sh
|
||||
$ gpg --keyserver keyserver.ubuntu.com --recv-key 8F515AD1602065C8
|
||||
$ gpg --verify matchbox-v0.8.0-linux-amd64.tar.gz.asc matchbox-v0.8.0-linux-amd64.tar.gz
|
||||
$ gpg --keyserver keyserver.ubuntu.com --recv-key 2E3D92BF07D9DDCCB3BAE4A48F515AD1602065C8
|
||||
$ gpg --verify matchbox-v0.9.0-linux-amd64.tar.gz.asc matchbox-v0.9.0-linux-amd64.tar.gz
|
||||
gpg: Good signature from "Dalton Hubble <dghubble@gmail.com>"
|
||||
```
|
||||
|
||||
Untar the release.
|
||||
|
||||
```sh
|
||||
$ tar xzvf matchbox-v0.8.0-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.8.0-linux-amd64
|
||||
$ tar xzvf matchbox-v0.9.0-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.9.0-linux-amd64
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
### RPM-based distro
|
||||
Run Matchbox as a binary, a container image, or on Kubernetes.
|
||||
|
||||
On an RPM-based provisioner (Fedora 24+), install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf`.
|
||||
|
||||
```sh
|
||||
dnf copr enable @CoreOS/matchbox
|
||||
dnf install matchbox
|
||||
```
|
||||
|
||||
RPMs are not currently available for CentOS and RHEL (due to Go version). CentOS and RHEL users should follow the Generic Linux section below.
|
||||
|
||||
### CoreOS Container Linux
|
||||
|
||||
On a Container Linux provisioner, rkt run `matchbox` image with the provided systemd unit.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-on-coreos.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
### Generic Linux
|
||||
### Matchbox Binary
|
||||
|
||||
Pre-built binaries are available for generic Linux distributions. Copy the `matchbox` static binary to an appropriate location on the host.
|
||||
|
||||
@@ -83,12 +63,12 @@ $ sudo chown -R matchbox:matchbox /var/lib/matchbox
|
||||
Copy the provided `matchbox` systemd unit file.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/matchbox.service
|
||||
$ sudo cp contrib/systemd/matchbox.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
## Customization
|
||||
#### systemd dropins
|
||||
|
||||
Customize matchbox by editing the systemd unit or adding a systemd dropin. Find the complete set of `matchbox` flags and environment variables at [config](config.md).
|
||||
Customize Matchbox by editing the systemd unit or adding a systemd dropin. Find the complete set of `matchbox` flags and environment variables at [config](config.md).
|
||||
|
||||
```sh
|
||||
$ sudo systemctl edit matchbox
|
||||
@@ -112,172 +92,42 @@ Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
```
|
||||
|
||||
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a Container Linux provisioner can start with an example that enables it.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-for-tectonic.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
Customize `matchbox` to suit your preferences.
|
||||
|
||||
## Firewall
|
||||
#### Start
|
||||
|
||||
Allow your port choices on the provisioner's firewall so the clients can access the service. Here are the commands for those using `firewalld`:
|
||||
Start the Matchbox service and enable it if you'd like it to start on every boot.
|
||||
|
||||
```sh
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8080/tcp --permanent
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8081/tcp --permanent
|
||||
```
|
||||
|
||||
## Generate TLS Certificates
|
||||
|
||||
The Matchbox gRPC API allows clients (terraform-provider-matchbox) to create and update Matchbox resources. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
|
||||
The `cert-gen` helper script generates a self-signed CA, server certificate, and client certificate. **Prefer your organization's PKI, if possible**
|
||||
|
||||
Navigate to the `scripts/tls` directory.
|
||||
|
||||
```sh
|
||||
$ cd scripts/tls
|
||||
```
|
||||
|
||||
Export `SAN` to set the Subject Alt Names which should be used in certificates. Provide the fully qualified domain name or IP (discouraged) where Matchbox will be installed.
|
||||
|
||||
```sh
|
||||
# DNS or IP Subject Alt Names where matchbox runs
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:172.18.0.2
|
||||
```
|
||||
|
||||
Generate a `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`.
|
||||
|
||||
```sh
|
||||
$ ./cert-gen
|
||||
```
|
||||
|
||||
Move TLS credentials to the matchbox server's default location.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/matchbox
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox
|
||||
$ sudo chown -R matchbox:matchbox /etc/matchbox
|
||||
```
|
||||
|
||||
Save `client.crt`, `client.key`, and `ca.crt` for later use (e.g. `~/.matchbox`).
|
||||
|
||||
```sh
|
||||
$ mkdir -p ~/.matchbox
|
||||
$ cp client.crt client.key ca.crt ~/.matchbox/
|
||||
```
|
||||
|
||||
## Start matchbox
|
||||
|
||||
Start the `matchbox` service and enable it if you'd like it to start on every boot.
|
||||
|
||||
```sh
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl start matchbox
|
||||
$ sudo systemctl enable matchbox
|
||||
```
|
||||
|
||||
## Verify
|
||||
### Container Image
|
||||
|
||||
Verify the matchbox service is running and can be reached by client machines (those being provisioned).
|
||||
|
||||
```sh
|
||||
$ systemctl status matchbox
|
||||
$ dig matchbox.example.com
|
||||
```
|
||||
|
||||
Verify you receive a response from the HTTP and API endpoints.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080
|
||||
matchbox
|
||||
```
|
||||
|
||||
If you enabled the gRPC API,
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert scripts/tls/client.crt -key scripts/tls/client.key
|
||||
CONNECTED(00000003)
|
||||
depth=1 CN = fake-ca
|
||||
verify return:1
|
||||
depth=0 CN = fake-server
|
||||
verify return:1
|
||||
---
|
||||
Certificate chain
|
||||
0 s:/CN=fake-server
|
||||
i:/CN=fake-ca
|
||||
---
|
||||
....
|
||||
```
|
||||
|
||||
## Download Container Linux (optional)
|
||||
|
||||
`matchbox` can serve Container Linux images in development or lab environments to reduce bandwidth usage and increase the speed of Container Linux PXE boots and installs to disk.
|
||||
|
||||
Download a recent Container Linux [release](https://coreos.com/releases/) with signatures.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1967.3.0 . # note the "." 3rd argument
|
||||
```
|
||||
|
||||
Move the images to `/var/lib/matchbox/assets`,
|
||||
|
||||
```sh
|
||||
$ sudo cp -r coreos /var/lib/matchbox/assets
|
||||
```
|
||||
Run the container image with Podman,
|
||||
|
||||
```
|
||||
/var/lib/matchbox/assets/
|
||||
├── coreos
|
||||
│ └── 1967.3.0
|
||||
│ ├── CoreOS_Image_Signing_Key.asc
|
||||
│ ├── coreos_production_image.bin.bz2
|
||||
│ ├── coreos_production_image.bin.bz2.sig
|
||||
│ ├── coreos_production_pxe_image.cpio.gz
|
||||
│ ├── coreos_production_pxe_image.cpio.gz.sig
|
||||
│ ├── coreos_production_pxe.vmlinuz
|
||||
│ └── coreos_production_pxe.vmlinuz.sig
|
||||
mkdir -p /var/lib/matchbox/assets
|
||||
podman run --net=host --rm -v /var/lib/matchbox:/var/lib/matchbox:Z -v /etc/matchbox:/etc/matchbox:Z,ro quay.io/poseidon/matchbox:v0.9.0 -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
and verify the images are acessible.
|
||||
Or with Docker,
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080/assets/coreos/1967.3.0/
|
||||
<pre>...
|
||||
```
|
||||
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve Container Linux images.
|
||||
|
||||
## Network
|
||||
|
||||
Review [network setup](https://github.com/poseidon/matchbox/blob/master/Documentation/network-setup.md) with your network administrator to set up DHCP, TFTP, and DNS services on your network. At a high level, your goals are to:
|
||||
|
||||
* Chainload PXE firmwares to iPXE
|
||||
* Point iPXE client machines to the `matchbox` iPXE HTTP endpoint `http://matchbox.example.com:8080/boot.ipxe`
|
||||
* Ensure `matchbox.example.com` resolves to your `matchbox` deployment
|
||||
|
||||
CoreOS provides [dnsmasq](https://github.com/poseidon/matchbox/tree/master/contrib/dnsmasq) as `quay.io/coreos/dnsmasq`, if you wish to use rkt or Docker.
|
||||
|
||||
## Docker
|
||||
|
||||
Run the container image with docker.
|
||||
|
||||
```sh
|
||||
$ mkdir -p /var/lib/matchbox/assets
|
||||
$ sudo docker run --net=host --rm -v /var/lib/matchbox:/var/lib/matchbox:Z -v /etc/matchbox:/etc/matchbox:Z,ro quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
mkdir -p /var/lib/matchbox/assets
|
||||
sudo docker run --net=host --rm -v /var/lib/matchbox:/var/lib/matchbox:Z -v /etc/matchbox:/etc/matchbox:Z,ro quay.io/poseidon/matchbox:v0.9.0 -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Create machine profiles, groups, or Ignition configs by adding files to `/var/lib/matchbox`.
|
||||
|
||||
## Kubernetes
|
||||
### Kubernetes
|
||||
|
||||
Install `matchbox` on a Kubernetes cluster by creating a deployment and service.
|
||||
Install Matchbox on a Kubernetes cluster with the example manifests.
|
||||
|
||||
```sh
|
||||
$ kubectl apply -f contrib/k8s/matchbox-deployment.yaml
|
||||
$ kubectl apply -f contrib/k8s/matchbox-service.yaml
|
||||
$ kubectl apply -R -f contrib/k8s
|
||||
$ kubectl get services
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
matchbox 10.3.0.145 <none> 8080/TCP,8081/TCP 46m
|
||||
@@ -308,15 +158,151 @@ $ curl http://matchbox.example.com
|
||||
$ openssl s_client -connect matchbox-rpc.example.com:443 -CAfile ca.crt -cert client.crt -key client.key
|
||||
```
|
||||
|
||||
# HTTPS - The read-only Matchbox API is also available with HTTPS
|
||||
## Firewall
|
||||
|
||||
To start matchbox in this mode you will need the following flags set:
|
||||
Allow your port choices on the provisioner's firewall so the clients can access the service. Here are the commands for those using `firewalld`:
|
||||
|
||||
| Name | Type | Description |
|
||||
|----------------|--------|---------------------------------------------------------------|
|
||||
| -web-ssl | bool | true/false |
|
||||
| -web-cert-file | string | Path to the server TLS certificate file |
|
||||
| -web-key-file | string | Path to the server TLS key file |
|
||||
```sh
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8080/tcp --permanent
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8081/tcp --permanent
|
||||
```
|
||||
|
||||
## Generate TLS Certificates
|
||||
|
||||
The Matchbox gRPC API allows clients (terraform-provider-matchbox) to create and update Matchbox resources. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
|
||||
The `cert-gen` helper script generates a self-signed CA, server certificate, and client certificate. **Prefer your organization's PKI, if possible**
|
||||
|
||||
Navigate to the `scripts/tls` directory.
|
||||
|
||||
```sh
|
||||
$ cd scripts/tls
|
||||
```
|
||||
|
||||
Export `SAN` to set the Subject Alt Names which should be used in certificates. Provide the fully qualified domain name or IP (discouraged) where Matchbox will be installed.
|
||||
|
||||
```sh
|
||||
# DNS or IP Subject Alt Names where matchbox runs
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:172.17.0.2
|
||||
```
|
||||
|
||||
Generate a `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`.
|
||||
|
||||
```sh
|
||||
$ ./cert-gen
|
||||
```
|
||||
|
||||
Move TLS credentials to the matchbox server's default location.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/matchbox
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox
|
||||
$ sudo chown -R matchbox:matchbox /etc/matchbox
|
||||
```
|
||||
|
||||
Save `client.crt`, `client.key`, and `ca.crt` for later use (e.g. `~/.matchbox`).
|
||||
|
||||
```sh
|
||||
$ mkdir -p ~/.matchbox
|
||||
$ cp client.crt client.key ca.crt ~/.matchbox/
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
Verify the matchbox service is running and can be reached by client machines (those being provisioned).
|
||||
|
||||
```sh
|
||||
$ systemctl status matchbox # Matchbox binary method
|
||||
$ dig matchbox.example.com
|
||||
```
|
||||
|
||||
Verify you receive a response from the HTTP and API endpoints.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080
|
||||
matchbox
|
||||
```
|
||||
|
||||
If you enabled the gRPC API,
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert scripts/tls/client.crt -key scripts/tls/client.key
|
||||
CONNECTED(00000003)
|
||||
depth=1 CN = fake-ca
|
||||
verify return:1
|
||||
depth=0 CN = fake-server
|
||||
verify return:1
|
||||
---
|
||||
Certificate chain
|
||||
0 s:/CN=fake-server
|
||||
i:/CN=fake-ca
|
||||
---
|
||||
....
|
||||
```
|
||||
|
||||
## Download Images (optional)
|
||||
|
||||
Matchbox can serve OS images in development or lab environments to reduce bandwidth usage and increase the speed of PXE boots and installs to disk.
|
||||
|
||||
Download a recent Fedora CoreOS or Flatcar Linux release.
|
||||
|
||||
```
|
||||
$ ./scripts/get-fedora-coreos stable 32.20200923.3.0 .
|
||||
$ ./scripts/get-flatcar stable 2605.6.0 .
|
||||
```
|
||||
|
||||
Move the images to `/var/lib/matchbox/assets`,
|
||||
|
||||
```
|
||||
/var/lib/matchbox/assets/fedora-coreos/
|
||||
├── fedora-coreos-32.20200923.3.0-live-initramfs.x86_64.img
|
||||
├── fedora-coreos-32.20200923.3.0-live-kernel-x86_64
|
||||
├── fedora-coreos-32.20200923.3.0-live-rootfs.x86_64.img
|
||||
├── fedora-coreos-32.20200923.3.0-metal.x86_64.raw.xz
|
||||
└── fedora-coreos-32.20200923.3.0-metal.x86_64.raw.xz.sig
|
||||
|
||||
/var/lib/matchbox/assets/flatcar/
|
||||
└── 2605.6.0
|
||||
├── Flatcar_Image_Signing_Key.asc
|
||||
├── flatcar_production_image.bin.bz2
|
||||
├── flatcar_production_image.bin.bz2.sig
|
||||
├── flatcar_production_pxe_image.cpio.gz
|
||||
├── flatcar_production_pxe_image.cpio.gz.sig
|
||||
├── flatcar_production_pxe.vmlinuz
|
||||
├── flatcar_production_pxe.vmlinuz.sig
|
||||
└── version.txt
|
||||
```
|
||||
|
||||
and verify the images are acessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080/assets/fedora-coreos/
|
||||
<pre>...
|
||||
```
|
||||
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve images.
|
||||
|
||||
## Network
|
||||
|
||||
Review [network setup](https://github.com/poseidon/matchbox/blob/master/docs/network-setup.md) with your network administrator to set up DHCP, TFTP, and DNS services on your network. At a high level, your goals are to:
|
||||
|
||||
* Chainload PXE firmwares to iPXE
|
||||
* Point iPXE client machines to the `matchbox` iPXE HTTP endpoint `http://matchbox.example.com:8080/boot.ipxe`
|
||||
* Ensure `matchbox.example.com` resolves to your `matchbox` deployment
|
||||
|
||||
Poseidon provides [dnsmasq](https://github.com/poseidon/matchbox/tree/master/contrib/dnsmasq) as `quay.io/poseidon/dnsmasq`.
|
||||
|
||||
# TLS
|
||||
|
||||
Matchbox can serve the read-only HTTP API with TLS.
|
||||
|
||||
| Name | Type | Description |
|
||||
|----------------|--------|-------------|
|
||||
| -web-ssl | bool | true/false |
|
||||
| -web-cert-file | string | Path to the server TLS certificate file |
|
||||
| -web-key-file | string | Path to the server TLS key file |
|
||||
|
||||
However, it is more common to use an Ingress Controller (Kubernetes) to terminate TLS.
|
||||
|
||||
### Operational notes
|
||||
|
||||
@@ -38,13 +38,7 @@ Run the binary.
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -log-level=debug -data-path examples -assets-path examples/assets
|
||||
```
|
||||
|
||||
Run the container image with rkt, on `metal0`.
|
||||
|
||||
```sh
|
||||
$ sudo rkt --insecure-options=image run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=$PWD/examples/etc/matchbox --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd matchbox.aci -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Alternately, run the Docker image on `docker0`.
|
||||
Run the Docker image on `docker0`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
@@ -52,7 +46,7 @@ $ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD
|
||||
|
||||
## bootcmd
|
||||
|
||||
Run `bootcmd` against the gRPC API of the service running via rkt.
|
||||
Run `bootcmd` against the gRPC API of the service.
|
||||
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --cacert examples/etc/matchbox/ca.crt
|
||||
@@ -8,7 +8,7 @@ This guide covers releasing new versions of matchbox.
|
||||
Create a release commit which updates old version references.
|
||||
|
||||
```sh
|
||||
$ export VERSION=v0.8.0
|
||||
$ export VERSION=v0.9.0
|
||||
```
|
||||
|
||||
## Tag
|
||||
@@ -27,7 +27,6 @@ Travis CI will build the Docker image and push it to Quay.io when the tag is pus
|
||||
|
||||
```sh
|
||||
$ sudo docker run quay.io/poseidon/matchbox:$VERSION -version
|
||||
$ sudo rkt run --no-store quay.io/poseidon/matchbox:$VERSION -- -version
|
||||
```
|
||||
|
||||
## Github release
|
||||
@@ -45,12 +44,12 @@ $ make release
|
||||
Verify the reported version.
|
||||
|
||||
```
|
||||
./_output/matchbox-v0.8.0-linux-amd64/matchbox -version
|
||||
./_output/matchbox-v0.9.0-linux-amd64/matchbox -version
|
||||
```
|
||||
|
||||
## Signing
|
||||
|
||||
Release tarballs are signed by Dalton Hubble's [GPG Key 8F515AD1602065C8](https://keyserver.ubuntu.com/pks/lookup?search=0x8F515AD1602065C8&op=vindex)
|
||||
Release tarballs are signed by Dalton Hubble's GPG [Key](/docs/deployment.md#download)
|
||||
|
||||
```sh
|
||||
cd _output
|
||||
@@ -1,8 +1,9 @@
|
||||
# Getting started with Docker
|
||||
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
In this tutorial, we'll run `matchbox` on a Linux machine with Docker to network boot and provision local QEMU/KVM machines as Fedora CoreOS or Flatcar Linux machines. You'll be able to test network setups and Ignition provisioning.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
!!! note
|
||||
To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
## Requirements
|
||||
|
||||
@@ -25,10 +26,11 @@ $ git clone https://github.com/poseidon/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
|
||||
Download Fedora CoreOS or Flatcar Linux image assets to `examples/assets`.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1967.3.0 ./examples/assets
|
||||
$ ./scripts/get-fedora-coreos stable 32.20200923.3.0 ./examples/assets
|
||||
$ ./scripts/get-flatcar stable 2605.6.0 ./examples/assets
|
||||
```
|
||||
|
||||
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name.
|
||||
@@ -45,10 +47,10 @@ For development convenience, add `/etc/hosts` entries for nodes so they may be r
|
||||
|
||||
Run the `matchbox` and `dnsmasq` services on the `docker0` bridge. `dnsmasq` will run DHCP, DNS and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
|
||||
|
||||
The `devnet` convenience script can start these services and accepts the name of any example cluster in [examples](../examples).
|
||||
The `devnet` convenience script can start these services and accepts the name of any example in [examples](https://github.com/poseidon/matchbox/tree/master/examples).
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/devnet create etcd3
|
||||
$ sudo ./scripts/devnet create fedora-coreos
|
||||
```
|
||||
|
||||
Inspect the logs.
|
||||
@@ -57,7 +59,7 @@ Inspect the logs.
|
||||
$ sudo ./scripts/devnet status
|
||||
```
|
||||
|
||||
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
|
||||
Inspect the examples and Matchbox endpoints to see how machines (e.g. node1 with MAC `52:54:00:a1:9c:ae`) are mapped to Profiles, and therefore iPXE and Ignition configs.
|
||||
|
||||
* iPXE [http://127.0.0.1:8080/ipxe?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ipxe?mac=52:54:00:a1:9c:ae)
|
||||
* Ignition [http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
@@ -68,8 +70,8 @@ Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of ho
|
||||
If you prefer to start the containers yourself, instead of using `devnet`,
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/coreos/dnsmasq -d
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/fedora-coreos:/var/lib/matchbox/groups:Z quay.io/poseidon/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/poseidon/dnsmasq -d
|
||||
```
|
||||
|
||||
## Client VMs
|
||||
@@ -101,15 +103,12 @@ $ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
|
||||
|
||||
## Verify
|
||||
|
||||
The VMs should network boot and provision themselves into a three node etcd3 cluster, with other nodes behaving as etcd3 gateways.
|
||||
The VMs should network boot and provision themselves as declared.
|
||||
|
||||
The example profile added autologin so you can verify that etcd3 works between nodes.
|
||||
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
cat /etc/os-release
|
||||
```
|
||||
|
||||
## Clean up
|
||||
|
||||
Clean up the containers and VM machines.
|
||||
@@ -119,6 +118,13 @@ $ sudo ./scripts/devnet destroy
|
||||
$ sudo ./scripts/libvirt destroy
|
||||
```
|
||||
|
||||
## Going further
|
||||
## Going Further
|
||||
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [examples](https://github.com/poseidon/matchbox/tree/master/examples).
|
||||
|
||||
Try different examples and Ignition declarations:
|
||||
|
||||
* Declare an SSH authorized public key (see examples README)
|
||||
* Declare a systemd unit
|
||||
* Declare file or directory content
|
||||
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`.
|
||||
220
docs/getting-started.md
Normal file
@@ -0,0 +1,220 @@
|
||||
# Getting started
|
||||
|
||||
In this tutorial, we'll use `matchbox` with Terraform to provision Fedora CoreOS or Flatcar Linux machines.
|
||||
|
||||
We'll install the `matchbox` service, setup a PXE network boot environment, and use Terraform configs to declare infrastructure and apply resources on `matchbox`.
|
||||
|
||||
## matchbox
|
||||
|
||||
Install `matchbox` on a host server or Kubernetes cluster. Generate TLS credentials and enable the gRPC API as directed. Save the `ca.crt`, `client.crt`, and `client.key` on your local machine (e.g. `~/.matchbox`).
|
||||
|
||||
* Installing on a [Linux distro](deployment.md)
|
||||
* Installing on [Kubernetes](deployment.md#kubernetes)
|
||||
* Running with [docker](deployment.md#docker)
|
||||
|
||||
Verify the matchbox read-only HTTP endpoints are accessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080
|
||||
matchbox
|
||||
```
|
||||
|
||||
Verify your TLS client certificate and key can be used to access the gRPC API.
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 \
|
||||
-CAfile ~/.matchbox/ca.crt \
|
||||
-cert ~/.matchbox/client.crt \
|
||||
-key ~/.matchbox/client.key
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Install [Terraform](https://www.terraform.io/downloads.html) v0.13+ on your system.
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.13.3
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
Clone the matchbox source.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/poseidon/matchbox.git
|
||||
$ cd matchbox/examples/terraform
|
||||
```
|
||||
|
||||
Select from the Terraform [examples](https://github.com/poseidon/matchbox/tree/master/examples/terraform). For example,
|
||||
|
||||
* `fedora-coreos-install` - PXE boot, install Fedora CoreOS to disk, reboot, and machines come up with your SSH authorized key set
|
||||
* `flatcar-install` - PXE boot, install Flatcar Linux to disk, reboot, and machines come up with your SSH authorized key set
|
||||
|
||||
These aren't exactly full clusters, but they show declarations and network provisioning.
|
||||
|
||||
```sh
|
||||
$ cd fedora-coreos-install # or flatcar-install
|
||||
```
|
||||
|
||||
!!! note
|
||||
Fedora CoreOS images are only served via HTTPS, so your iPXE firmware must be compiled to support HTTPS downloads.
|
||||
|
||||
Let's review the terraform config and learn a bit about Matchbox.
|
||||
|
||||
### Provider
|
||||
|
||||
Matchbox is configured as a provider platform for bare-metal resources.
|
||||
|
||||
```tf
|
||||
// Configure the matchbox provider
|
||||
provider "matchbox" {
|
||||
endpoint = var.matchbox_rpc_endpoint
|
||||
client_cert = file("~/.matchbox/client.crt")
|
||||
client_key = file("~/.matchbox/client.key")
|
||||
ca = file("~/.matchbox/ca.crt")
|
||||
}
|
||||
|
||||
terraform {
|
||||
required_providers {
|
||||
ct = {
|
||||
source = "poseidon/ct"
|
||||
version = "0.6.1"
|
||||
}
|
||||
matchbox = {
|
||||
source = "poseidon/matchbox"
|
||||
version = "0.4.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Profiles
|
||||
|
||||
Machine profiles specify the kernel, initrd, kernel args, Ignition Config, and other configs (e.g. templated Container Linux Config, Cloud-config, generic) used to network boot and provision a bare-metal machine. The profile below would PXE boot machines using a Fedora CoreOS kernel and initrd (see [assets](api-http.md#assets) to learn about caching for speed), perform a disk install, reboot (first boot from disk), and use a [Fedora CoreOS Config](https://github.com/coreos/fcct/blob/master/docs/configuration-v1_1.md) to generate an Ignition config to provision.
|
||||
|
||||
```tf
|
||||
// Fedora CoreOS profile
|
||||
resource "matchbox_profile" "fedora-coreos-install" {
|
||||
name = "worker"
|
||||
kernel = "https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-kernel-x86_64"
|
||||
initrd = [
|
||||
"https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-initramfs.x86_64.img",
|
||||
"https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-live-rootfs.x86_64.img"
|
||||
]
|
||||
|
||||
args = [
|
||||
"rd.neednet=1",
|
||||
"coreos.inst.install_dev=/dev/sda",
|
||||
"coreos.inst.ignition_url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.inst.image_url=https://builds.coreos.fedoraproject.org/prod/streams/${var.os_stream}/builds/${var.os_version}/x86_64/fedora-coreos-${var.os_version}-metal.x86_64.raw.xz",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
]
|
||||
|
||||
raw_ignition = data.ct_config.worker-ignition.rendered
|
||||
}
|
||||
|
||||
data "ct_config" "worker-ignition" {
|
||||
content = data.template_file.worker-config.rendered
|
||||
strict = true
|
||||
}
|
||||
|
||||
data "template_file" "worker-config" {
|
||||
template = file("fcc/fedora-coreos.yaml")
|
||||
vars = {
|
||||
ssh_authorized_key = var.ssh_authorized_key
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Groups
|
||||
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and templates in machine-specific values. The group below does not have a `selector` block, so any machines which network boot from Matchbox will match this group and be provisioned using the `fedora-coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
|
||||
```tf
|
||||
// Default matcher group for machines
|
||||
resource "matchbox_group" "default" {
|
||||
name = "default"
|
||||
profile = matchbox_profile.fedora-coreos-install.name
|
||||
}
|
||||
```
|
||||
|
||||
### Variables
|
||||
|
||||
Some Terraform [variables](https://www.terraform.io/docs/configuration/variables.html) are used in the examples. A quick way to set their value is by creating a `terraform.tfvars` file.
|
||||
|
||||
```
|
||||
cp terraform.tfvars.example terraform.tfvars
|
||||
```
|
||||
|
||||
```tf
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
ssh_authorized_key = "YOUR_SSH_KEY"
|
||||
```
|
||||
|
||||
### Apply
|
||||
|
||||
Initialize the Terraform workspace. Then plan and apply the resources.
|
||||
|
||||
```
|
||||
terraform init
|
||||
```
|
||||
|
||||
```
|
||||
$ terraform apply
|
||||
Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
Matchbox serves configs to machines and respects query parameters, if you're interested:
|
||||
|
||||
* iPXE default - [/ipxe](http://matchbox.example.com:8080/ipxe)
|
||||
* Ignition default - [/ignition](http://matchbox.example.com:8080/ignition)
|
||||
* Ignition post-install - [/ignition?os=installed](http://matchbox.example.com:8080/ignition?os=installed)
|
||||
|
||||
## Network
|
||||
|
||||
Matchbox can integrate with many on-premise network setups. It does not seek to be the DHCP server, TFTP server, or DNS server for the network. Instead, matchbox serves iPXE scripts as the entrypoint for provisioning network booted machines. PXE clients are supported by chainloading iPXE firmware.
|
||||
|
||||
In the simplest case, an iPXE-enabled network can chain to Matchbox,
|
||||
|
||||
```
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://matchbox.foo:8080/boot.ipxe
|
||||
```
|
||||
|
||||
Read [network-setup.md](network-setup.md) for the complete range of options. Network admins have a great amount of flexibility:
|
||||
|
||||
* May keep using existing DHCP, TFTP, and DNS services
|
||||
* May configure subnets, architectures, or specific machines to delegate to matchbox
|
||||
* May place matchbox behind a menu entry (timeout and default to matchbox)
|
||||
|
||||
If you've never setup a PXE-enabled network before or you're trying to setup a home lab, checkout the [quay.io/poseidon/dnsmasq](https://quay.io/repository/poseidon/dnsmasq) container image [copy-paste examples](https://github.com/poseidon/matchbox/blob/master/docs/network-setup.md#poseidondnsmasq) and see the section about [proxy-DHCP](https://github.com/poseidon/matchbox/blob/master/docs/network-setup.md#proxy-dhcp).
|
||||
|
||||
## Boot
|
||||
|
||||
It's time to network boot your machines. Use the BMC's remote management capabilities (may be vendor-specific) to set the boot device (on the next boot only) to PXE and power on each machine.
|
||||
|
||||
```sh
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power off
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS chassis bootdev pxe
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power on
|
||||
```
|
||||
|
||||
Each machine should chainload iPXE, delegate to Matchbox, receive its iPXE config (or other supported configs) and begin the provisioning process. The examples assume machines are configured to boot from disk first and PXE only when requested, but you can write profiles for different cases.
|
||||
|
||||
Once the install completes and the machine reboots, you can SSH.
|
||||
|
||||
```sh
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
To re-provision the machine for another purpose, run `terraform apply` and PXE boot machines again.
|
||||
|
||||
## Going Further
|
||||
|
||||
Matchbox can be used to provision multi-node Fedora CoreOS or Flatcar Linux clusters at one or many on-premise sites if deployed in an HA way. Machines can be matched individually by MAC address, UUID, region, or other labels you choose. Installs can be made much faster by caching images in the built-in HTTP [assets](api-http.md#assets) server.
|
||||
|
||||
[Ignition](https://github.com/coreos/ignition) can be used to partition disks and filesystems, write systemd units, write networkd configs or regular files, and create users. Nodes can be network provisioned into a complete cluster system that meets your needs. For example, see [Typhoon](https://typhoon.psdn.io/fedora-coreos/bare-metal/).
|
||||
|
||||
33
docs/grub.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# GRUB2 netboot
|
||||
|
||||
Use GRUB to network boot UEFI hardware.
|
||||
|
||||
## Requirements
|
||||
|
||||
For local development, install the dependencies for libvirt with UEFI.
|
||||
|
||||
* [UEFI with QEMU](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU)
|
||||
|
||||
Ensure that you've gone through the [matchbox with docker](getting-started-docker.md) and [matchbox](matchbox.md) guides and understand the basics.
|
||||
|
||||
## Containers
|
||||
|
||||
Run `matchbox` according to [matchbox with Docker](getting-started-docker.md), but mount the [grub](../examples/groups/grub) group example. Then start the `poseidon/dnsmasq` Docker image, which bundles a `grub.efi`.
|
||||
|
||||
```sh
|
||||
$ sudo docker run --rm --cap-add=NET_ADMIN quay.io/poseidon/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;matchbox.foo:8080)/grub","172.17.0.2" --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe --address=/matchbox.foo/172.17.0.2
|
||||
```
|
||||
|
||||
## Client VM
|
||||
|
||||
Create UEFI VM nodes which have known hardware attributes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
Create a VM to verify the machine network boots.
|
||||
|
||||
```sh
|
||||
$ sudo virt-install --name uefi-test --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```
|
||||
|
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 24 KiB |
|
Before Width: | Height: | Size: 130 KiB After Width: | Height: | Size: 130 KiB |
|
Before Width: | Height: | Size: 67 KiB After Width: | Height: | Size: 67 KiB |
|
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 50 KiB |
|
Before Width: | Height: | Size: 69 KiB After Width: | Height: | Size: 69 KiB |
|
Before Width: | Height: | Size: 17 KiB After Width: | Height: | Size: 17 KiB |
|
Before Width: | Height: | Size: 20 KiB After Width: | Height: | Size: 20 KiB |
33
docs/index.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# Matchbox
|
||||
|
||||
Matchbox is a service that matches bare-metal machines to profiles that PXE boot and provision clusters. Machines are matched by labels like MAC or UUID during PXE and profiles specify a kernel/initrd, iPXE config, and Ignition config.
|
||||
|
||||
## Features
|
||||
|
||||
* Chainload via iPXE and match hardware labels
|
||||
* Provision Fedora CoreOS or Flatcar Linux (powered by [Ignition](https://github.com/coreos/ignition))
|
||||
* Authenticated gRPC API for clients (e.g. Terraform)
|
||||
|
||||
## Installation
|
||||
|
||||
Matchbox can be installed from a binary or a container image.
|
||||
|
||||
* Install Matchbox as a [binary](deployment.md#matchbox-binary), as a [container image](deployment.md#container-image), or on [Kubernetes](deployment.md#kubernetes)
|
||||
* Setup a PXE-enabled [network](network-setup.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
Start provisioning machines with Fedora CoreOS or Flatcar Linux.
|
||||
|
||||
* [Terraform Usage](getting-started.md)
|
||||
* Fedora CoreOS (live PXE or PXE install to disk)
|
||||
* Flatcar Linux (live PXE or PXE install to disk)
|
||||
* [Local QEMU/KVM](getting-started-docker.md)
|
||||
* Fedora CoreOS (live PXE or PXE install to disk)
|
||||
* Flatcar Linux (live PXE or PXE install to disk)
|
||||
|
||||
## Related
|
||||
|
||||
* [dnsmasq](https://github.com/poseidon/matchbox/tree/master/contrib/dnsmasq) - container image to run DHCP, TFTP, and DNS services
|
||||
* [terraform-provider-matchbox](https://github.com/poseidon/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
|
||||
* [Typhoon](https://typhoon.psdn.io/) - minimal and free Kubernetes distribution, supporting bare-metal
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## About boot environment
|
||||
|
||||
Physical machines [network boot](network-booting.md) in an network boot environment with DHCP/TFTP/DNS services or with [coreos/dnsmasq](../contrib/dnsmasq).
|
||||
Physical machines [network boot](network-booting.md) in a network boot environment with DHCP/TFTP/DNS services or with [poseidon/dnsmasq](../contrib/dnsmasq).
|
||||
|
||||
`matchbox` serves iPXE or GRUB configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS Container Linux. The "first boot" Ignition config is fetched and Container Linux is installed.
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use Container Linux's early-boot capabilities to provision Container Linux machines.
|
||||
|
||||
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with rkt, or as a Docker container.
|
||||
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be run a binary or as a container.
|
||||
|
||||

|
||||
|
||||
@@ -18,7 +18,7 @@ See [configuration](config.md) flags and variables.
|
||||
|
||||
## API
|
||||
|
||||
* [HTTP API](api.md)
|
||||
* [HTTP API](api-http.md)
|
||||
* [gRPC API](https://godoc.org/github.com/poseidon/matchbox/matchbox/client)
|
||||
|
||||
## Data
|
||||
@@ -76,9 +76,9 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
|
||||
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE or GRUB. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api-http.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
|
||||
To use cloud-config, set the `cloud-config-url` kernel option to reference the `matchbox` [Cloud-Config endpoint](api.md#cloud-config), which will render the `cloud_id` file.
|
||||
To use cloud-config, set the `cloud-config-url` kernel option to reference the `matchbox` [Cloud-Config endpoint](api-http.md#cloud-config), which will render the `cloud_id` file.
|
||||
|
||||
### Groups
|
||||
|
||||
@@ -172,14 +172,14 @@ matchbox.foo/assets/
|
||||
|
||||
For example, a `Profile` might refer to a local asset `/assets/coreos/VERSION/coreos_production_pxe.vmlinuz` instead of `http://stable.release.core-os.net/amd64-usr/VERSION/coreos_production_pxe.vmlinuz`.
|
||||
|
||||
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place Container Linux assets.
|
||||
See the [get-fedora-coreos](https://github.com/poseidon/matchbox/blob/master/scripts/get-fedora-coreos) or [get-flatcar](https://github.com/poseidon/matchbox/blob/master/scripts/get-flatcar) scripts to quickly download, verify, and place image assets.
|
||||
|
||||
## Network
|
||||
|
||||
`matchbox` does not implement or exec a DHCP/TFTP server. Read [network setup](network-setup.md) or use the [coreos/dnsmasq](../contrib/dnsmasq) image if you need a quick DHCP, proxyDHCP, TFTP, or DNS setup.
|
||||
`matchbox` does not implement or exec a DHCP/TFTP server. Read [network setup](network-setup.md) or use the [poseidon/dnsmasq](../contrib/dnsmasq) image if you need a quick DHCP, proxyDHCP, TFTP, or DNS setup.
|
||||
|
||||
## Going further
|
||||
|
||||
* [gRPC API Usage](config.md#grpc-api)
|
||||
* [Metadata](api.md#metadata)
|
||||
* OpenPGP [Signing](api.md#openpgp-signatures)
|
||||
* [Metadata](api-http.md#metadata)
|
||||
* OpenPGP [Signing](api-http.md#openpgp-signatures)
|
||||
@@ -66,7 +66,7 @@ boot
|
||||
|
||||
A TFTP server is used only to provide the `undionly.kpxe` boot program to older PXE firmware in order to bootstrap into iPXE.
|
||||
|
||||
CoreOS `matchbox` can render signed iPXE scripts to machines based on their hardware attributes. Setup involves configuring your DHCP server to point iPXE clients to the `matchbox` [iPXE endpoint](api.md#ipxe).
|
||||
CoreOS `matchbox` can render signed iPXE scripts to machines based on their hardware attributes. Setup involves configuring your DHCP server to point iPXE clients to the `matchbox` [iPXE endpoint](api-http.md#ipxe).
|
||||
|
||||
## DHCP
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to boot and provision BIOS/PXE, iPXE, or UEFI client machines.
|
||||
|
||||
Matchbox serves iPXE scripts over HTTP to serve as the entrypoint for provisioning clusters. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, configure your network environment to point to Matchbox or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in local QEMU/KVM setup).
|
||||
Matchbox serves iPXE scripts over HTTP to act as the entrypoint for provisioning clusters. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, configure your network environment to point to Matchbox or use the convenient [quay.io/poseidon/dnsmasq](https://quay.io/repository/poseidon/dnsmasq) container image (used in local QEMU/KVM setup).
|
||||
|
||||
*Note*: These are just suggestions. Your network administrator or system administrator should choose the right network setup for your company.
|
||||
|
||||
@@ -28,7 +28,7 @@ This diagram can point you to the **right section(s)** of this document.
|
||||
|
||||

|
||||
|
||||
The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wish to use rkt or Docker to quickly run DHCP, proxyDHCP TFTP, or DNS services, use [coreos/dnsmasq](#coreosdnsmasq).
|
||||
The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wish to use Docker to quickly run DHCP, proxyDHCP, TFTP, or DNS services, use [poseidon/dnsmasq](#poseidondnsmasq).
|
||||
|
||||
## DNS
|
||||
|
||||
@@ -156,35 +156,14 @@ APPEND dhcp && chain http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
Add ipxe.lkrn to `/var/lib/tftpboot` (see [iPXE docs](http://ipxe.org/embed)).
|
||||
|
||||
## coreos/dnsmasq
|
||||
## poseidon/dnsmasq
|
||||
|
||||
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `ipxe.efi`, `undionly.kpxe`, and `grub.efi` for convenience. See [contrib/dnsmasq](../contrib/dnsmasq) for details.
|
||||
The [quay.io/poseidon/dnsmasq](https://quay.io/repository/poseidon/dnsmasq) container image can run DHCP, TFTP, and DNS services via docker. The image bundles `ipxe.efi`, `undionly.kpxe`, and `grub.efi` for convenience. See [contrib/dnsmasq](https://github.com/poseidon/matchbox/tree/master/contrib/dnsmasq) for details.
|
||||
|
||||
Run DHCP, TFTP, and DNS on the host's network:
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/poseidon/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
@@ -206,25 +185,15 @@ sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
Run a proxy-DHCP and TFTP service on the host's network:
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.1,proxy,255.255.255.0 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe \
|
||||
--pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/poseidon/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.1,proxy,255.255.255.0 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe \
|
||||
--pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--pxe-service=tag:#ipxe,X86-64_EFI,"PXE chainload to iPXE UEFI",ipxe.efi \
|
||||
--pxe-service=tag:ipxe,X86-64_EFI,"iPXE UEFI",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
@@ -239,7 +208,7 @@ $ sudo firewall-cmd --add-service=dhcp --add-service=tftp --add-service=dns
|
||||
|
||||
### Development
|
||||
|
||||
Install the dependencies for [QEMU with UEFI](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU). Walk through the [getting-started-with-docker](getting-started-with-docker.md) tutorial. Launch client VMs using `create-uefi`.
|
||||
Install the dependencies for [QEMU with UEFI](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU). Walk through the [getting-started-with-docker](getting-started-docker.md) tutorial. Launch client VMs using `create-uefi`.
|
||||
|
||||
Create UEFI QEMU/KVM VMs attached to the `docker0` bridge.
|
||||
|
||||
19
docs/troubleshooting.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Troubleshooting
|
||||
|
||||
## Firewall
|
||||
|
||||
Running DHCP or proxyDHCP with `poseidon/dnsmasq` on a host requires that the Firewall allow DHCP and TFTP (for chainloading) services to run.
|
||||
|
||||
## Port collision
|
||||
|
||||
Running DHCP or proxyDHCP can cause port already in use collisions depending on what's running. Fedora runs bootp listening on udp/67 for example. Find the service using the port.
|
||||
|
||||
```sh
|
||||
$ sudo lsof -i :67
|
||||
```
|
||||
|
||||
Evaluate whether you can configure the existing service or whether you'd like to stop it and test with `poseidon/dnsmasq`.
|
||||
|
||||
## No boot filename received
|
||||
|
||||
PXE client firmware did not receive a DHCP Offer with PXE-Options after several attempts. If you're using the `poseidon/dnsmasq` image with `-d`, each request should log to stdout. Using the wrong `-i` interface is the most common reason DHCP requests are not received. Otherwise, wireshark can be useful for investigating.
|
||||
@@ -1,6 +1,6 @@
|
||||
# Examples
|
||||
|
||||
Matchbox automates network booting and provisioning of clusters. These examples show how to use matchbox on-premise or locally with [QEMU/KVM](scripts/README.md#libvirt).
|
||||
Matchbox automates network booting and provisioning of clusters. These examples show how to use Matchbox on-premise or locally with QEMU/KVM.
|
||||
|
||||
## Terraform Examples
|
||||
|
||||
@@ -8,49 +8,54 @@ These examples use [Terraform](https://www.terraform.io/intro/) as a client to M
|
||||
|
||||
| Name | Description |
|
||||
|-------------------------------|-------------------------------|
|
||||
| [simple-install](terraform/simple-install/) | Install Container Linux with an SSH key |
|
||||
| [etcd3-install](terraform/etcd3-install/) | Install a 3-node etcd3 cluster |
|
||||
| [bootkube-install](terraform/bootkube-install/) | Install a 3-node Kubernetes v1.14.1 cluster |
|
||||
| [fedora-coreos-install](terraform/fedora-coreos-install) | Fedora CoreOS disk install |
|
||||
| [flatcar-install](terraform/flatcar-install) | Flatcar Linux disk install |
|
||||
|
||||
### Customization
|
||||
|
||||
You are encouraged to look through the examples and Terraform modules. Implement your own profiles or package them as modules to meet your needs. We've just provided a starting point. Learn more about [matchbox](../Documentation/matchbox.md) and [Container Linux configs](../Documentation/container-linux-config.md).
|
||||
Look through the examples and Terraform modules and use them as a starting point. Learn more about [matchbox](../docs/matchbox.md).
|
||||
|
||||
## Manual Examples
|
||||
|
||||
These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/matchbox/` directory.
|
||||
|
||||
| Name | Description | CoreOS Container Linux Version | FS | Docs |
|
||||
|------------|-------------|----------------|----|-----------|
|
||||
| simple | CoreOS Container Linux with autologin, using iPXE | stable/1967.3.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| simple-install | CoreOS Container Linux Install, using iPXE | stable/1967.3.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS Container Linux via GRUB2 Netboot | stable/1967.3.0 | RAM | NA |
|
||||
| etcd3 | PXE boot a 3-node etcd3 cluster with proxies | stable/1967.3.0 | RAM | None |
|
||||
| etcd3-install | Install a 3-node etcd3 cluster to disk | stable/1967.3.0 | Disk | None |
|
||||
| bootkube | PXE boot a 3-node Kubernetes v1.8.5 cluster | stable/1967.3.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| bootkube-install | Install a 3-node Kubernetes v1.8.5 cluster | stable/1967.3.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| Name | Description | FS | Docs |
|
||||
|---------------|------------------------------|-----|-------|
|
||||
| fedora-coreos | Fedora CoreOS live PXE | RAM | [docs](https://docs.fedoraproject.org/en-US/fedora-coreos/live-booting-ipxe/) |
|
||||
| fedora-coreos-install | Fedora CoreOS install | Disk | [docs](https://docs.fedoraproject.org/en-US/fedora-coreos/bare-metal/) |
|
||||
| flatcar | Flatcar Linux live PXE | RAM | [docs](https://docs.flatcar-linux.org/os/booting-with-ipxe/) |
|
||||
| flatcar-install | Flatcar Linux install | Disk | [docs](https://docs.flatcar-linux.org/os/booting-with-ipxe/) |
|
||||
|
||||
### Customization
|
||||
|
||||
#### Autologin
|
||||
For Fedora CoreOS, add an SSH authorized key to Fedora CoreOS Config (`ignition/fedora-coreos.yaml`) and regenerate the Ignition Config.
|
||||
|
||||
Example profiles pass the `coreos.autologin` kernel argument. This skips the password prompt for development and troubleshooting and should be removed **before production**.
|
||||
```yaml
|
||||
variant: fcos
|
||||
version: 1.1.0
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa pub-key-goes-here
|
||||
```
|
||||
|
||||
## SSH Keys
|
||||
```
|
||||
podman run -i --rm quay.io/coreos/fcct:release --pretty --strict < fedora-coreos.yaml > fedora-coreos.ign
|
||||
```
|
||||
|
||||
Example groups allow `ssh_authorized_keys` to be added for the `core` user as metadata. You might also include this directly in your Ignition.
|
||||
For Flatcar Linux, add a Matchbox variable to a Group (`groups/flatcar-install/flatcar.json`) to set the SSH authorized key (or directly update the Container Linux Config).
|
||||
|
||||
# /var/lib/matchbox/groups/default.json
|
||||
{
|
||||
"name": "Example Machine Group",
|
||||
"profile": "pxe",
|
||||
"metadata": {
|
||||
"ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
|
||||
}
|
||||
}
|
||||
|
||||
#### Conditional Variables
|
||||
|
||||
**"pxe"**
|
||||
|
||||
Some examples check the `pxe` variable to determine whether to create a `/dev/sda1` filesystem and partition for PXEing with `root=/dev/sda1` ("pxe":"true") or to write files to the existing filesystem on `/dev/disk/by-label/ROOT` ("pxe":"false").
|
||||
```json
|
||||
{
|
||||
"id": "stage-1",
|
||||
"name": "Flatcar Linux",
|
||||
"profile": "flatcar",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: container-linux-update-agent
|
||||
namespace: kube-system
|
||||
spec:
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: container-linux-update-agent
|
||||
spec:
|
||||
containers:
|
||||
- name: update-agent
|
||||
image: quay.io/coreos/container-linux-update-operator:v0.3.1
|
||||
command:
|
||||
- "/bin/update-agent"
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/dbus
|
||||
name: var-run-dbus
|
||||
- mountPath: /etc/coreos
|
||||
name: etc-coreos
|
||||
- mountPath: /usr/share/coreos
|
||||
name: usr-share-coreos
|
||||
- mountPath: /etc/os-release
|
||||
name: etc-os-release
|
||||
env:
|
||||
# read by update-agent as the node name to manage reboots for
|
||||
- name: UPDATE_AGENT_NODE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: var-run-dbus
|
||||
hostPath:
|
||||
path: /var/run/dbus
|
||||
- name: etc-coreos
|
||||
hostPath:
|
||||
path: /etc/coreos
|
||||
- name: usr-share-coreos
|
||||
hostPath:
|
||||
path: /usr/share/coreos
|
||||
- name: etc-os-release
|
||||
hostPath:
|
||||
path: /etc/os-release
|
||||
@@ -1,22 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: container-linux-update-operator
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: container-linux-update-operator
|
||||
spec:
|
||||
containers:
|
||||
- name: update-operator
|
||||
image: quay.io/coreos/container-linux-update-operator:v0.3.1
|
||||
command:
|
||||
- "/bin/update-operator"
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1967.3.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "Controller Node",
|
||||
"profile": "bootkube-controller",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"id": "node2",
|
||||
"name": "Worker Node",
|
||||
"profile": "bootkube-worker",
|
||||
"selector": {
|
||||
"mac": "52:54:00:b2:2f:86",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"id": "node3",
|
||||
"name": "Worker Node",
|
||||
"profile": "bootkube-worker",
|
||||
"selector": {
|
||||
"mac": "52:54:00:c3:61:77",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "Controller Node",
|
||||
"profile": "bootkube-controller",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"id": "node2",
|
||||
"name": "Worker Node",
|
||||
"profile": "bootkube-worker",
|
||||
"selector": {
|
||||
"mac": "52:54:00:b2:2f:86"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"id": "node3",
|
||||
"name": "Worker Node",
|
||||
"profile": "bootkube-worker",
|
||||
"selector": {
|
||||
"mac": "52:54:00:c3:61:77"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "default",
|
||||
"profile": "etcd3-gateway",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"etcd_endpoints": "node1.example.com:2379,node2.example.com:2379,node3.example.com:2379"
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1967.3.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "etcd Node 1",
|
||||
"profile": "etcd3",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_name": "node1",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"id": "node2",
|
||||
"name": "etcd Node 2",
|
||||
"profile": "etcd3",
|
||||
"selector": {
|
||||
"mac": "52:54:00:b2:2f:86",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"etcd_name": "node2",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
{
|
||||
"id": "node3",
|
||||
"name": "etcd Node 3",
|
||||
"profile": "etcd3",
|
||||
"selector": {
|
||||
"mac": "52:54:00:c3:61:77",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"etcd_name": "node3",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "default",
|
||||
"profile": "etcd3-gateway",
|
||||
"metadata": {
|
||||
"etcd_endpoints": "node1.example.com:2379,node2.example.com:2379,node3.example.com:2379"
|
||||
}
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "etcd Node 1",
|
||||
"profile": "etcd3",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_name": "node1",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"id": "node2",
|
||||
"name": "etcd Node 2",
|
||||
"profile": "etcd3",
|
||||
"selector": {
|
||||
"mac": "52:54:00:b2:2f:86"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node2.example.com",
|
||||
"etcd_name": "node2",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,13 +0,0 @@
|
||||
{
|
||||
"id": "node3",
|
||||
"name": "etcd Node 3",
|
||||
"profile": "etcd3",
|
||||
"selector": {
|
||||
"mac": "52:54:00:c3:61:77"
|
||||
},
|
||||
"metadata": {
|
||||
"domain_name": "node3.example.com",
|
||||
"etcd_name": "node3",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
7
examples/groups/fedora-coreos-install/default.json
Normal file
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "Fedora CoreOS install",
|
||||
"profile": "fedora-coreos-install",
|
||||
"selector": {},
|
||||
"metadata": {}
|
||||
}
|
||||
7
examples/groups/fedora-coreos/default.json
Normal file
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "Fedora CoreOS",
|
||||
"profile": "fedora-coreos",
|
||||
"selector": {},
|
||||
"metadata": {}
|
||||
}
|
||||
10
examples/groups/flatcar-install/flatcar.json
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"id": "stage-1",
|
||||
"name": "Flatcar Linux",
|
||||
"profile": "flatcar",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
}
|
||||
}
|
||||
11
examples/groups/flatcar-install/install.json
Normal file
@@ -0,0 +1,11 @@
|
||||
{
|
||||
"id": "stage-0",
|
||||
"name": "Flatcar Linux install",
|
||||
"profile": "flatcar-install",
|
||||
"metadata": {
|
||||
"os_channel": "stable",
|
||||
"os_version": "2605.6.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/flatcar"
|
||||
}
|
||||
}
|
||||
7
examples/groups/flatcar/default.json
Normal file
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "Flatcar Linux",
|
||||
"profile": "flatcar",
|
||||
"selector": {},
|
||||
"metadata": {}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "GRUB CoreOS Container Linux alpha",
|
||||
"profile": "grub"
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"id": "install",
|
||||
"name": "Simple CoreOS Container Linux Install",
|
||||
"profile": "simple-install",
|
||||
"metadata": {
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1967.3.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"id": "simple",
|
||||
"name": "Simple CoreOS Container Linux Alpha",
|
||||
"profile": "simple",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "Simple CoreOS Container Linux Alpha with RAM disk",
|
||||
"profile": "simple"
|
||||
}
|
||||
@@ -1,183 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
|
||||
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
|
||||
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
|
||||
Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
|
||||
Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
|
||||
Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
|
||||
Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
|
||||
Environment="ETCD_CLIENT_CERT_AUTH=true"
|
||||
Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
|
||||
Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
|
||||
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
|
||||
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
mask: true
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
|
||||
--mount volume=opt-cni-bin,target=/opt/cni/bin \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--allow-privileged \
|
||||
--anonymous-auth=false \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--exit-on-lock-contention \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/master \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
|
||||
--require-kubeconfig
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: bootkube.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Bootstrap a Kubernetes control plane with a temp api-server
|
||||
[Service]
|
||||
Type=simple
|
||||
WorkingDirectory=/opt/bootkube
|
||||
ExecStart=/opt/bootkube/bootkube-start
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.8.5
|
||||
- path: /etc/ssl/etcd/.empty
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
empty
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
- path: /opt/bootkube/bootkube-start
|
||||
filesystem: root
|
||||
mode: 0544
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
#!/bin/bash
|
||||
# Wrapper for bootkube start
|
||||
set -e
|
||||
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
|
||||
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.9.1}"
|
||||
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
|
||||
exec /usr/bin/rkt run \
|
||||
--trust-keys-from-https \
|
||||
--volume assets,kind=host,source=$BOOTKUBE_ASSETS \
|
||||
--mount volume=assets,target=/assets \
|
||||
--volume bootstrap,kind=host,source=/etc/kubernetes \
|
||||
--mount volume=bootstrap,target=/etc/kubernetes \
|
||||
$RKT_OPTS \
|
||||
${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
|
||||
--net=host \
|
||||
--dns=host \
|
||||
--exec=/bootkube -- start --asset-dir=/assets "$@"
|
||||
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
@@ -1,126 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: docker.service
|
||||
enable: true
|
||||
- name: locksmithd.service
|
||||
mask: true
|
||||
- name: kubelet.path
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Watch for kubeconfig
|
||||
[Path]
|
||||
PathExists=/etc/kubernetes/kubeconfig
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
- name: wait-for-dns.service
|
||||
enable: true
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Wait for DNS entries
|
||||
Wants=systemd-resolved.service
|
||||
Before=kubelet.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=true
|
||||
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
|
||||
[Install]
|
||||
RequiredBy=kubelet.service
|
||||
- name: kubelet.service
|
||||
contents: |
|
||||
[Unit]
|
||||
Description=Kubelet via Hyperkube ACI
|
||||
[Service]
|
||||
EnvironmentFile=/etc/kubernetes/kubelet.env
|
||||
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
|
||||
--volume=resolv,kind=host,source=/etc/resolv.conf \
|
||||
--mount volume=resolv,target=/etc/resolv.conf \
|
||||
--volume var-lib-cni,kind=host,source=/var/lib/cni \
|
||||
--mount volume=var-lib-cni,target=/var/lib/cni \
|
||||
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
|
||||
--mount volume=opt-cni-bin,target=/opt/cni/bin \
|
||||
--volume var-log,kind=host,source=/var/log \
|
||||
--mount volume=var-log,target=/var/log \
|
||||
--insecure-options=image"
|
||||
ExecStartPre=/bin/mkdir -p /opt/cni/bin
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
|
||||
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
|
||||
ExecStartPre=/bin/mkdir -p /var/lib/cni
|
||||
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
|
||||
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
ExecStart=/usr/lib/coreos/kubelet-wrapper \
|
||||
--allow-privileged \
|
||||
--anonymous-auth=false \
|
||||
--client-ca-file=/etc/kubernetes/ca.crt \
|
||||
--cluster_dns={{.k8s_dns_service_ip}} \
|
||||
--cluster_domain=cluster.local \
|
||||
--cni-conf-dir=/etc/kubernetes/cni/net.d \
|
||||
--exit-on-lock-contention \
|
||||
--hostname-override={{.domain_name}} \
|
||||
--kubeconfig=/etc/kubernetes/kubeconfig \
|
||||
--lock-file=/var/run/lock/kubelet.lock \
|
||||
--network-plugin=cni \
|
||||
--node-labels=node-role.kubernetes.io/node \
|
||||
--pod-manifest-path=/etc/kubernetes/manifests \
|
||||
--require-kubeconfig
|
||||
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
storage:
|
||||
{{ if index . "pxe" }}
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
{{end}}
|
||||
files:
|
||||
- path: /etc/kubernetes/kubelet.env
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
|
||||
KUBELET_IMAGE_TAG=v1.8.5
|
||||
- path: /etc/ssl/etcd/.empty
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline: |
|
||||
empty
|
||||
- path: /etc/hostname
|
||||
filesystem: root
|
||||
mode: 0644
|
||||
contents:
|
||||
inline:
|
||||
{{.domain_name}}
|
||||
- path: /etc/sysctl.d/max-user-watches.conf
|
||||
filesystem: root
|
||||
contents:
|
||||
inline: |
|
||||
fs.inotify.max_user_watches=16184
|
||||
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
@@ -1,31 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
ExecStart=
|
||||
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
|
||||
--listen-addr=127.0.0.1:2379 \
|
||||
--endpoints={{.etcd_endpoints}}
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
---
|
||||
systemd:
|
||||
units:
|
||||
- name: etcd-member.service
|
||||
enable: true
|
||||
dropins:
|
||||
- name: 40-etcd-cluster.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="ETCD_IMAGE_TAG=v3.2.0"
|
||||
Environment="ETCD_NAME={{.etcd_name}}"
|
||||
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
|
||||
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
|
||||
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
|
||||
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
|
||||
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
|
||||
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
|
||||
- name: locksmithd.service
|
||||
dropins:
|
||||
- name: 40-etcd-lock.conf
|
||||
contents: |
|
||||
[Service]
|
||||
Environment="REBOOT_STRATEGY=etcd-lock"
|
||||
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
15
examples/ignition/fedora-coreos.ign
Normal file
@@ -0,0 +1,15 @@
|
||||
{
|
||||
"ignition": {
|
||||
"version": "3.1.0"
|
||||
},
|
||||
"passwd": {
|
||||
"users": [
|
||||
{
|
||||
"name": "core",
|
||||
"sshAuthorizedKeys": [
|
||||
"ssh-rsa SET_PUBKEY_HERE"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
8
examples/ignition/fedora-coreos.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
variant: fcos
|
||||
version: 1.1.0
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
- ssh-rsa SET_PUBKEY_HERE
|
||||
|
||||
@@ -21,7 +21,12 @@ storage:
|
||||
inline: |
|
||||
#!/bin/bash -ex
|
||||
curl --retry 10 --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
|
||||
coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}}
|
||||
flatcar-install \
|
||||
-d /dev/sda \
|
||||
-C {{.os_channel}} \
|
||||
-V {{.os_version}} \
|
||||
{{- if index . "baseurl"}}-b {{.baseurl}} \{{end}}
|
||||
-i ignition.json
|
||||
udevadm settle
|
||||
systemctl reboot
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"id": "bootkube-controller",
|
||||
"name": "bootkube Ready Controller",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"root=/dev/sda1",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "bootkube-controller.yaml"
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"id": "bootkube-worker",
|
||||
"name": "bootkube Ready Worker",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"root=/dev/sda1",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "bootkube-worker.yaml"
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "etcd3-gateway",
|
||||
"name": "etcd3-gateway",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "etcd3-gateway.yaml"
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "etcd3",
|
||||
"name": "etcd3",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1967.3.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1967.3.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"initrd=coreos_production_pxe_image.cpio.gz",
|
||||
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "etcd3.yaml"
|
||||
}
|
||||