216 Commits

Author SHA1 Message Date
Stephen Demos
e78150218f Merge pull request #727 from sdemos/release
release v0.7.1
2018-11-01 14:37:41 -07:00
Stephen Demos
cccb588855 *: update matchbox version to v0.7.1 2018-11-01 14:28:26 -07:00
Stephen Demos
9a177e83d7 changes: update changes document with relevant changes 2018-11-01 14:20:09 -07:00
Stephen Demos
dfd0457e03 Merge pull request #713 from anitakumar/master
HTTPS support for web server
2018-11-01 13:58:24 -07:00
Anita Kumar
9de30aea59 documentation: document HTTPS flags
Updated Documentation to include HTTPS
2018-11-01 13:41:51 -07:00
Anita Kumar
910ee6f18c cmd/matchbox: HTTPS support for web server 2018-11-01 13:41:04 -07:00
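A minimal sketch of how the new HTTPS serving might be enabled (the flag names below are assumptions for illustration; check `matchbox -help` in v0.7.1 for the flags this change actually added):

```sh
# Hypothetical flag names -- verify against `matchbox -help`.
./matchbox -address=0.0.0.0:8080 \
  -web-ssl=true \
  -web-cert-file=/etc/matchbox/server.crt \
  -web-key-file=/etc/matchbox/server.key
```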
Stephen Demos
0994b860b5 Merge pull request #720 from salarmgh/feature/autologin
Add kernel args variable
2018-11-01 13:26:54 -07:00
Stephen Demos
78f7e8d492 Merge pull request #722 from kkohtaka/fix-terraform-modules-example
Fix an example usage of terraform modules
2018-11-01 13:25:18 -07:00
Stephen Demos
e804ace9e2 Merge pull request #726 from schu/schu/scripts-get-flatcar
scripts: add helper script `get-flatcar`
2018-10-30 11:00:33 -07:00
Kazumasa Kohtaka
0012d691f4 Fix an example usage of terraform modules 2018-10-30 02:37:43 +09:00
Michael Schubert
e170c600b3 scripts: add helper script get-flatcar
Similar to `get-coreos`, add a helper script `get-flatcar` to download
Flatcar assets.

Follow up for https://github.com/poseidon/typhoon/pull/315
2018-10-29 16:53:22 +01:00
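A usage sketch, assuming `get-flatcar` mirrors the channel/version/destination arguments of the `get-coreos` script shown later on this page (the version number is illustrative):

```sh
# Download Flatcar Linux PXE kernel and initrd assets into examples/assets
# (argument order assumed to mirror ./scripts/get-coreos)
./scripts/get-flatcar stable 1855.4.0 ./examples/assets
```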
Stephen Demos
4f229d5d9a Merge pull request #723 from sdemos/master
travis: update to latest supported go major versions
2018-10-19 14:17:48 -07:00
Stephen Demos
3cd8ba0a05 travis: update to latest supported go major versions
This also fixes the golint URL to use the new location, to fix CI.
2018-10-19 12:14:20 -07:00
Salar Moghaddam
74f13a2f86 Add description and default value 2018-09-24 15:59:08 +03:30
Salar Moghaddam
4eee84b17d Add kernel args variable 2018-09-24 15:15:17 +03:30
Stephen Demos
845d1d0adc Merge pull request #717 from olleolleolle/patch-2
README: Use SVG badge for GoDoc
2018-09-13 11:56:00 -07:00
Stephen Demos
5b1c790d0c Merge pull request #716 from olleolleolle/patch-1
[docs] Typo fix
2018-09-13 11:55:45 -07:00
Olle Jonsson
70400b7dd0 README: Use SVG badge for GoDoc 2018-09-12 16:09:20 +02:00
Olle Jonsson
c6ebdfeb92 [docs] Typo fix 2018-09-12 13:22:35 +02:00
Stephen Demos
99acdf4c6b Merge pull request #709 from dghubble/update-kubernetes
Update Kubernetes (terraform) example to v1.10.3
2018-05-30 10:07:21 -07:00
Dalton Hubble
be057ed9c8 Update Kubernetes (terraform) example to v1.10.3
* https://github.com/poseidon/typhoon/releases/tag/v1.10.3
2018-05-30 00:34:05 -07:00
Stephen Demos
8bb99143e8 Merge pull request #704 from ae-v/master
fixes typo in scripts/tls/README.md
2018-04-09 16:26:48 -07:00
Stephen Demos
c802ce5805 Merge pull request #703 from dghubble/master
Update terraform Kubernetes examples to v1.10.0
2018-04-09 13:20:28 -07:00
Andre Veelken
c4e82c03a4 fixes typo in scripts/tls/README.md 2018-04-09 10:20:55 +02:00
Dalton Hubble
29c93046ef Update terraform Kubernetes examples to v1.10.0 2018-04-04 01:23:11 -07:00
Dalton Hubble
34e981dc7c examples: Update terraform Kubernetes examples to v1.9.3 2018-02-13 16:18:45 -08:00
Dalton Hubble
3a88a663c3 Merge pull request #696 from zbwright/example-links
docs: change links to work with sync
2018-01-25 15:20:51 -08:00
Dalton Hubble
572c8d26eb Merge pull request #695 from coreos/fix-cert-gen
scripts/tls: Fix cert-gen to add index.txt.attr
2018-01-25 15:09:43 -08:00
Beth Wright
c22b273548 docs: change links to work with sync 2018-01-25 14:04:56 -08:00
Dalton Hubble
c3ef870ce5 scripts/tls: Fix cert-gen to add index.txt.attr 2018-01-25 11:35:09 -08:00
Dalton Hubble
e9ce7325ab Merge pull request #689 from diegs/env
scripts: fix shebangs.
2018-01-10 10:02:38 -08:00
Diego Pontoriero
948bdee165 scripts: fix shebangs.
/bin/bash is not an LSB path.
2018-01-09 17:59:15 -08:00
Dalton Hubble
50e923730e Merge pull request #687 from coreos/bump-cl
Bump Container Linux version from 1576.4.0 to 1576.5.0
2018-01-09 04:28:40 -08:00
Dalton Hubble
1799c8e23e Bump Container Linux version from 1576.4.0 to 1576.5.0 2018-01-08 16:33:48 -08:00
Dalton Hubble
454ae972a1 Merge pull request #686 from ericchiang/coc
automated PR: update CoC
2018-01-08 06:55:38 -08:00
Eric Chiang
fe0c3438fd update CoC 2018-01-04 12:30:28 -08:00
Dalton Hubble
65b410e20b Merge pull request #683 from coreos/update-kubernetes
Update Kubernetes from v1.8.4 to v1.8.5
2017-12-18 16:09:39 -08:00
Dalton Hubble
dced573acb examples: Update Kubernetes from v1.8.4 to v1.8.5 2017-12-14 13:23:57 -08:00
Dalton Hubble
4888c04dee contrib: Change nginx-ingress ssl-passthrough annotation
* nginx-ingress controller 0.9.0-beta.18 and above changed the
annotations prefix to nginx.ingress.kubernetes.io
2017-12-13 15:24:24 -08:00
Dalton Hubble
4e9d542a87 Merge pull request #682 from coreos/release-v0.7.0
*: Update Matchbox version to v0.7.0
2017-12-12 17:00:55 -08:00
Dalton Hubble
08f4e9908b *: Update Matchbox version to v0.7.0 2017-12-12 14:57:09 -08:00
Dalton Hubble
dd96f58417 Merge pull request #681 from coreos/allow-terraform-11
examples: Fix examples to work with Terraform v0.11.x
2017-12-12 14:48:45 -08:00
Dalton Hubble
f5ef2d156b examples: Fix examples to work with Terraform v0.11.x
* Explicitly pass provider modules to satisfy constraints
* https://github.com/hashicorp/terraform/issues/16824
2017-12-12 14:36:38 -08:00
Dalton Hubble
f673d48007 Merge pull request #680 from coreos/bump-cl
examples: Update Container Linux to stable 1576.4.0
2017-12-12 13:33:13 -08:00
Dalton Hubble
7a58d944d8 examples: Update Container Linux to stable 1576.4.0
* Use Docker 17.09 by default in Kubernetes clusters
2017-12-11 21:40:51 -08:00
Dalton Hubble
5d975ec42a Merge pull request #678 from coreos/update-bootkube
examples: Update from Kubernetes v1.8.3 to v1.8.4
2017-12-11 21:40:26 -08:00
Dalton Hubble
2404d34b0e examples: Update from Kubernetes v1.8.3 to v1.8.4 2017-12-11 21:30:26 -08:00
Dalton Hubble
c9b9711bca Merge pull request #677 from dghubble/bump-version
scripts/devnet: Bump matchbox image version
2017-11-27 16:12:17 -08:00
Dalton Hubble
ae524f57f2 scripts/devnet: Bump matchbox image version
* Examples use Ignition v2.1.0 spec
2017-11-27 11:14:47 -08:00
Dalton Hubble
f26224c57d Merge pull request #675 from redbaron/multiple-initrd
fix loading multiple initrds
2017-11-22 13:45:42 -08:00
Dalton Hubble
2c063a4674 Merge pull request #676 from coreos/fix-matchbox-endpoint
examples: Fix endpoint name for manual examples
2017-11-20 14:10:46 -08:00
Dalton Hubble
7d5656ffe3 examples: Fix endpoint name for manual examples
* Bug introduced by b10c777729
2017-11-20 13:46:02 -08:00
Maxim Ivanov
a683e8261e iPXE loads multiple initrds when each is given to its own initrd command 2017-11-20 19:23:04 +00:00
Dalton Hubble
c75fc8f88e Merge pull request #674 from coreos/efi
contrib/dnsmasq: Add ipxe.efi for dnsmasq:v0.5.0
2017-11-17 11:21:24 -08:00
Dalton Hubble
b10c777729 contrib/dnsmasq: Remove old matchbox endpoint from dnsmasq configs 2017-11-16 23:41:29 -08:00
Dalton Hubble
5992ba6ad5 scripts/libvirt: Add disk hd to UEFI VM boot order 2017-11-16 23:41:29 -08:00
Dalton Hubble
ca223f800b examples: Add UEFI initrd option to Terraform examples 2017-11-16 23:41:27 -08:00
Dalton Hubble
1246d5a0db contrib/dnsmasq: Add ipxe.efi for dnsmasq:v0.5.0
* Add ipxe.efi to dnsmasq image's /var/lib/tftpboot directory
* Add initrd kernel argument respected only by UEFI
https://github.com/coreos/bugs/issues/1239
* Improve network-setup docs and scripts to cover UEFI clients
and to support launching UEFI QEMU/KVM clusters locally
* Reduce references to the grub.efi flow; it's not a happy path
2017-11-16 23:40:52 -08:00
Dalton Hubble
4f7dd0942c Merge pull request #673 from coreos/update-kubernetes
examples: Update Kubernetes from v1.8.2 to v1.8.3
2017-11-09 16:29:45 -08:00
Dalton Hubble
3e6aa4ee73 examples: Update Kubernetes from v1.8.2 to v1.8.3 2017-11-09 16:01:43 -08:00
Dalton Hubble
9c39221b71 Merge pull request #672 from coreos/fix-publishing
travis.yml: Ensure deploy condition matches build matrix
2017-11-08 15:41:40 -08:00
Dalton Hubble
4103461778 travis.yml: Ensure deploy condition matches build matrix
* Build binaries for Docker images with Go 1.8.5
* Travis should "deploy" publish the quay image for Go 1.8.5
2017-11-08 15:09:43 -08:00
Dalton Hubble
9a6d815978 Merge pull request #671 from coreos/fix-publishing
travis.yml: Fix travis to publish master images
2017-11-08 15:00:39 -08:00
Dalton Hubble
6aa8759bfd travis.yml: Fix travis to publish master images 2017-11-08 14:47:40 -08:00
Dalton Hubble
d5027950e2 Merge pull request #670 from coreos/update-ignition
Update Ignition config version to v2.1.0
2017-11-08 12:58:29 -08:00
Dalton Hubble
85a2a6b252 matchbox: Update tests due to Ignition 2.1.0 format 2017-11-07 15:23:41 -08:00
Dalton Hubble
4bc5fcdc5e vendor: Vendor glide.yaml ct, Ignition, and dependencies 2017-11-06 14:13:54 -08:00
Dalton Hubble
2f4d5b95e4 glide.yaml: Update ct to v0.5.0 and Ignition to v0.19.0
* Change `/ignition` endpoint to serve a v2.1.0 Ignition config
* Drops support for Container Linux versions before 1465.0.0
2017-11-06 13:29:42 -08:00
Dalton Hubble
257f2fa553 Merge pull request #667 from dghubble/bump-cl
examples: Bump Container Linux to stable 1520.8.0
2017-10-30 17:11:50 -07:00
Dalton Hubble
7829c14d52 examples: Bump Container Linux to stable 1520.8.0
* Increase minimum RAM required to use PXE image
* https://coreos.com/releases/#1520.5.0
2017-10-30 13:58:17 -07:00
Dalton Hubble
ce72fb72a0 Merge pull request #665 from coreos/hyperkube
Update to Kubernetes v1.8.2
2017-10-27 16:39:07 -07:00
Dalton Hubble
41d5db4723 examples: Update examples to Kubernetes v1.8.2
* Fixes v1.8.1 kube-apiserver memory leak
2017-10-27 15:49:53 -07:00
Dalton Hubble
dfd08e48e5 Switch from quay.io to gcr.io hyperkube image 2017-10-27 15:49:53 -07:00
Dalton Hubble
347e142db9 Merge pull request #664 from coreos/docker-docs
Switch local QEMU/KVM tutorial to favor Docker
2017-10-27 13:51:36 -07:00
Dalton Hubble
b63e9b2589 scripts/devnet: Use a tagged matchbox release in devnet 2017-10-23 13:50:07 -07:00
Dalton Hubble
4a32b0cd59 scripts: Switch default tutorial from rkt to docker 2017-10-23 13:49:09 -07:00
Dalton Hubble
b0b8d97539 examples: Update examples to Kubernetes v1.8.1
* Use bootkube v0.8.0
2017-10-20 15:04:09 -07:00
Dalton Hubble
581be69da7 Merge pull request #659 from rlenferink/master
Documentation: minor documentation changes
2017-10-05 14:01:28 -07:00
Roy Lenferink
dc75fcc869 Documentation: minor improvements
Fixed example hostname in docker run command

Added bash statements for storing certificates
2017-10-05 22:51:12 +02:00
Dalton Hubble
fc3e688c97 Merge pull request #658 from zbwright/fix-link
docs: fix broken link
2017-10-04 17:14:10 -07:00
Beth Wright
f07dc758c4 docs: fix broken link 2017-10-04 16:40:30 -07:00
Dalton Hubble
d2827d7ed0 Merge pull request #656 from coreos/update-kubernetes
examples: Update Kubernetes from v1.7.5 to v1.7.7
2017-10-04 10:13:33 -07:00
Dalton Hubble
692bf81df8 examples: Update Kubernetes from v1.7.5 to v1.7.7
* Update from bootkube v0.6.2 to v0.7.0
* Update kube-dns to fix dnsmasq vulnerability
2017-10-04 09:55:37 -07:00
Dalton Hubble
cfcec6ac03 Merge pull request #655 from coreos/update-terraform-module
examples/terraform: Update bare-metal module version
2017-09-29 10:52:18 -07:00
Dalton Hubble
592969134c examples/terraform: Update bare-metal module version
* Upstream fixes to bump all control plane components to v1.7.5
* Stop including etcd-network-checkpointer with on-host etcd
* Remove experimental_self_hosted_etcd support
2017-09-28 11:25:52 -07:00
Dalton Hubble
2b605c8d9c Merge pull request #653 from coreos/improve-ctx
matchbox: Use Go 1.7 request Context, remove ContextHandler
2017-09-25 17:07:45 -07:00
Dalton Hubble
63a95188be matchbox: Use Go 1.7 request Context, remove ContextHandler
* Starting in Go 1.7, the standard library http.Request includes
a Context for passing request-scoped values between chained handlers
* Delete the ContextHandler (breaking, should not have been
exported to begin with)
2017-09-21 17:12:33 -07:00
Dalton Hubble
5aa301b72d Merge pull request #648 from coreos/bump-container-linux
examples: Bump Container Linux to stable 1465.7.0
2017-09-18 16:35:48 -07:00
Dalton Hubble
7647a5d095 Merge pull request #649 from radhus/add_select_client
matchbox/client: Expose Select endpoint
2017-09-18 15:09:50 -07:00
Dalton Hubble
06f80fa003 examples: Bump Container Linux to stable 1465.7.0 2017-09-18 15:08:08 -07:00
Dalton Hubble
01a767ab3e Merge pull request #651 from coreos/cleanup
examples: Remove unused example module
2017-09-18 14:57:34 -07:00
Dalton Hubble
6be5c0f59c examples: Remove unused example module
* Terraform-based Kubernetes example now uses a community project's
 Terraform module to show Matchbox usage
2017-09-18 14:33:51 -07:00
William Johansson
5efc514097 matchbox/client: Expose Select endpoint
Exposes the Select endpoint in matchbox/client just as the other
endpoints like Profiles, Ignition and Generic.
2017-09-17 21:19:37 +02:00
Dalton Hubble
757f46e96f Merge pull request #647 from dvrkps/patch-1
travis: update go versions
2017-09-15 10:43:05 -07:00
Dalton Hubble
5aeb2d1d3d Merge pull request #646 from coreos/update-kubernetes
examples: Update Kubernetes from v1.7.3 to v1.7.5
2017-09-15 10:38:59 -07:00
Davor Kapsa
1119bb22f0 travis: update go versions 2017-09-15 12:15:03 +02:00
Dalton Hubble
6195ae377e examples/ignition: Update kubelet.service to match upstream
* Mount host /opt/cni/bin in Kubelet to use host's CNI plugins
* Switch /var/run/kubelet-pod.uuid to /var/cache/kubelet-pod.uuid
to persist between reboots and cleanup old Kubelet pods
* Organize Kubelet flags in alphabetical order
2017-09-14 16:53:42 -07:00
Dalton Hubble
d7783a94e9 examples: Update Kubernetes from v1.7.3 to v1.7.5
* Switch Terraform example to use Typhoon project's module
instead: https://github.com/poseidon/typhoon
* Includes support for Calico and Flannel
2017-09-14 15:52:58 -07:00
Dalton Hubble
4228ccb330 README: List notable projects using Matchbox 2017-09-11 15:59:05 -07:00
Dalton Hubble
e5d5280658 Merge pull request #644 from squeed/fix-pxe-flag
libvirt: don't pass --pxe
2017-08-22 10:47:31 -07:00
Casey Callendrello
46f0477614 libvirt: don't pass --pxe
In virt-install v1.4.2, the meaning of `--pxe` changed from "allow pxe
boot" to "always pxe boot." This breaks matchbox, since we expect hosts
to pxe-boot only with empty disks. On hosts with v1.4.2, the VMs loop,
re-installing Container Linux over and over.

The flag isn't necessary anyway, since we pass `--boot=hd,network`,
which enables pxe-booting.
2017-08-22 11:19:16 +02:00
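A hedged sketch of a virt-install invocation along these lines (names, sizes, and the bridge are placeholders, not the script's exact arguments): with `--boot=hd,network` the VM boots from disk when an OS is installed and falls back to PXE when the disk is empty, so `--pxe` is unnecessary.

```sh
# Placeholder values for illustration only
virt-install --name node1 \
  --memory 2048 --vcpus 1 \
  --disk size=6 \
  --network bridge=metal0,mac=52:54:00:a1:9c:ae \
  --boot=hd,network \
  --noautoconsole
```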
Dalton Hubble
0e4265b2bc Merge pull request #643 from coreos/bump-kubernetes
examples: Update Kubernetes from v1.7.1 to v1.7.3
2017-08-21 15:00:57 -07:00
Dalton Hubble
18de74e85b examples: Update Kubernetes from v1.7.1 to v1.7.3 2017-08-21 11:19:39 -07:00
Dalton Hubble
31040e9729 Merge pull request #642 from coreos/bump-fix
Update CLUO version and bootkube-terraform location
2017-08-18 10:28:29 -07:00
Dalton Hubble
f0a4cfd1cb *: Update location of bootkube-terraform module 2017-08-17 15:56:49 -07:00
Dalton Hubble
aeca5b08f9 examples/addons: Update CLUO to v0.3.1 2017-08-17 15:38:34 -07:00
Dalton Hubble
7c1b9b17dc Merge pull request #636 from jcmoraisjr/jm-add-version
Add version.txt download on get-coreos
2017-08-15 17:15:47 -07:00
Dalton Hubble
0e6ce19172 Merge pull request #640 from andrewrothstein/typo
fix typo in documentation
2017-08-15 10:49:50 -07:00
Andrew Rothstein
281fd5226a fix typo 2017-08-14 19:35:49 -04:00
Joao Morais
fb0ee0f05a Add version.txt download on get-coreos
The version.txt file is used by coreos-install if
the version number is "current".
2017-08-09 22:10:59 -03:00
Dalton Hubble
7def0d7e86 Merge pull request #635 from dghubble/better-validation
matchbox/client: Validate client endpoint is a host:port
2017-08-09 14:45:57 -07:00
Dalton Hubble
1c076875c2 matchbox/client: Validate client endpoint is a host:port
* Provide better errors to clients that forget to specify the
port or include a protocol scheme by mistake
* grpc-go uses net.SplitHostPort to validate server listener
addresses are 'host:port', but doesn't validate Dial targets
2017-08-09 10:50:25 -07:00
Dalton Hubble
7ba0f1476b Merge pull request #632 from dghubble/update-ct-and-ignition
glide.yaml: Update ct and Ignition
2017-08-08 13:55:21 -07:00
Dalton Hubble
ec6844a43a glide.yaml: Update ct and Ignition
* Fix container-linux-config-transpiler calls that changed
* Update container-linux-config-transpiler to v0.4.2
* Update Ignition to v0.17.2
2017-08-08 13:30:14 -07:00
Dalton Hubble
6857c1319a Merge pull request #629 from heyitsanthony/etcdctl-api
Documentation: remove ETCDCTL_API=3 settings
2017-08-07 09:48:44 -07:00
Anthony Romano
cb6bb3c90d Documentation: remove ETCDCTL_API=3 settings
etcd examples set ETCDCTL_API=3 but are using v2 etcdctl commands. This
works on CL by accident because it ships with etcdctl 2.3, which doesn't
recognize the API env var.
2017-08-04 23:04:19 -07:00
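For context, a short sketch of the mismatch (standard etcdctl behavior, not text from the changed docs): the v2 and v3 APIs use different command sets, so setting `ETCDCTL_API=3` while issuing v2-style commands only works where etcdctl is too old to honor the variable.

```sh
# etcd v2 API style (the default for etcdctl 2.x, as shipped on Container Linux)
etcdctl ls / --recursive
etcdctl get /foo

# etcd v3 API style (requires etcdctl 3.x and ETCDCTL_API=3)
ETCDCTL_API=3 etcdctl get foo
ETCDCTL_API=3 etcdctl get --prefix ""
```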
Dalton Hubble
5c5be5ce5b Merge pull request #628 from alrs/fix-swallowed-test-errors
Fix swallowed errors in server package tests
2017-08-04 17:02:41 -07:00
Lars Lehtonen
4cbf2b7448 Fix swallowed errors in server package tests 2017-08-03 18:59:15 -07:00
Dalton Hubble
d781e43212 Merge pull request #627 from coreos/fix-module-location
*: Fix location of the bootkube-terraform module
2017-08-03 16:09:57 -07:00
Dalton Hubble
3ca88334d2 *: Fix location of the bootkube-terraform module 2017-08-03 14:00:35 -07:00
Dalton Hubble
c7a649c731 Merge pull request #626 from coreos/bump-dnsmasq
*: Bump dnsmasq references to use v0.4.1
2017-08-01 23:21:18 -07:00
Dalton Hubble
d03f256976 *: Bump dnsmasq references to use v0.4.1 2017-08-01 16:47:18 -07:00
Dalton Hubble
9ecfcac0b9 Merge pull request #625 from coreos/dnsmasq
contrib/dnsmasq: Bump dnsmasq image to v0.4.1
2017-08-01 16:17:06 -07:00
Dalton Hubble
035b01634f contrib/dnsmasq: Bump dnsmasq image to v0.4.1
* Update from alpine:3.5 to alpine:3.6
* List ports 67 and 69 so ACI conversion still works
2017-07-31 14:26:05 -07:00
Dalton Hubble
e8d3e8c70c Merge pull request #617 from coreos/kubernetes-v1.7
examples: Update Kubernetes to v1.7.1
2017-07-24 17:14:51 -07:00
Dalton Hubble
cc490ff55d examples: Update Kubernetes to v1.7.1 2017-07-24 15:52:57 -07:00
Dalton Hubble
df6354ad45 Merge pull request #618 from dghubble/cluo
examples/addons: Update CLUO from v0.2.1 to v0.2.2
2017-07-21 16:05:43 -07:00
Dalton Hubble
3d8a3777f0 examples/addons: Update CLUO from v0.2.1 to v0.2.2 2017-07-21 15:12:23 -07:00
Dalton Hubble
dfee550522 Merge pull request #615 from dghubble/in-place-upgrade
Documentation: Refresh Kubernetes in-place upgrade doc
2017-07-21 13:50:00 -07:00
Dalton Hubble
07e9676457 Merge pull request #616 from coreos/bump-cl
examples: Install clusters at Container Linux 1409.7.0 (stable)
2017-07-20 11:52:57 -07:00
Dalton Hubble
a69f6dd2d8 examples: Install clusters at Container Linux 1409.7.0 (stable) 2017-07-20 11:13:43 -07:00
Dalton Hubble
26d8b7d480 Documentation: Refresh Kubernetes in-place upgrade doc 2017-07-19 17:15:12 -07:00
Dalton Hubble
2c02549cd6 Merge branch 'celevra' 2017-07-19 13:06:44 -07:00
Philipp Zeitschel
3c999d27e9 Documentation: Export variables in example commands 2017-07-19 13:04:44 -07:00
Dalton Hubble
52b317dff9 Merge pull request #614 from coreos/kubernetes-v1.6.7
examples: Update Kubernetes from v1.6.6 to v1.6.7
2017-07-19 11:59:12 -07:00
Dalton Hubble
97985b213b examples: Update Kubernetes from v1.6.6 to v1.6.7 2017-07-19 11:30:54 -07:00
Dalton Hubble
1ba353e5b6 Merge pull request #611 from coreos/fix-bootkube-tests
tests/smoke: Fix etcd certs distribution in bootkube test
2017-07-17 14:15:38 -07:00
Dalton Hubble
398d12e148 tests/smoke: Fix etcd certs distribution in bootkube test
* Introduced in ce3154cae9
* Masked by larger-scale timeouts / issues in the testing env
2017-07-17 13:25:48 -07:00
Dalton Hubble
be8fd3d488 Merge pull request #608 from coreos/locksmithd-to-cluo
Switch Kubernetes clusters from locksmith to Container Linux Update Operator
2017-07-17 11:26:14 -07:00
Dalton Hubble
27d1139a07 examples/terraform: Switch Kubernetes to use CLUO
* Users should deploy the Container Linux Update Operator to coordinate
reboots of Container Linux nodes in a Kubernetes cluster
* Write cluster addon docs to describe CLUO
* Terraform modules `bootkube` and `profiles` (Kubernetes) disable
locksmithd
2017-07-14 15:12:53 -07:00
Dalton Hubble
ee3445454e examples: Switch Kubernetes (non-terraform) to use CLUO
* Use the container linux update operator to coordinate reboots
* Stop using locksmithd for reboot coordination
* etcd TLS assets now only need to be distributed to controller
nodes which are etcd peers
2017-07-14 14:11:33 -07:00
Dalton Hubble
170f8c09ec Merge pull request #605 from coreos/fix-bootkube-version
scripts/dev: Update bootkube render binary for tests
2017-07-14 10:23:35 -07:00
Dalton Hubble
e10525ded0 scripts/dev: Fix bootkube render binary for tests 2017-07-13 10:26:30 -07:00
Dalton Hubble
4c47adf390 Merge pull request #604 from coreos/bootkube-v0.5.0
examples: Update terraform Kubernetes to use bootkube v0.5.0
2017-07-13 09:37:41 -07:00
Dalton Hubble
ce3154cae9 examples: Update terraform Kubernetes to use bootkube v0.5.0 2017-07-12 20:13:04 -07:00
Dalton Hubble
5e54960a92 Merge pull request #603 from coreos/non-terraform-bootkube
Update non-terraform Kubernetes to use bootkube v0.5.0
2017-07-12 15:27:16 -07:00
Dalton Hubble
e008b8ea5e Jenkinsfile: Bump Kubernetes test timeouts
* Hyperkube image downloads can be very slow, though the
clusters themselves are considered correctly configured
2017-07-12 13:42:34 -07:00
Dalton Hubble
b636fc7a3d examples: Update non-terraform Kubernetes to use bootkube v0.5.0 2017-07-12 13:41:33 -07:00
Dalton Hubble
30cf06853d Merge pull request #597 from ivy/doc-tweaks
Documentation tweaks
2017-07-10 11:46:43 -07:00
Ivy Evans
61377d2955 Documentation: Add syntax highlighting for example 2017-07-06 18:38:57 -07:00
Ivy Evans
a7ba7714f5 Documentation: Fix typo "template" => "templates" 2017-07-06 18:34:26 -07:00
Dalton Hubble
ff916686e7 Merge pull request #596 from euank/retry-curl
examples: include 'curl' retries
2017-06-30 14:53:08 -07:00
Euan Kemp
fbc4b39c59 examples: include 'curl' retries
`After=network-online.target` *should* mean this isn't needed in most
cases, but per
https://www.freedesktop.org/wiki/Software/systemd/NetworkTarget/, the
definition of "network-online" is a little shaky.

Regardless, being a little more resilient to network flakes and races is
a good thing. The count of `10` was arbitrarily chosen.
2017-06-30 10:58:51 -07:00
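A minimal sketch of this style of retry (the URL, output path, and delay are placeholders, not the exact change):

```sh
# Retry transient failures up to 10 times before giving up, e.g. from a
# systemd unit's ExecStartPre that fetches an asset from matchbox.
curl --fail --retry 10 --retry-delay 5 \
  -o /opt/example-asset \
  "http://matchbox.example.com:8080/assets/example-asset"
```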
Dalton Hubble
be46b389bf Merge pull request #594 from sdemos/master
scripts/devnet: open port 8081 when using docker
2017-06-28 14:51:53 -07:00
Stephen Demos
a14e6c8bb9 scripts/devnet: open port 8081 when using docker
otherwise the gRPC server is not accessible
2017-06-28 14:10:07 -07:00
Dalton Hubble
c03b7a9627 Merge branch 'readme-cleanup' 2017-06-26 17:38:48 -07:00
Dalton Hubble
ac40eeedb5 README: Remove duplicated Tectonic docs
* Ensure that Matchbox (open-source) and Tectonic (enterprise)
are kept separate; Tectonic has its own docs
* Matchbox is agnostic to Kubernetes distribution
2017-06-26 17:03:10 -07:00
Dalton Hubble
9e23f3a86d examples: Fix LOCKSMITHD_ENDPOINT protocol to be https
* Fix auto-update issue introduced in 6f02107 which occurs
when self-hosted etcd is used and locksmithd cannot auth
* See #590
2017-06-26 16:02:17 -07:00
Dalton Hubble
d1baa3fb65 Merge pull request #591 from coreos/fix-locksmithd
examples: Use etcd client certs in locksmithd dropin
2017-06-26 15:11:33 -07:00
Dalton Hubble
c915fc2b52 examples: Use etcd client certs in locksmithd dropin
* Fixes a regression introduced in 6f02107 which upgraded to
Kubernetes v1.6.6 and added self-hosted etcd with TLS
* Both on-host and self-hosted etcd now require clients to use
TLS client certs so locksmithd
2017-06-26 14:39:54 -07:00
Dalton Hubble
6f02107448 Merge pull request #585 from coreos/kubernetes-upgrade
examples: Upgrade Kubernetes to v1.6.6
2017-06-24 15:02:20 -07:00
Dalton Hubble
ff06990edb examples: Upgrade Kubernetes to v1.6.6
* Upgrade to bootkube v0.4.5
* Enable TLS for experimental self-hosted etcd
* Upstream manifest generation changes modify the flannel
Daemonset, switch several control plane components to run
as non-root, and add an explicit UpdateStrategy to the
control plane components
2017-06-24 14:39:10 -07:00
Dalton Hubble
9bc6edc65b Merge pull request #583 from coreos/etcd3-update
examples: Update etcd3 from v3.1.6 to v3.2.0
2017-06-16 15:19:02 -07:00
Dalton Hubble
5b8006ae35 examples: Update etcd3 from v3.1.6 to v3.2.0 2017-06-16 14:23:38 -07:00
Dalton Hubble
ff5cd0468e Merge pull request #547 from coreos/enable-bootkube-tests
Re-enable bootkube-terraform cluster tests
2017-06-15 16:56:30 -07:00
Dalton Hubble
4d9bd82c12 tests/smoke: Re-enable bootkube-terraform cluster tests
* Simplify script to not launch subshells
* Verify tests don't leave behind processes running terraform apply
2017-06-15 11:59:34 -07:00
Dalton Hubble
882793f230 Merge pull request #577 from notnamed/patch-1
Correct path to client.crt and client.key
2017-06-15 11:31:05 -07:00
Dalton Hubble
858e1bda73 Merge pull request #572 from coreos/allow-docker
scripts: Improve devnet script to allow using rkt or docker
2017-06-15 11:30:52 -07:00
Dalton Hubble
cfbb9cebd0 scripts: Improve devnet script to allow using rkt or docker
* Add create, status, and destroy subcommands that use docker as
the container runtime for testing local QEMU/KVM clusters. Before,
only rkt could be used.
* Update local QEMU/KVM tutorial documentation
2017-06-15 11:06:22 -07:00
Jordan Cooks
edbe5bab20 Correct path to client.crt and client.key
gRPC API verification step has invalid paths to client.crt and client.key; these are created in ~/matchbox-v0.6.1-linux-amd64/scripts/tls (depending on where the matchbox installer is extracted).
2017-06-14 09:19:55 -07:00
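As a hedged example of that verification step (the CA file name and hostname are assumptions; the client.crt/client.key paths follow the commit message):

```sh
# Paths assume the v0.6.1 release archive was extracted into the home directory.
cd ~/matchbox-v0.6.1-linux-amd64
openssl s_client -connect matchbox.example.com:8081 \
  -CAfile scripts/tls/ca.crt \
  -cert scripts/tls/client.crt \
  -key scripts/tls/client.key
```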
Dalton Hubble
299701e7ea Merge pull request #576 from coreos/fix-ingress-resource
contrib/k8s: Use two Ingress resources for HTTP and TLS gRPC
2017-06-13 17:15:02 -07:00
Dalton Hubble
a20720a0d4 contrib/k8s: Use two Ingress resources for HTTP and TLS gRPC
* Fixes Ingress controller issue upgrading from nginx-ingress-controller
0.9-beta.3 to 0.9-beta.4 through 0.9-beta.7
2017-06-13 14:06:53 -07:00
Dalton Hubble
5a9c24ceb3 Merge pull request #573 from coreos/base-image
Dockerfile: Update base image from alpine:3.5 to alpine:3.6
2017-06-13 09:57:52 -07:00
Dalton Hubble
82af3f747d Dockerfile: Update base image from alpine:3.5 to alpine:3.6 2017-06-12 16:45:18 -07:00
Dalton Hubble
e955fecd30 Merge pull request #571 from coreos/missing-output
examples/terraform/modules: Add outputs.tf with kubeconfig
2017-06-12 14:18:31 -07:00
Dalton Hubble
0c1e20db27 Merge pull request #569 from coreos/deprecate-cloud
matchbox,Documentation: Mark Cloud-Config as deprecated
2017-06-12 09:48:29 -07:00
Dalton Hubble
8d6d0397ff examples/terraform/modules: Add outputs.tf with kubeconfig 2017-06-12 00:46:14 -07:00
Dalton Hubble
abc7eb8dfb Merge pull request #568 from dghubble/changelog
CHANGES.md: Add missing changelog notes
2017-06-09 11:18:41 -07:00
Dalton Hubble
149f441ad8 matchbox,Documentation: Mark Cloud-Config as deprecated
* Warn that Cloud-Config support will be removed in the
future
2017-06-09 10:53:49 -07:00
Dalton Hubble
cf43908a72 CHANGES.md: Add missing changelog notes 2017-06-09 10:35:27 -07:00
Benjamin Gilbert
523b15ed13 Merge pull request #567 from bgilbert/container-linux
*: CoreOS -> Container Linux
2017-06-08 15:33:37 -07:00
Benjamin Gilbert
aac270e937 README: Shorten line 2017-06-08 15:14:03 -07:00
Dalton Hubble
1cfdce2970 Merge branch 'add-generic' 2017-06-08 14:37:18 -07:00
Benjamin Gilbert
9d3d08a26f *: CoreOS -> Container Linux 2017-06-08 12:29:00 -07:00
Wagner Sartori Junior
b176de805e cli,client,http,rpc,server,storage: Add gRPC API for generic (experimental) templates
Matchbox added generic template support to enable experimenting with
rendering different kinds of templates, beyond Container Linux configs
and cloud-configs. We'd like to add a gRPC endpoint for generic
templates, as is done for other configs to support gRPC clients.
2017-06-08 11:34:09 -07:00
Dalton Hubble
009b44b25d Merge pull request #566 from coreos/on-host-etcd-tls
examples: Use Kubernetes on-host etcd TLS
2017-06-08 09:51:44 -07:00
Dalton Hubble
57e473b6f5 examples/terraform: Enable on-host etcd TLS for terraform-based bootkube 2017-06-07 16:38:54 -07:00
Dalton Hubble
66cd8da417 examples: Use Kubernetes on-host etcd TLS
* etcd3 cluster requires peers and clients to be TLS authenticated
* kube-apiserver (incl. bootstrap) communicates with TLS
authenticated on-host etcd cluster
2017-06-07 10:56:55 -07:00
Dalton Hubble
50a3d11414 Merge pull request #564 from coreos/remove-cmdline
matchbox: Remove Profile cmdline map field
2017-06-06 13:53:14 -07:00
Dalton Hubble
6fa13007c8 matchbox: Remove Profile cmdline map field 2017-06-05 13:04:09 -07:00
Dalton Hubble
500a7b25e1 Merge pull request #561 from joshix/patch-1
Doc/deployment.md: Cp local config to correct location
2017-06-02 14:35:45 -07:00
Josh Wood
951e5ec4a3 Doc/deployment.md: Cp local config to correct location
Copy matchbox-local.service to /etc/systemd/system/matchbox.service
rather than bare dir.
2017-06-02 14:11:19 -07:00
Dalton Hubble
f92743fa57 Merge pull request #556 from coreos/terraform-improvements
Add some minor Terraform variables
2017-06-01 11:12:01 -07:00
Dalton Hubble
d84bb8e398 examples/terraform: Configure whether to install CL from cache
* Module "profiles" provides container-linux-install and
cached-container-linux-install Profiles
* Module bootkube accepts cached_install variable to determine
whether the cluster should install Container Linux from cache
or from the public download site (default)
2017-05-31 13:57:12 -07:00
Dalton Hubble
d54562f429 examples/terraform: Add install_disk optional override 2017-05-30 16:00:37 -07:00
Dalton Hubble
395494c1d9 examples/terraform: Template variables early where possible 2017-05-30 16:00:37 -07:00
Dalton Hubble
ddbe17cd31 Merge pull request #555 from coreos/declarative-jenkinsfile
Jenkinsfile: Switch to declarative-style Jenkins pipeline
2017-05-26 16:34:27 -07:00
Dalton Hubble
b1a866370a Jenkinsfile: Cleanup workspace directories 2017-05-26 14:40:34 -07:00
Dalton Hubble
b8326e6db6 Jenkinsfile: Switch to declarative-style Jenkins pipeline 2017-05-26 11:17:14 -07:00
Dalton Hubble
7864e64fd2 Merge pull request #554 from dghubble/documentation-fix
*: Update docs references to v0.6.1
2017-05-25 14:39:09 -07:00
Dalton Hubble
89bb5125b5 *: Update docs references to v0.6.1 2017-05-25 14:24:04 -07:00
Dalton Hubble
cff053328d Merge pull request #551 from coreos/prep-point-release
CHANGES.md: Prepare for a v0.6.1 docs point release
2017-05-25 10:43:58 -07:00
Dalton Hubble
698b6f6118 CHANGES.md: Prepare for a v0.6.1 docs point release 2017-05-25 10:27:43 -07:00
Dalton Hubble
23f23c1dcb Merge pull request #552 from coreos/go-bump
Update openpgp package and bump Go to 1.8.3
2017-05-24 15:39:35 -07:00
Dalton Hubble
51cf859587 glide.yaml: Update and vendor the crypto openpgp package 2017-05-24 15:28:16 -07:00
Dalton Hubble
8061f57346 travis.yml: Use Go 1.8.3 in tests and published images 2017-05-24 15:14:31 -07:00
Dalton Hubble
8000c323b6 Merge pull request #524 from coreos/organize-scripts
scripts: Organize dev-only scripts and use a single scripts/tls location
2017-05-24 14:21:00 -07:00
Dalton Hubble
314a317271 scripts: Move examples/etc/matchbox to scripts/tls
* Use the same TLS cert-gen location in source as in releases
2017-05-24 13:19:21 -07:00
Dalton Hubble
d437167ebf scripts: Move development-only scripts under scripts/dev 2017-05-24 10:15:24 -07:00
Dalton Hubble
4067702641 Merge pull request #548 from coreos/multi-controller
examples/terraform: Add tfvars showing multi-controller case
2017-05-24 09:49:21 -07:00
Dalton Hubble
86c07da76e examples/terraform: Add tfvars showing multi-controller case 2017-05-23 15:54:18 -07:00
Dalton Hubble
be00fdbca0 Merge pull request #546 from coreos/update-container-linux
Bump Container Linux version to stable 1353.7.0
2017-05-23 12:09:05 -07:00
enilfodne
abbf7faf56 examples: Bump Container Linux version to stable 1353.7.0 2017-05-23 11:01:24 -07:00
Dalton Hubble
76cc8cb13c scripts: Remove unused static k8s generation scripts
* Remove static rktnetes cluster docs
* Bump devnet matchbox version
2017-05-22 18:11:11 -07:00
Dalton Hubble
ed6dde528a Merge pull request #543 from coreos/remove-pixiecore
Remove pixiecore handler and support
2017-05-22 17:51:21 -07:00
Dalton Hubble
1e095661ad matchbox: Remove pixiecore handler and support
* Pixiecore was deprecated in v0.5.0 and can be removed
2017-05-22 17:13:02 -07:00
296 changed files with 7155 additions and 3068 deletions


@@ -3,22 +3,23 @@ sudo: required
services:
- docker
go:
- 1.7.4
- 1.8
- 1.10.x
- 1.11.x
- 1.11.1
- tip
matrix:
allow_failures:
- go: tip
install:
- go get github.com/golang/lint/golint
- go get golang.org/x/lint/golint
script:
- make test
deploy:
provider: script
script: scripts/travis-docker-push
script: scripts/dev/travis-docker-push
skip_cleanup: true
on:
branch: master
go: '1.8'
go: '1.11.1'
notifications:
email: change


@@ -1,9 +1,46 @@
# matchbox
# Matchbox
Notable changes between releases.
## Latest
## v0.7.1 (2018-11-01)
* Add `kernel_args` variable to the terraform bootkube-install cluster definition
* Add `get-flatcar` helper script
* Add optional TLS support to read-only HTTP API
* Build Matchbox with Go 1.11.1 for images and binaries
### Examples
* Upgrade Kubernetes example clusters to v1.10.0 (Terraform-based)
* Upgrade Kubernetes example clusters to v1.8.5
## v0.7.0 (2017-12-12)
* Add gRPC API endpoints for managing generic (experimental) templates
* Update Container Linux config transpiler to v0.5.0
* Update Ignition to v0.19.0, render v2.1.0 Ignition configs
* Drop support for Container Linux versions below 1465.0.0 (breaking)
* Build Matchbox with Go 1.8.5 for images and binaries
* Remove Profile `Cmdline` map (deprecated in v0.5.0), use `Args` slice instead
* Remove pixiecore support (deprecated in v0.5.0)
* Remove `ContextHandler`, `ContextHandlerFunc`, and `NewHandler` from the `matchbox/http` package.
### Examples / Modules
* Upgrade Kubernetes example clusters to v1.8.4
* Kubernetes examples clusters enable etcd TLS
* Deploy the Container Linux Update Operator (CLUO) to coordinate reboots of Container Linux nodes in Kubernetes clusters. See the cluster [addon docs](Documentation/cluster-addons.md).
* Kubernetes examples (terraform and non-terraform) mask locksmithd
* Terraform modules `bootkube` and `profiles` (Kubernetes) mask locksmithd
## v0.6.1 (2017-05-25)
* Improve the installation documentation
* Move examples/etc/matchbox/cert-gen to scripts/tls
* Build Matchbox with Go 1.8.3 for images and binaries
### Examples
* Upgrade self-hosted Kubernetes cluster examples to v1.6.4


@@ -1,4 +1,4 @@
FROM alpine:3.5
FROM alpine:3.6
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
COPY bin/matchbox /matchbox
EXPOSE 8080


@@ -39,8 +39,8 @@ GET http://matchbox.foo/ipxe?label=value
```
#!ipxe
kernel /assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
initrd /assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz
kernel /assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
initrd /assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz
boot
```
@@ -67,15 +67,15 @@ default=0
timeout=1
menuentry "CoreOS" {
echo "Loading kernel"
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
echo "Loading initrd"
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"
}
```
## Cloud config
Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
DEPRECATED: Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
```
GET http://matchbox.foo/cloud?label=value
@@ -101,7 +101,7 @@ coreos:
command: start
```
## Ignition Config
## Container Linux Config / Ignition Config
Finds the profile matching the machine and renders the corresponding Ignition Config with group metadata, selectors, and query params.
@@ -231,7 +231,7 @@ If you need to serve static assets (e.g. kernel, initrd), `matchbox` can serve a
```
matchbox.foo/assets/
└── coreos
└── 1298.7.0
└── 1576.5.0
├── coreos_production_pxe.vmlinuz
└── coreos_production_pxe_image.cpio.gz
└── 1153.0.0


@@ -1,9 +1,16 @@
# Upgrading self-hosted Kubernetes
[Self-hosted](bootkube.md) Kubernetes clusters schedule Kubernetes components such as the apiserver, kubelet, scheduler, and controller-manager as pods like other applications (except with node selectors). This allows Kubernetes level operations to be performed to upgrade clusters in place, rather than by re-provisioning.
CoreOS Kubernetes clusters "self-host" the apiserver, scheduler, controller-manager, flannel, kube-dns, and kube-proxy as Kubernetes pods, like ordinary applications (except with taint tolerations). This allows upgrades to be performed in-place using (mostly) `kubectl` as an alternative to re-provisioning.
Let's upgrade a self-hosted Kubernetes v1.4.1 cluster to v1.4.3 as an example.
Let's upgrade a Kubernetes v1.6.6 cluster to v1.6.7 as an example.
## Stability
This guide shows how to attempt an in-place upgrade of a Kubernetes cluster set up via the [examples](../examples). It does not provide exact diffs, migrations between breaking changes, the stability of a fresh re-provision, or any guarantees. Evaluate whether in-place updates are appropriate for your Kubernetes cluster and be prepared to perform a fresh re-provision if something goes wrong, especially between Kubernetes minor releases (e.g. 1.6 to 1.7).
Matchbox Kubernetes examples provide a vanilla Kubernetes cluster with only free (as in freedom and cost) software components. If you require curated updates, migrations, or guarantees for production, consider [Tectonic](https://coreos.com/tectonic/) by CoreOS.
**Note: Tectonic users should NOT manually upgrade. Follow the [Tectonic docs](https://coreos.com/tectonic/docs/latest/admin/upgrade.html)**
## Inspect
@@ -11,193 +18,130 @@ Show the control plane daemonsets and deployments which will need to be updated.
```sh
$ kubectl get daemonsets -n=kube-system
NAME DESIRED CURRENT NODE-SELECTOR AGE
kube-apiserver 1 1 master=true 5m
kube-proxy 3 3 <none> 5m
kubelet 3 3 <none> 5m
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE-SELECTOR AGE
kube-apiserver 1 1 1 1 1 node-role.kubernetes.io/master= 21d
kube-etcd-network-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
kube-flannel 4 4 4 4 4 <none> 21d
kube-proxy 4 4 4 4 4 <none> 21d
pod-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
$ kubectl get deployments -n=kube-system
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
kube-controller-manager 1 1 1 1 5m
kube-dns-v20 1 1 1 1 5m
kube-scheduler 1 1 1 1 5m
kube-controller-manager 2 2 2 2 21d
kube-dns 1 1 1 1 21d
kube-scheduler 2 2 2 2 21d
```
Check the current Kubernetes version.
```sh
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.0", GitCommit:"a16c0a7f71a6f93c7e0f222d961f4675cd97a46b", GitTreeState:"clean", BuildDate:"2016-09-26T18:16:57Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.1+coreos.0", GitCommit:"b7a02f46b972c5211e5c04fdb1d5b86ac16c00eb", GitTreeState:"clean", BuildDate:"2016-10-11T20:13:55Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.6+coreos.1", GitCommit:"42a5c8b99c994a51d9ceaed5d0254f177e97d419", GitTreeState:"clean", BuildDate:"2017-06-21T01:10:07Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
```
In this case, Kubernetes is `v1.4.1+coreos.0` and our goal is to upgrade to `v1.4.3+coreos.0`. First, update the control plane pods. Then the kubelets and proxies on all nodes.
```sh
$ kubectl get nodes
NAME STATUS AGE VERSION
node1.example.com Ready 21d v1.6.6+coreos.1
node2.example.com Ready 21d v1.6.6+coreos.1
node3.example.com Ready 21d v1.6.6+coreos.1
node4.example.com Ready 21d v1.6.6+coreos.1
```
**Tip**: Follow along with a QEMU/KVM self-hosted Kubernetes cluster the first time, before upgrading your production bare-metal clusters ([tutorial](bootkube.md)).
## Strategy
Update control plane components with `kubectl`. Then update the `kubelet` systemd unit on each host.
Prepare the changes to the Kubernetes manifests by generating assets for a target Kubernetes cluster (e.g. bootkube `v0.5.0` produces Kubernetes 1.6.6 and bootkube `v0.5.1` produces Kubernetes 1.6.7). Choose the tool used during creation of the cluster:
* [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) - install the `bootkube` binary for the target version and render assets
* [poseidon/bootkube-terraform](https://github.com/poseidon/bootkube-terraform) - checkout the tag for the target version and `terraform apply` to render assets
Diff the generated assets against the assets used when originally creating the cluster. In simple cases, you may only need to bump the hyperkube image. In more complex cases, some manifests may have new flags or configuration.
## Control Plane
### kube-apiserver
Edit the kube-apiserver daemonset. Change the container image name to `quay.io/coreos/hyperkube:v1.4.3_coreos.0`.
Edit the `kube-apiserver` daemonset to rolling update the apiserver.
```sh
$ kubectl edit daemonset kube-apiserver -n=kube-system
```
Since daemonsets don't yet support rolling, manually delete each apiserver one by one and wait for each to be re-scheduled.
```sh
$ kubectl get pods -n=kube-system
# WARNING: Self-hosted Kubernetes is still new and this may fail
$ kubectl delete pod kube-apiserver-s62kb -n=kube-system
```
If you only have one, your cluster will be temporarily unavailable. Remember the Hyperkube image is quite large and this can take a minute.
```sh
$ kubectl get pods -n=kube-system
NAME READY STATUS RESTARTS AGE
kube-api-checkpoint-node1.example.com 1/1 Running 0 12m
kube-apiserver-vyg3t 2/2 Running 0 2m
kube-controller-manager-1510822774-qebia 1/1 Running 2 12m
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 12m
kube-proxy-8jthl 1/1 Running 0 12m
kube-proxy-bnvgy 1/1 Running 0 12m
kube-proxy-gkyx8 1/1 Running 0 12m
kube-scheduler-2099299605-67ezp 1/1 Running 2 12m
kubelet-exe5k 1/1 Running 0 12m
kubelet-p3g98 1/1 Running 0 12m
kubelet-quhhg 1/1 Running 0 12m
```
If you only have one apiserver, the cluster may be momentarily unavailable.
### kube-scheduler
Edit the scheduler deployment to rolling update the scheduler. Change the container image name for the hyperkube.
Edit the `kube-scheduler` deployment to rolling update the scheduler.
```sh
$ kubectl edit deployments kube-scheduler -n=kube-system
```
Wait for the scheduler to be deployed.
### kube-controller-manager
Edit the controller-manager deployment to rolling update the controller manager. Change the container image name for the hyperkube.
Edit the `kube-controller-manager` deployment to rolling update the controller manager.
```sh
$ kubectl edit deployments kube-controller-manager -n=kube-system
```
Wait for the controller manager to be deployed.
### kube-proxy
Edit the `kube-proxy` daemonset to rolling update the proxy.
```sh
$ kubectl get pods -n=kube-system
NAME READY STATUS RESTARTS AGE
kube-api-checkpoint-node1.example.com 1/1 Running 0 28m
kube-apiserver-vyg3t 2/2 Running 0 18m
kube-controller-manager-1709527928-zj8c4 1/1 Running 0 4m
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 28m
kube-proxy-8jthl 1/1 Running 0 28m
kube-proxy-bnvgy 1/1 Running 0 28m
kube-proxy-gkyx8 1/1 Running 0 28m
kube-scheduler-2255275287-hti6w 1/1 Running 0 6m
kubelet-exe5k 1/1 Running 0 28m
kubelet-p3g98 1/1 Running 0 28m
kubelet-quhhg 1/1 Running 0 28m
$ kubectl edit daemonset kube-proxy -n=kube-system
```
### Others
If there are changes between the prior version and target version manifests, update the `kube-dns` deployment, `kube-flannel` daemonset, or `pod-checkpointer` daemonset.
### Verify
Verify the control plane components updated.
```sh
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.7+coreos.0", GitCommit:"c8c505ee26ac3ab4d1dff506c46bc5538bc66733", GitTreeState:"clean", BuildDate:"2017-07-06T17:38:33Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
```
```sh
$ kubectl get nodes
NAME STATUS AGE VERSION
node1.example.com Ready 21d v1.6.7+coreos.0
node2.example.com Ready 21d v1.6.7+coreos.0
node3.example.com Ready 21d v1.6.7+coreos.0
node4.example.com Ready 21d v1.6.7+coreos.0
```
## kubelet
SSH to each node and update `/etc/kubernetes/kubelet.env`. Restart the `kubelet.service`.
```sh
ssh core@node1.example.com
sudo vim /etc/kubernetes/kubelet.env
sudo systemctl restart kubelet
```
### Verify
At this point, the control plane components have been upgraded to v1.4.3.
```sh
$ kubectl version
Client Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.0", GitCommit:"a16c0a7f71a6f93c7e0f222d961f4675cd97a46b", GitTreeState:"clean", BuildDate:"2016-09-26T18:16:57Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"4", GitVersion:"v1.4.3+coreos.0", GitCommit:"7819c84f25e8c661321ee80d6b9fa5f4ff06676f", GitTreeState:"clean", BuildDate:"2016-10-17T21:19:17Z", GoVersion:"go1.6.3", Compiler:"gc", Platform:"linux/amd64"}
```
Finally, upgrade the kubelets and kube-proxies.
## kubelet and kube-proxy
Show the current kubelet and kube-proxy version on each node.
Verify the kubelet and kube-proxy of each node updated.
```sh
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
kubeProxyVersion: v1.4.1+coreos.0
kubeletVersion: v1.4.1+coreos.0
kubeProxyVersion: v1.4.1+coreos.0
kubeletVersion: v1.4.1+coreos.0
kubeProxyVersion: v1.4.1+coreos.0
kubeletVersion: v1.4.1+coreos.0
kubeProxyVersion: v1.6.7+coreos.0
kubeletVersion: v1.6.7+coreos.0
kubeProxyVersion: v1.6.7+coreos.0
kubeletVersion: v1.6.7+coreos.0
kubeProxyVersion: v1.6.7+coreos.0
kubeletVersion: v1.6.7+coreos.0
kubeProxyVersion: v1.6.7+coreos.0
kubeletVersion: v1.6.7+coreos.0
```
Edit the kubelet and kube-proxy daemonsets. Change the container image name for the hyperkube.
```sh
$ kubectl edit daemonset kubelet -n=kube-system
$ kubectl edit daemonset kube-proxy -n=kube-system
```
Since daemonsets don't yet support rolling, manually delete each kubelet and each kube-proxy. The daemonset controller will create new (upgraded) replicas.
```sh
$ kubectl get pods -n=kube-system
$ kubectl delete pod kubelet-quhhg
...repeat
$ kubectl delete pod kube-proxy-8jthl -n=kube-system
...repeat
$ kubectl get pods -n=kube-system
NAME READY STATUS RESTARTS AGE
kube-api-checkpoint-node1.example.com 1/1 Running 0 1h
kube-apiserver-vyg3t 2/2 Running 0 1h
kube-controller-manager-1709527928-zj8c4 1/1 Running 0 47m
kube-dns-v20-3531996453-0tlv9 3/3 Running 0 1h
kube-proxy-6dbne 1/1 Running 0 1s
kube-proxy-sm4jv 1/1 Running 0 8s
kube-proxy-xmuao 1/1 Running 0 14s
kube-scheduler-2255275287-hti6w 1/1 Running 0 49m
kubelet-hfdwr 1/1 Running 0 38s
kubelet-oia47 1/1 Running 0 52s
kubelet-s6dab 1/1 Running 0 59s
```
## Verify
Verify that the kubelet and kube-proxy on each node have been upgraded.
```sh
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
kubeProxyVersion: v1.4.3+coreos.0
kubeletVersion: v1.4.3+coreos.0
kubeProxyVersion: v1.4.3+coreos.0
kubeletVersion: v1.4.3+coreos.0
kubeProxyVersion: v1.4.3+coreos.0
kubeletVersion: v1.4.3+coreos.0
```
Now, Kubernetes components have been upgraded to a new version of Kubernetes!
## Going further
Bare-metal or virtualized self-hosted Kubernetes clusters can be upgraded in place in 5-10 minutes. Here is a bare-metal example:
```sh
$ kubectl -n=kube-system get pods
NAME READY STATUS RESTARTS AGE
kube-api-checkpoint-ibm0.lab.dghubble.io 1/1 Running 0 2d
kube-apiserver-j6atn 2/2 Running 0 5m
kube-controller-manager-1709527928-y05n5 1/1 Running 0 1m
kube-dns-v20-3531996453-zwbl8 3/3 Running 0 2d
kube-proxy-e49p5 1/1 Running 0 14s
kube-proxy-eu5dc 1/1 Running 0 8s
kube-proxy-gjrzq 1/1 Running 0 3s
kube-scheduler-2255275287-96n56 1/1 Running 0 2m
kubelet-9ob0e 1/1 Running 0 19s
kubelet-bvwp0 1/1 Running 0 14s
kubelet-xlrql 1/1 Running 0 24s
```
Check upstream for updates to addons like `kube-dns` or `kube-dashboard` and update them like any other applications. Some kube-system components use version labels and you may wish to clean those up as well.
Kubernetes control plane components have been successfully updated!


@@ -1,6 +1,6 @@
# Self-hosted Kubernetes
# Kubernetes
The self-hosted Kubernetes example provisions a 3 node "self-hosted" Kubernetes v1.6.4 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes and coordinate Container Linux auto-updates (enabled for disk installs).
The Kubernetes example provisions a 3 node Kubernetes v1.8.5 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes.
## Requirements
@@ -9,13 +9,13 @@ Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md)
* Use rkt or Docker to start `matchbox`
* Create a network boot environment with `coreos/dnsmasq`
* Create the example libvirt client VMs
* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`)
* `/etc/hosts` entries for `node[1-3].example.com`
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.4.4 and add it somewhere on your PATH.
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.9.1 and add it to your $PATH.
```sh
$ bootkube version
Version: v0.4.4
Version: v0.9.1
```
## Examples
@@ -27,10 +27,10 @@ The [examples](../examples) statically assign IP addresses to libvirt client VMs
## Assets
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
Download the CoreOS Container Linux image assets referenced in the target [profile](../examples/profiles).
```sh
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
```
Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
@@ -44,39 +44,50 @@ Add your SSH public key to each machine group definition [as shown](../examples/
}
```
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Later, `bootkube` will schedule these manifests during bootstrapping and the credentials will be used to access your cluster.
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Set the `--network-provider` to `flannel` (default) or `experimental-calico` if desired.
```sh
$ bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=http://127.0.0.1:2379
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379
```
Later, a controller will use `bootkube` to bootstrap these manifests and the credentials will be used to access your cluster.
## Containers
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
Client machines should boot and provision themselves. Local client VMs should network boot Container Linux and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install Container Linux and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
## bootkube
We're ready to use bootkube to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node which will path activate the `kubelet.service`.
Secure copy the etcd TLS assets to `/etc/ssl/etcd/*` on **every controller** node.
```bash
```sh
for node in 'node1'; do
scp -r assets/tls/etcd-* assets/tls/etcd core@$node.example.com:/home/core/
ssh core@$node.example.com 'sudo mkdir -p /etc/ssl/etcd && sudo mv etcd-* etcd /etc/ssl/etcd/ && sudo chown -R etcd:etcd /etc/ssl/etcd && sudo chmod -R 500 /etc/ssl/etcd/'
done
```
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every node** to path activate the `kubelet.service`.
```sh
for node in 'node1' 'node2' 'node3'; do
scp assets/auth/kubeconfig core@$node.example.com:/home/core/kubeconfig
ssh core@$node.example.com 'sudo mv kubeconfig /etc/kubernetes/kubeconfig'
done
```
Secure copy the `bootkube` generated assets to any controller node and run `bootkube-start`.
Secure copy the `bootkube` generated assets to **any controller** node and run `bootkube-start` (takes ~10 minutes).
```sh
$ scp -r assets core@node1.example.com:/home/core
$ ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
scp -r assets core@node1.example.com:/home/core
ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
```
Optionally watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
Watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
```sh
$ ssh core@node1.example.com 'journalctl -f -u bootkube'
@@ -87,39 +98,41 @@ $ ssh core@node1.example.com 'journalctl -f -u bootkube'
[ 299.311743] bootkube[5]: All self-hosted control plane components successfully started
```
You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
[Verify](#verify) the Kubernetes cluster is accessible once complete. Then install **important** cluster [addons](cluster-addons.md). You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
## Verify
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the kubelet, apiserver, scheduler, and controller-manager are running as pods.
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
```sh
$ KUBECONFIG=assets/auth/kubeconfig
$ export KUBECONFIG=assets/auth/kubeconfig
$ kubectl get nodes
NAME STATUS AGE
node1.example.com Ready 3m
node2.example.com Ready 3m
node3.example.com Ready 3m
NAME STATUS AGE VERSION
node1.example.com Ready 11m v1.8.5
node2.example.com Ready 11m v1.8.5
node3.example.com Ready 11m v1.8.5
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system checkpoint-installer-p8g8r 1/1 Running 1 13m
kube-system kube-apiserver-s5gnx 1/1 Running 1 41s
kube-system kube-controller-manager-3438979800-jrlnd 1/1 Running 1 13m
kube-system kube-controller-manager-3438979800-tkjx7 1/1 Running 1 13m
kube-system kube-dns-4101612645-xt55f 4/4 Running 4 13m
kube-system kube-flannel-pl5c2 2/2 Running 0 13m
kube-system kube-flannel-r9t5r 2/2 Running 3 13m
kube-system kube-flannel-vfb0s 2/2 Running 4 13m
kube-system kube-proxy-cvhmj 1/1 Running 0 13m
kube-system kube-proxy-hf9mh 1/1 Running 1 13m
kube-system kube-proxy-kpl73 1/1 Running 1 13m
kube-system kube-scheduler-694795526-1l23b 1/1 Running 1 13m
kube-system kube-scheduler-694795526-fks0b 1/1 Running 1 13m
kube-system pod-checkpointer-node1.example.com 1/1 Running 2 10m
kube-system kube-apiserver-zd1k3 1/1 Running 0 7m
kube-system kube-controller-manager-762207937-2ztxb 1/1 Running 0 7m
kube-system kube-controller-manager-762207937-vf6bk 1/1 Running 1 7m
kube-system kube-dns-2431531914-qc752 3/3 Running 0 7m
kube-system kube-flannel-180mz 2/2 Running 1 7m
kube-system kube-flannel-jjr0x 2/2 Running 0 7m
kube-system kube-flannel-mlr9w 2/2 Running 0 7m
kube-system kube-proxy-0jlq7 1/1 Running 0 7m
kube-system kube-proxy-k4mjl 1/1 Running 0 7m
kube-system kube-proxy-l4xrd 1/1 Running 0 7m
kube-system kube-scheduler-1873228005-5d2mk 1/1 Running 0 7m
kube-system kube-scheduler-1873228005-s4w27 1/1 Running 0 7m
kube-system pod-checkpointer-hb960 1/1 Running 0 7m
kube-system pod-checkpointer-hb960-node1.example.com 1/1 Running 0 6m
```
Try deleting pods to see that the cluster is resilient to failures and machine restarts (CoreOS auto-updates).
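As a quick check, you might delete one of the control plane pods listed above and watch the self-hosted control plane re-create it (the pod name here is illustrative):
```sh
$ kubectl delete pod -n kube-system kube-scheduler-1873228005-5d2mk
$ kubectl get pods -n kube-system -w
```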
## Addons
Install **important** cluster [addons](cluster-addons.md).
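For example, the Container Linux Update Operator addon can be applied with the manifests shown in that doc:
```sh
$ kubectl apply -f examples/addons/cluo/update-operator.yaml
$ kubectl apply -f examples/addons/cluo/update-agent.yaml
```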
## Going further

View File

@@ -1,7 +1,7 @@
# Cloud config
**Note:** We recommend migrating to [Container Linux Configs](container-linux-config.md) for hardware provisioning.
**Note:** Please migrate to [Container Linux Configs](container-linux-config.md). Cloud-Config support will be removed in the future.
CoreOS Cloud-Config is a system for configuring machines with a Cloud-Config file or executable script from user-data. Cloud-Config runs in userspace on each boot and implements a subset of the [cloud-init spec](http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data). See the cloud-config [docs](https://coreos.com/os/docs/latest/cloud-config.html) for details.

View File

@@ -0,0 +1,30 @@
## Cluster Addons
Kubernetes clusters run cluster addons atop Kubernetes itself. Addons may be considered essential for bootstrapping (non-optional), important (highly recommended), or optional.
## Essential
Several addons are considered essential. CoreOS cluster creation tools ensure these addons are included. Kubernetes clusters deployed via the Matchbox examples or using our Terraform Modules include these addons as well.
### kube-proxy
`kube-proxy` is deployed as a DaemonSet.
### kube-dns
`kube-dns` is deployed as a Deployment.
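To verify both are present (resource names assumed from the pod listings above, in the `kube-system` namespace):
```sh
$ kubectl get daemonset kube-proxy --namespace=kube-system
$ kubectl get deployment kube-dns --namespace=kube-system
```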
## Important
### Container Linux Update Operator
The [Container Linux Update Operator](https://github.com/coreos/container-linux-update-operator) (i.e. CLUO) coordinates reboots of auto-updating Container Linux nodes so that one node reboots at a time and nodes are drained before reboot. CLUO enables the auto-update behavior Container Linux clusters are known for, but does it in a Kubernetes native way. Deploying CLUO is strongly recommended.
Create the `update-operator` deployment and `update-agent` DaemonSet.
```
kubectl apply -f examples/addons/cluo/update-operator.yaml
kubectl apply -f examples/addons/cluo/update-agent.yaml
```
*Note, CLUO replaces `locksmithd` reboot coordination. The `update_engine` systemd unit on hosts still performs the Container Linux update check, download, and install to the inactive partition.*
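To confirm this split on a node, check the updater unit directly (the systemd unit is typically named `update-engine.service`):
```sh
$ ssh core@node1.example.com 'systemctl status update-engine'
```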

View File

@@ -1,6 +1,6 @@
# Container Linux Configs
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk paritions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk partitions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
### Ignition
@@ -75,7 +75,7 @@ passwd:
```
<!-- {% endraw %} -->
The Ignition config response (formatted) to a query `/ignition?label=value` for a CoreOS instance supporting Ignition 2.0.0 would be:
The Ignition config response (formatted) to a query `/ignition?label=value` for a Container Linux instance supporting Ignition 2.0.0 would be:
```json
{

View File

@@ -4,11 +4,11 @@ This guide walks through deploying the `matchbox` service on a Linux host (via R
## Provisioner
`matchbox` is a service for network booting and provisioning machines to create Container Linux clusters. `matchbox` should be installed on a provisioner machine (CoreOS or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
`matchbox` is a service for network booting and provisioning machines to create CoreOS Container Linux clusters. `matchbox` should be installed on a provisioner machine (Container Linux or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
Choose one of the supported installation options:
* [CoreOS (rkt)](#coreos)
* [CoreOS Container Linux (rkt)](#coreos-container-linux)
* [RPM-based](#rpm-based-distro)
* [Generic Linux (binary)](#generic-linux)
* [With rkt](#rkt)
@@ -20,39 +20,41 @@ Choose one of the supported installation options:
Download the latest matchbox [release](https://github.com/coreos/matchbox/releases) to the provisioner host.
```sh
$ wget https://github.com/coreos/matchbox/releases/download/v0.6.0/matchbox-v0.6.0-linux-amd64.tar.gz
$ wget https://github.com/coreos/matchbox/releases/download/v0.6.0/matchbox-v0.6.0-linux-amd64.tar.gz.asc
$ wget https://github.com/coreos/matchbox/releases/download/v0.7.1/matchbox-v0.7.1-linux-amd64.tar.gz
$ wget https://github.com/coreos/matchbox/releases/download/v0.7.1/matchbox-v0.7.1-linux-amd64.tar.gz.asc
```
Verify the release has been signed by the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/).
```sh
$ gpg --keyserver pgp.mit.edu --recv-key 18AD5014C99EF7E3BA5F6CE950BDD3E0FC8A365E
$ gpg --verify matchbox-v0.6.0-linux-amd64.tar.gz.asc matchbox-v0.6.0-linux-amd64.tar.gz
$ gpg --verify matchbox-v0.7.1-linux-amd64.tar.gz.asc matchbox-v0.7.1-linux-amd64.tar.gz
# gpg: Good signature from "CoreOS Application Signing Key <security@coreos.com>"
```
Untar the release.
```sh
$ tar xzvf matchbox-v0.6.0-linux-amd64.tar.gz
$ cd matchbox-v0.6.0-linux-amd64
$ tar xzvf matchbox-v0.7.1-linux-amd64.tar.gz
$ cd matchbox-v0.7.1-linux-amd64
```
## Install
### RPM-based distro
On an RPM-based provisioner, install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf` or `yum`.
On an RPM-based provisioner (Fedora 24+), install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf`.
```sh
dnf copr enable @CoreOS/matchbox
dnf install matchbox
```
### CoreOS
RPMs are not currently available for CentOS and RHEL (due to Go version). CentOS and RHEL users should follow the Generic Linux section below.
On a CoreOS provisioner, rkt run `matchbox` image with the provided systemd unit.
### CoreOS Container Linux
On a Container Linux provisioner, rkt run `matchbox` image with the provided systemd unit.
```sh
$ sudo cp contrib/systemd/matchbox-on-coreos.service /etc/systemd/system/matchbox.service
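# Assumed follow-up steps (not shown in this excerpt): reload units and start the service
$ sudo systemctl daemon-reload
$ sudo systemctl start matchbox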
@@ -81,7 +83,7 @@ $ sudo chown -R matchbox:matchbox /var/lib/matchbox
Copy the provided `matchbox` systemd unit file.
```sh
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/matchbox.service
```
## Customization
@@ -110,7 +112,7 @@ Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
```
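As a sketch, these variables could be set with a systemd drop-in (the drop-in path and file name here are illustrative):
```sh
$ sudo mkdir -p /etc/systemd/system/matchbox.service.d
$ sudo tee /etc/systemd/system/matchbox.service.d/override.conf <<'EOF'
[Service]
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
EOF
$ sudo systemctl daemon-reload
$ sudo systemctl restart matchbox
```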
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a CoreOS provisioner can start with an example that enables it.
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a Container Linux provisioner can start with an example that enables it.
```sh
$ sudo cp contrib/systemd/matchbox-for-tectonic.service /etc/systemd/system/matchbox.service
@@ -127,31 +129,44 @@ $ sudo firewall-cmd --zone=MYZONE --add-port=8080/tcp --permanent
$ sudo firewall-cmd --zone=MYZONE --add-port=8081/tcp --permanent
```
## Generate TLS credentials
## Generate TLS Certificates
*Skip this unless you need to enable the gRPC API*
The Matchbox gRPC API allows clients (terraform-provider-matchbox) to create and update Matchbox resources. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
The `matchbox` gRPC API allows client apps (terraform-provider-matchbox, Tectonic Installer, etc.) to update how machines are provisioned. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
The `cert-gen` helper script generates a self-signed CA, server certificate, and client certificate. **Prefer your organization's PKI, if possible**
If your organization manages public key infrastructure and a certificate authority, create a server certificate and key for the `matchbox` service and a client certificate and key for each client tool.
Otherwise, generate a self-signed `ca.crt`, a server certificate (`server.crt`, `server.key`), and client credentials (`client.crt`, `client.key`) with the `examples/etc/matchbox/cert-gen` script. Export the DNS name or IP (discouraged) of the provisioner host.
Navigate to the `scripts/tls` directory.
```sh
$ cd scripts/tls
```
Export `SAN` to set the Subject Alt Names which should be used in certificates. Provide the fully qualified domain name or IP (discouraged) where Matchbox will be installed.
```sh
# DNS or IP Subject Alt Names where matchbox runs
$ export SAN=DNS.1:matchbox.example.com,IP.1:172.18.0.2
```
Generate a `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`.
```sh
$ cd examples/etc/matchbox
# DNS or IP Subject Alt Names where matchbox can be reached
$ export SAN=DNS.1:matchbox.example.com,IP.1:192.168.1.42
$ ./cert-gen
```
Place the TLS credentials in the default location:
Move TLS credentials to the matchbox server's default location.
```sh
$ sudo mkdir -p /etc/matchbox
$ sudo cp ca.crt server.crt server.key /etc/matchbox/
$ sudo cp ca.crt server.crt server.key /etc/matchbox
```
Save `client.crt`, `client.key`, and `ca.crt` to use with a client tool later.
Save `client.crt`, `client.key`, and `ca.crt` for later use (e.g. `~/.matchbox`).
```sh
$ mkdir -p ~/.matchbox
$ cp client.crt client.key ca.crt ~/.matchbox/
```
## Start matchbox
@@ -182,7 +197,7 @@ matchbox
If you enabled the gRPC API,
```sh
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert examples/etc/matchbox/client.crt -key examples/etc/matchbox/client.key
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert scripts/tls/client.crt -key scripts/tls/client.key
CONNECTED(00000003)
depth=1 CN = fake-ca
verify return:1
@@ -196,14 +211,14 @@ Certificate chain
....
```
## Download CoreOS (optional)
## Download Container Linux (optional)
`matchbox` can serve CoreOS images in development or lab environments to reduce bandwidth usage and increase the speed of CoreOS PXE boots and installs to disk.
`matchbox` can serve Container Linux images in development or lab environments to reduce bandwidth usage and increase the speed of Container Linux PXE boots and installs to disk.
Download a recent CoreOS [release](https://coreos.com/releases/) with signatures.
Download a recent Container Linux [release](https://coreos.com/releases/) with signatures.
```sh
$ ./scripts/get-coreos stable 1298.7.0 . # note the "." 3rd argument
$ ./scripts/get-coreos stable 1576.5.0 . # note the "." 3rd argument
```
Move the images to `/var/lib/matchbox/assets`,
@@ -215,7 +230,7 @@ $ sudo cp -r coreos /var/lib/matchbox/assets
```
/var/lib/matchbox/assets/
├── coreos
│   └── 1298.7.0
│   └── 1576.5.0
│   ├── CoreOS_Image_Signing_Key.asc
│   ├── coreos_production_image.bin.bz2
│   ├── coreos_production_image.bin.bz2.sig
@@ -228,11 +243,11 @@ $ sudo cp -r coreos /var/lib/matchbox/assets
and verify the images are accessible.
```sh
$ curl http://matchbox.example.com:8080/assets/coreos/1298.7.0/
$ curl http://matchbox.example.com:8080/assets/coreos/1576.5.0/
<pre>...
```
For large production environments, use a cache proxy or mirror suitable for your environment to serve CoreOS images. See [contrib/squid](../contrib/squid/README.md) for details.
For large production environments, use a cache proxy or mirror suitable for your environment to serve Container Linux images. See [contrib/squid](../contrib/squid/README.md) for details.
## Network
@@ -292,7 +307,8 @@ Create an Ingress resource to expose the HTTP read-only and gRPC API endpoints.
$ kubectl create -f contrib/k8s/matchbox-ingress.yaml
$ kubectl get ingress
NAME HOSTS ADDRESS PORTS AGE
matchbox matchbox.example.com,matchbox-rpc.example.com 10.128.0.3,10... 80, 443 32m
matchbox matchbox.example.com 10.128.0.3,10... 80 29m
matchbox-rpc matchbox-rpc.example.com 10.128.0.3,10... 80, 443 29m
```
Add DNS records `matchbox.example.com` and `matchbox-rpc.example.com` to route traffic to the Ingress Controller.
@@ -304,6 +320,16 @@ $ curl http://matchbox.example.com
$ openssl s_client -connect matchbox-rpc.example.com:443 -CAfile ca.crt -cert client.crt -key client.key
```
# HTTPS - The read-only Matchbox API can also be served over HTTPS
To start matchbox in this mode, set the following flags:
| Name | Type | Description |
|----------------|--------|---------------------------------------------------------------|
| -web-ssl       | bool   | Enable HTTPS for the read-only web API                        |
| -web-cert-file | string | Path to the server TLS certificate file |
| -web-key-file | string | Path to the server TLS key file |
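As a rough sketch (certificate paths assume the `/etc/matchbox/ssl` defaults shown in the flag definitions later on this page):
```sh
$ matchbox -address=0.0.0.0:8080 \
  -web-ssl=true \
  -web-cert-file=/etc/matchbox/ssl/server.crt \
  -web-key-file=/etc/matchbox/ssl/server.key
```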
### Operational notes
* Secrets: Matchbox **can** be run as a public facing service. However, you **must** follow best practices and avoid writing secret material into machine user-data. Instead, load secret materials from an internal secret store.

View File

@@ -8,7 +8,7 @@ This guide covers releasing new versions of matchbox.
Create a release commit which updates old version references.
```sh
$ export VERSION=v0.6.0
$ export VERSION=v0.7.1
```
## Tag
@@ -45,7 +45,7 @@ $ make release
Verify the reported version.
```
./_output/matchbox-v0.6.0-linux-amd64/matchbox -version
./_output/matchbox-v0.7.1-linux-amd64/matchbox -version
```
## Signing
@@ -54,10 +54,10 @@ Sign the release tarballs and ACI with a [CoreOS App Signing Key](https://coreos
```sh
cd _output
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-arm.tar.gz
gpg2 --armor --local-user FC8A365E! --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-arm.tar.gz
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
```
Verify the signatures.

View File

@@ -1,7 +1,6 @@
# Getting started with Docker
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
@@ -26,13 +25,13 @@ $ git clone https://github.com/coreos/matchbox.git
$ cd matchbox
```
Download CoreOS image assets referenced by the `etcd-docker` [example](../examples) to `examples/assets`.
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
```sh
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
```
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name as you would in production.
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name.
```sh
# /etc/hosts
@@ -44,11 +43,18 @@ For development convenience, add `/etc/hosts` entries for nodes so they may be r
## Containers
Run the latest `matchbox` Docker image from `quay.io/coreos/matchbox` with the `etcd-docker` example. The container should receive the IP address 172.17.0.2 on the `docker0` bridge.
Run the `matchbox` and `dnsmasq` services on the `docker0` bridge. `dnsmasq` will run DHCP, DNS and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
The `devnet` convenience script can start these services and accepts the name of any example cluster in [examples](../examples).
```sh
$ sudo docker pull quay.io/coreos/matchbox:latest
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
$ sudo ./scripts/devnet create etcd3
```
Inspect the logs.
```
$ sudo ./scripts/devnet status
```
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
@@ -57,28 +63,28 @@ Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of ho
* Ignition [http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae)
* Metadata [http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae)
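These endpoints can also be fetched from the command line:
```sh
$ curl 'http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae'
$ curl 'http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae'
```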
## Network
### Manual
Since the virtual network has no network boot services, use the `dnsmasq` image to create an iPXE network boot environment which runs DHCP, DNS, and TFTP.
If you prefer to start the containers yourself, instead of using `devnet`,
```sh
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/coreos/dnsmasq -d
```
In this case, dnsmasq runs a DHCP server allocating IPs to VMs between 172.17.0.43 and 172.17.0.99, resolves `matchbox.foo` to 172.17.0.2 (the IP where `matchbox` runs), and points iPXE clients to `http://matchbox.foo:8080/boot.ipxe`.
## Client VMs
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker's containers run.
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker containers run.
```sh
$ sudo ./scripts/libvirt create-docker
$ sudo ./scripts/libvirt create
```
You can connect to the serial console of any node. If you provisioned nodes with an SSH key, you can SSH after bring-up.
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
```sh
$ sudo virsh console node1
$ ssh core@node1.example.com
```
You can also use `virt-manager` to watch the console.
@@ -101,7 +107,6 @@ The example profile added autologin so you can verify that etcd3 works between n
```sh
$ systemctl status etcd-member
$ ETCDCTL_API=3
$ etcdctl set /message hello
$ etcdctl get /message
```
@@ -110,8 +115,7 @@ $ etcdctl get /message
Clean up the containers and VM machines.
```sh
$ sudo docker rm -f dnsmasq
$ sudo ./scripts/libvirt poweroff
$ sudo ./scripts/devnet destroy
$ sudo ./scripts/libvirt destroy
```

View File

@@ -1,6 +1,6 @@
# Getting started with rkt
In this tutorial, we'll run `matchbox` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of QEMU/KVM CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
In this tutorial, we'll run `matchbox` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
@@ -27,10 +27,10 @@ $ git clone https://github.com/coreos/matchbox.git
$ cd matchbox
```
Download CoreOS image assets referenced by the `etcd` [example](../examples) to `examples/assets`.
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
```sh
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
```
## Network
@@ -74,18 +74,19 @@ For development convenience, you may wish to add `/etc/hosts` entries for nodes
## Containers
Run the `matchbox` and `dnsmasq` services on the `metal0` bridge. `dnsmasq` will run DHCP, DNS, and TFTP services to create a suitable network boot environment. `matchbox` will serve provisioning configs to machines on the network which attempt to PXE boot.
Run the `matchbox` and `dnsmasq` services on the `metal0` bridge. `dnsmasq` will run DHCP, DNS, and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
The `devnet` wrapper script rkt runs `matchbox` and `dnsmasq` in systemd transient units. Create can take the name of any example cluster in [examples](../examples).
The `devnet` convenience script can rkt run these services in systemd transient units and accepts the name of any example cluster in [examples](../examples).
```sh
$ sudo ./scripts/devnet create etcd3
$ export CONTAINER_RUNTIME=rkt
$ sudo -E ./scripts/devnet create etcd3
```
Inspect the journal logs or check the status of the systemd services.
Inspect the journal logs.
```
$ sudo ./scripts/devnet status
$ sudo -E ./scripts/devnet status
$ journalctl -f -u dev-matchbox
$ journalctl -f -u dev-dnsmasq
```
@@ -106,14 +107,14 @@ sudo rkt run --net=metal0:IP=172.18.0.2 \
--volume data,kind=host,source=$PWD/examples \
--mount volume=groups,target=/var/lib/matchbox/groups \
--volume groups,kind=host,source=$PWD/examples/groups/etcd3 \
quay.io/coreos/matchbox:v0.6.0 -- -address=0.0.0.0:8080 -log-level=debug
quay.io/coreos/matchbox:v0.7.1 -- -address=0.0.0.0:8080 -log-level=debug
```
```sh
sudo rkt run --net=metal0:IP=172.18.0.3 \
--dns=host \
--mount volume=config,target=/etc/dnsmasq.conf \
--volume config,kind=host,source=$PWD/contrib/dnsmasq/metal0.conf \
quay.io/coreos/dnsmasq:v0.4.0 \
quay.io/coreos/dnsmasq:v0.4.1 \
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW
```
@@ -128,13 +129,14 @@ $ sudo rkt gc --grace-period=0
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `metal0` bridge, where your pods run.
```sh
$ sudo ./scripts/libvirt create
$ sudo ./scripts/libvirt create-rkt
```
You can connect to the serial console of any node. If you provisioned nodes with an SSH key, you can SSH after bring-up.
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
```sh
$ sudo virsh console node1
$ ssh core@node1.example.com
```
You can also use `virt-manager` to watch the console.
@@ -157,7 +159,6 @@ The example profile added autologin so you can verify that etcd3 works between n
```sh
$ systemctl status etcd-member
$ ETCDCTL_API=3
$ etcdctl set /message hello
$ etcdctl get /message
```
@@ -167,7 +168,7 @@ $ etcdctl get /message
Clean up the systemd units running `matchbox` and `dnsmasq`.
```sh
$ sudo ./scripts/devnet destroy
$ sudo -E ./scripts/devnet destroy
```
Clean up VM machines.

View File

@@ -8,7 +8,7 @@ You'll install the `matchbox` service, setup a PXE network boot environment, and
Install `matchbox` on a dedicated server or Kubernetes cluster. Generate TLS credentials and enable the gRPC API as directed. Save the `ca.crt`, `client.crt`, and `client.key` on your local machine (e.g. `~/.matchbox`).
* Installing on [CoreOS / Linux distros](deployment.md)
* Installing on [Container Linux / other distros](deployment.md)
* Installing on [Kubernetes](deployment.md#kubernetes)
* Running with [rkt](deployment.md#rkt) / [docker](deployment.md#docker)
@@ -34,7 +34,7 @@ Install [Terraform][terraform-dl] v0.9+ on your system.
```sh
$ terraform version
Terraform v0.9.2
Terraform v0.9.4
```
Add the `terraform-provider-matchbox` plugin binary on your system.
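As a rough sketch (the binary path is illustrative; Terraform v0.9 discovers third-party providers via `~/.terraformrc`):
```sh
$ sudo mv terraform-provider-matchbox /usr/local/bin/
$ cat <<'EOF' >> ~/.terraformrc
providers {
  matchbox = "/usr/local/bin/terraform-provider-matchbox"
}
EOF
```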
@@ -61,7 +61,7 @@ $ git clone https://github.com/coreos/matchbox.git
$ cd matchbox/examples/terraform
```
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install CoreOS to `/dev/sda`, reboot, and have your SSH key set. It's not much of a cluster, but we'll get to that later.
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install Container Linux to `/dev/sda`, reboot, and have your SSH key set. It's not much of a cluster, but we'll get to that later.
```sh
$ cd simple-install
@@ -122,9 +122,9 @@ resource "matchbox_profile" "coreos-install" {
#### Groups
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and template in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and templates in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
```
```hcl
resource "matchbox_group" "default" {
name = "default"
profile = "${matchbox_profile.coreos-install.name}"

View File

@@ -62,5 +62,5 @@ $ sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-r
Create a VM to verify the machine network boots.
```sh
$ sudo virt-install --name uefi-test --pxe --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
$ sudo virt-install --name uefi-test --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
```

View File

@@ -4,9 +4,9 @@
Physical machines [network boot](network-booting.md) in a network boot environment with DHCP/TFTP/DNS services or with [coreos/dnsmasq](../contrib/dnsmasq).
`matchbox` serves iPXE, GRUB, or Pixiecore boot configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS. The "first boot" Ignition config is fetched and CoreOS is installed.
`matchbox` serves iPXE or GRUB configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS Container Linux. The "first boot" Ignition config is fetched and Container Linux is installed.
CoreOS boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
Container Linux boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
Coordinated auto-updates are enabled. Systems like [fleet](https://coreos.com/docs/#fleet) or [Kubernetes](http://kubernetes.io/docs/) coordinate container services. IPMI, vendor utilities, or first-boot are used to re-provision machines into new roles.

View File

@@ -1,6 +1,6 @@
# matchbox
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use CoreOS Linux's early-boot capabilities to provision CoreOS machines.
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use Container Linux's early-boot capabilities to provision Container Linux machines.
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with rkt, or as a Docker container.
@@ -59,13 +59,13 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
```json
{
"id": "etcd",
"name": "CoreOS with etcd2",
"name": "Container Linux with etcd2",
"cloud_id": "",
"ignition_id": "etcd.yaml",
"generic_id": "some-service.cfg",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
@@ -75,7 +75,7 @@ Profiles reference an Ignition config, Cloud-Config, and/or generic config by na
}
```
The `"boot"` settings will be used to render configs to network boot programs such as iPXE, GRUB, or Pixiecore. You may reference remote kernel and initrd assets or [local assets](#assets).
The `"boot"` settings will be used to render configs to network boot programs such as iPXE or GRUB. You may reference remote kernel and initrd assets or [local assets](#assets).
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
@@ -173,7 +173,7 @@ matchbox.foo/assets/
For example, a `Profile` might refer to a local asset `/assets/coreos/VERSION/coreos_production_pxe.vmlinuz` instead of `http://stable.release.core-os.net/amd64-usr/VERSION/coreos_production_pxe.vmlinuz`.
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place CoreOS assets.
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place Container Linux assets.
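For example (release version as used elsewhere in these docs):
```sh
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
```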
## Network

View File

@@ -15,7 +15,7 @@ The network environment can be set up in a number of ways, which we'll discuss.
### Network boot programs
Machines can be booted and configured with CoreOS using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
Machines can be booted and configured with CoreOS Container Linux using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
#### PXELINUX
@@ -26,7 +26,7 @@ $ mybootdir/pxelinux.cfg/b8945908-d6a6-41a9-611d-74a6ab80b83d
$ mybootdir/pxelinux.cfg/default
```
Here is an example PXE config file which boots a CoreOS image hosted on the TFTP server.
Here is an example PXE config file which boots a Container Linux image hosted on the TFTP server.
```
default coreos
@@ -53,7 +53,7 @@ This approach has a number of drawbacks. TFTP can be slow, managing config files
A DHCPOFFER to iPXE client firmware specifies an HTTP boot script such as `http://matchbox.foo/boot.ipxe`.
Here is an example iPXE script for booting the remote CoreOS stable image.
Here is an example iPXE script for booting the remote Container Linux stable image.
```
#!ipxe

View File

@@ -1,8 +1,8 @@
# Network setup
This guide shows how to create a DHCP/TFTP/DNS network boot environment to work with `matchbox` to boot and provision PXE, iPXE, or GRUB2 client machines.
This guide shows how to create a DHCP/TFTP/DNS network boot environment to boot and provision BIOS/PXE, iPXE, or UEFI client machines.
`matchbox` serves iPXE scripts or GRUB configs over HTTP as the entrypoint for CoreOS cluster bring-up. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, you can configure your own network services to point to `matchbox` or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in libvirt demos).
Matchbox serves iPXE scripts over HTTP as the entrypoint for provisioning clusters. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, configure your network environment to point to Matchbox or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in local QEMU/KVM setup).
*Note*: These are just suggestions. Your network administrator or system administrator should choose the right network setup for your company.
@@ -13,13 +13,14 @@ Client hardware must have a network interface which supports PXE or iPXE.
## Goals
* Add a DNS name which resolves to a `matchbox` deploy.
* Chainload PXE firmware to iPXE or GRUB2
* Point iPXE clients to `http://matchbox.foo:port/boot.ipxe`
* Point GRUB clients to `http://matchbox.foo:port/grub`
* Chainload BIOS clients (legacy PXE) to iPXE (undionly.kpxe)
* Chainload UEFI clients to iPXE (ipxe.efi)
* Point iPXE clients to `http://matchbox.example.com:port/boot.ipxe`
* Point GRUB clients to `http://matchbox.example.com:port/grub`
## Setup
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.foo:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE or GRUB to be able to fetch subsequent configs via HTTP.
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.example.com:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE to be able to fetch subsequent configs via HTTP.
On simpler networks, such as what a developer might have at home, a relatively inflexible DHCP server may be in place, with no TFTP server. In this case, a proxy DHCP server can be run alongside a non-PXE capable DHCP server.
@@ -31,17 +32,17 @@ The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wi
## DNS
Add a DNS entry (e.g. `matchbox.foo`, `provisoner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
Add a DNS entry (e.g. `matchbox.example.com`, `provisoner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
```sh
$ dig matchbox.foo
$ dig matchbox.example.com
```
If you deployed `matchbox` to a known IP address (e.g. dedicated host, load balanced endpoint, Kubernetes NodePort) and use `dnsmasq`, a domain name to IPv4/IPv6 address mapping could be added to the `/etc/dnsmasq.conf`.
```
# dnsmasq.conf
address=/matchbox.foo/172.18.0.2
address=/matchbox.example.com/172.18.0.2
```
## iPXE
@@ -50,7 +51,7 @@ Networks which already run DHCP and TFTP services to network boot PXE/iPXE clien
```
# /var/www/html/ipxe/default.ipxe
chain http://matchbox.foo:8080/boot.ipxe
chain http://matchbox.example.com:8080/boot.ipxe
```
You can chainload from a menu entry or use other [iPXE commands](http://ipxe.org/cmd) if you need to do more than simple delegation.
@@ -67,26 +68,35 @@ dhcp-range=192.168.1.1,192.168.1.254,30m
enable-tftp
tftp-root=/var/lib/tftpboot
# if request comes from older PXE ROM, chainload to iPXE (via TFTP)
dhcp-boot=tag:!ipxe,undionly.kpxe
# if request comes from iPXE user class, set tag "ipxe"
# Legacy PXE
dhcp-match=set:bios,option:client-arch,0
dhcp-boot=tag:bios,undionly.kpxe
# UEFI
dhcp-match=set:efi32,option:client-arch,6
dhcp-boot=tag:efi32,ipxe.efi
dhcp-match=set:efibc,option:client-arch,7
dhcp-boot=tag:efibc,ipxe.efi
dhcp-match=set:efi64,option:client-arch,9
dhcp-boot=tag:efi64,ipxe.efi
# iPXE - chainload to matchbox ipxe boot script
dhcp-userclass=set:ipxe,iPXE
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
# verbose
log-queries
log-dhcp
# static DNS assignments
address=/matchbox.foo/192.168.1.100
address=/matchbox.example.com/192.168.1.100
# (optional) disable DNS and specify alternate
# port=0
# dhcp-option=6,192.168.1.100
```
Add [undionly.kpxe](http://boot.ipxe.org/undionly.kpxe) (and undionly.kpxe.0 if using dnsmasq) to your tftp-root (e.g. `/var/lib/tftpboot`).
Add [ipxe.efi](http://boot.ipxe.org/ipxe.efi) and [undionly.kpxe](http://boot.ipxe.org/undionly.kpxe) to your tftp-root (e.g. `/var/lib/tftpboot`).
```sh
$ sudo systemctl start dnsmasq
@@ -113,7 +123,7 @@ pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe
# if request comes from iPXE user class, set tag "ipxe"
dhcp-userclass=set:ipxe,iPXE
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.foo:8080/boot.ipxe
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe
# verbose
log-queries
@@ -141,14 +151,14 @@ timeout 10
default iPXE
LABEL iPXE
KERNEL ipxe.lkrn
APPEND dhcp && chain http://matchbox.foo:8080/boot.ipxe
APPEND dhcp && chain http://matchbox.example.com:8080/boot.ipxe
```
Add ipxe.lkrn to `/var/lib/tftpboot` (see [iPXE docs](http://ipxe.org/embed)).
## coreos/dnsmasq
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `undionly.kpxe` and `grub.efi` for convenience. See [contrib/dnsmasq](contrib/dnsmasq) for details.
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `ipxe.efi`, `undionly.kpxe`, and `grub.efi` for convenience. See [contrib/dnsmasq](../contrib/dnsmasq) for details.
Run DHCP, TFTP, and DNS on the host's network:
@@ -159,9 +169,16 @@ sudo rkt run --net=host quay.io/coreos/dnsmasq \
--dhcp-range=192.168.1.3,192.168.1.254 \
--enable-tftp \
--tftp-root=/var/lib/tftpboot \
--dhcp-match=set:bios,option:client-arch,0 \
--dhcp-boot=tag:bios,undionly.kpxe \
--dhcp-match=set:efi32,option:client-arch,6 \
--dhcp-boot=tag:efi32,ipxe.efi \
--dhcp-match=set:efibc,option:client-arch,7 \
--dhcp-boot=tag:efibc,ipxe.efi \
--dhcp-match=set:efi64,option:client-arch,9 \
--dhcp-boot=tag:efi64,ipxe.efi \
--dhcp-userclass=set:ipxe,iPXE \
--dhcp-boot=tag:#ipxe,undionly.kpxe \
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
--address=/matchbox.example.com/192.168.1.2 \
--log-queries \
--log-dhcp
@@ -171,10 +188,17 @@ sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
-d -q \
--dhcp-range=192.168.1.3,192.168.1.254 \
--enable-tftp --tftp-root=/var/lib/tftpboot \
--dhcp-match=set:bios,option:client-arch,0 \
--dhcp-boot=tag:bios,undionly.kpxe \
--dhcp-match=set:efi32,option:client-arch,6 \
--dhcp-boot=tag:efi32,ipxe.efi \
--dhcp-match=set:efibc,option:client-arch,7 \
--dhcp-boot=tag:efibc,ipxe.efi \
--dhcp-match=set:efi64,option:client-arch,9 \
--dhcp-boot=tag:efi64,ipxe.efi \
--dhcp-userclass=set:ipxe,iPXE \
--dhcp-boot=tag:#ipxe,undionly.kpxe \
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
--address=/matchbox.example/192.168.1.2 \
--address=/matchbox.example.com/192.168.1.2 \
--log-queries \
--log-dhcp
```
@@ -211,20 +235,19 @@ Be sure to allow enabled services in your firewall configuration.
$ sudo firewall-cmd --add-service=dhcp --add-service=tftp --add-service=dns
```
## GRUB
## UEFI
Grub can be used to delegate as well.
### Development
`grub-mknetdir --net-directory=/var/lib/tftpboot`
Install the dependencies for [QEMU with UEFI](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU). Walk through the [getting started with Docker](getting-started-docker.md) tutorial. Launch client VMs using `create-uefi`.
/var/lib/tftpboot/boot/grub/grub.cfg:
```ini
insmod i386-pc/http.mod
set root=http,matchbox.foo:8080
configfile /grub
Create UEFI QEMU/KVM VMs attached to the `docker0` bridge.
```sh
$ sudo ./scripts/libvirt create-uefi
```
Make sure to replace variables in the example config files; instead of iPXE variables, use GRUB variables. Check the [GRUB2 manual](https://www.gnu.org/software/grub/manual/grub.html#Network).
UEFI clients should chainload `ipxe.efi`, load iPXE and Ignition configs from Matchbox, and Container Linux should boot as usual.
## Troubleshooting

View File

@@ -1,87 +0,0 @@
# Kubernetes (with rkt)
The `rktnetes` example provisions a 3 node Kubernetes v1.5.5 cluster with [rkt](https://github.com/coreos/rkt) as the container runtime. The cluster has one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
## Requirements
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) or [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
* Use rkt or Docker to start `matchbox`
* Create a network boot environment with `coreos/dnsmasq`
* Create the example libvirt client VMs
* `/etc/hosts` entries for `node[1-3].example.com` (or pass custom names to `k8s-certgen`)
## Examples
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. VMs are setup on the `metal0` CNI bridge for rkt or the `docker0` bridge for Docker. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
* [rktnetes](../examples/groups/rktnetes) - iPXE boot a Kubernetes cluster
* [rktnetes-install](../examples/groups/rktnetes-install) - Install a Kubernetes cluster to disk
* [Lab examples](https://github.com/dghubble/metal) - Lab hardware examples
## Assets
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
```sh
$ ./scripts/get-coreos stable 1298.7.0 ./examples/assets
```
Optionally, add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
Generate a root CA and Kubernetes TLS assets for components (`admin`, `apiserver`, `worker`) with SANs for `node1.example.com`, etc.
```sh
$ rm -rf examples/assets/tls
$ ./scripts/tls/k8s-certgen
```
**Note**: TLS assets are served to any machines which request them, which requires a trusted network. Alternately, provisioning may be tweaked to require TLS assets be securely copied to each host.
## Containers
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS in about 1 minute and the Kubernetes API should be available after 3-4 minutes (each node downloads a ~160MB Hyperkube). If you chose `rktnetes-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision Kubernetes clusters on physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
## Verify
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`.
```sh
$ KUBECONFIG=examples/assets/tls/kubeconfig
$ kubectl get nodes
NAME STATUS AGE
node1.example.com Ready 3m
node2.example.com Ready 3m
node3.example.com Ready 3m
```
Get all pods.
```sh
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system heapster-v1.2.0-4088228293-k3yn8 2/2 Running 0 3m
kube-system kube-apiserver-node1.example.com 1/1 Running 0 4m
kube-system kube-controller-manager-node1.example.com 1/1 Running 0 3m
kube-system kube-dns-v19-l2u8r 3/3 Running 0 4m
kube-system kube-proxy-node1.example.com 1/1 Running 0 3m
kube-system kube-proxy-node2.example.com 1/1 Running 0 3m
kube-system kube-proxy-node3.example.com 1/1 Running 0 3m
kube-system kube-scheduler-node1.example.com 1/1 Running 0 3m
kube-system kubernetes-dashboard-v1.4.1-0iy07 1/1 Running 0 4m
```
## Kubernetes Dashboard
Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod.
```sh
$ kubectl port-forward kubernetes-dashboard-v1.4.1-SOME-ID 9090 -n=kube-system
Forwarding from 127.0.0.1:9090 -> 9090
```
Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/).
<img src='img/kubernetes-dashboard.png' class="img-center" alt="Kubernetes Dashboard"/>

Jenkinsfile
View File

@@ -1,46 +1,63 @@
properties([
[$class: 'BuildDiscarderProperty', strategy: [$class: 'LogRotator', numToKeepStr: '20']],
[$class: 'GithubProjectProperty', projectUrlStr: 'https://github.com/coreos/matchbox'],
[$class: 'PipelineTriggersJobProperty', triggers: [
[$class: 'GitHubPushTrigger'],
]]
])
parallel (
etcd3: {
node('fedora && bare-metal') {
stage('etcd3') {
timeout(time:5, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
export ASSETS_DIR=~/assets; ./tests/smoke/etcd3
'''
}
}
pipeline {
agent none
options {
timeout(time:45, unit:'MINUTES')
buildDiscarder(logRotator(numToKeepStr:'20'))
}
stages {
stage('Cluster Tests') {
steps {
parallel (
etcd3: {
node('fedora && bare-metal') {
timeout(time:5, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
export ASSETS_DIR=~/assets; ./tests/smoke/etcd3
'''
deleteDir()
}
}
},
bootkube: {
node('fedora && bare-metal') {
timeout(time:60, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
chmod 600 ./tests/smoke/fake_rsa
export ASSETS_DIR=~/assets; ./tests/smoke/bootkube
'''
deleteDir()
}
}
},
"etcd3-terraform": {
node('fedora && bare-metal') {
timeout(time:10, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/etcd3-terraform
'''
deleteDir()
}
}
},
"bootkube-terraform": {
node('fedora && bare-metal') {
timeout(time:60, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
chmod 600 ./tests/smoke/fake_rsa
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/bootkube-terraform
'''
deleteDir()
}
}
},
)
}
}
},
bootkube: {
node('fedora && bare-metal') {
stage('bootkube') {
timeout(time:12, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
chmod 600 ./tests/smoke/fake_rsa
export ASSETS_DIR=~/assets; ./tests/smoke/bootkube
'''
}
}
}
},
"etcd3-terraform": {
node('fedora && bare-metal') {
stage('etcd3-terraform') {
timeout(time:10, unit:'MINUTES') {
checkout scm
sh '''#!/bin/bash -e
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/etcd3-terraform
'''
}
}
}
},
)
}
}

View File

@@ -1,6 +1,6 @@
export CGO_ENABLED:=0
VERSION=$(shell ./scripts/git-version)
VERSION=$(shell ./scripts/dev/git-version)
LD_FLAGS="-w -X github.com/coreos/matchbox/matchbox/version.Version=$(VERSION)"
REPO=github.com/coreos/matchbox
@@ -15,11 +15,11 @@ bin/%:
@go build -o bin/$* -v -ldflags $(LD_FLAGS) $(REPO)/cmd/$*
test:
@./scripts/test
@./scripts/dev/test
.PHONY: aci
aci: clean build
@sudo ./scripts/build-aci
@sudo ./scripts/dev/build-aci
.PHONY: docker-image
docker-image:
@@ -40,13 +40,13 @@ vendor:
.PHONY: codegen
codegen: tools
@./scripts/codegen
@./scripts/dev/codegen
.PHONY: tools
tools: bin/protoc bin/protoc-gen-go
bin/protoc:
@./scripts/get-protoc
@./scripts/dev/get-protoc
bin/protoc-gen-go:
@go build -o bin/protoc-gen-go $(REPO)/vendor/github.com/golang/protobuf/protoc-gen-go
@@ -78,7 +78,7 @@ _output/matchbox-%.tar.gz: DEST=_output/$(NAME)
_output/matchbox-%.tar.gz: bin/%/matchbox
mkdir -p $(DEST)
cp bin/$*/matchbox $(DEST)
./scripts/release-files $(DEST)
./scripts/dev/release-files $(DEST)
tar zcvf $(DEST).tar.gz -C _output $(NAME)
.PHONY: all build clean test release

View File

@@ -1,16 +1,14 @@
# matchbox [![Build Status](https://travis-ci.org/coreos/matchbox.svg?branch=master)](https://travis-ci.org/coreos/matchbox) [![GoDoc](https://godoc.org/github.com/coreos/matchbox?status.png)](https://godoc.org/github.com/coreos/matchbox) [![Docker Repository on Quay](https://quay.io/repository/coreos/matchbox/status "Docker Repository on Quay")](https://quay.io/repository/coreos/matchbox) [![IRC](https://img.shields.io/badge/irc-%23coreos-449FD8.svg)](https://botbot.me/freenode/coreos)
# matchbox [![Build Status](https://travis-ci.org/coreos/matchbox.svg?branch=master)](https://travis-ci.org/coreos/matchbox) [![GoDoc](https://godoc.org/github.com/coreos/matchbox?status.svg)](https://godoc.org/github.com/coreos/matchbox) [![Docker Repository on Quay](https://quay.io/repository/coreos/matchbox/status "Docker Repository on Quay")](https://quay.io/repository/coreos/matchbox) [![IRC](https://img.shields.io/badge/irc-%23coreos-449FD8.svg)](https://botbot.me/freenode/coreos)
**Announcement**: Matchbox [v0.6.0](https://github.com/coreos/matchbox/releases) is released with a new [Matchbox Terraform Provider][terraform] and [tutorial](Documentation/getting-started.md).
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles to PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], [Cloud-Config][cloud-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [terraform][terraform].
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles that PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [Terraform][terraform].
* [Documentation][docs]
* [matchbox Service](Documentation/matchbox.md)
* [Profiles](Documentation/matchbox.md#profiles)
* [Groups](Documentation/matchbox.md#groups)
* Config Templates
* [Container Linux Config][cl-config]
* [Cloud-Config][cloud-config]
* [Container Linux Config][cl-config]
* [Cloud-Config][cloud-config]
* [Configuration](Documentation/config.md)
* [HTTP API](Documentation/api.md) / [gRPC API](https://godoc.org/github.com/coreos/matchbox/matchbox/client)
* [Background: Machine Lifecycle](Documentation/machine-lifecycle.md)
@@ -19,51 +17,34 @@
### Installation
* Installation
* Installing on [CoreOS / Linux distros](Documentation/deployment.md)
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
* Installing on [Container Linux / other distros](Documentation/deployment.md)
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
* [Network Setup](Documentation/network-setup.md)
### Tutorials
* [Getting Started](Documentation/getting-started.md)
* [Getting Started](Documentation/getting-started.md) - provision physical machines with Container Linux
* Local QEMU/KVM
* [matchbox with Docker](Documentation/getting-started-docker.md)
* [matchbox with rkt](Documentation/getting-started-rkt.md)
* Clusters
* [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node Kubernetes v1.8.5 cluster
* Clusters (Terraform-based)
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node Kubernetes v1.10.3 cluster
Local QEMU/KVM
### Projects
* [matchbox with rkt](Documentation/getting-started-rkt.md)
* [matchbox with Docker](Documentation/getting-started-docker.md)
### Example Clusters
Create [example](examples) clusters on-premise or locally with [QEMU/KVM](scripts/README.md#libvirt).
**Terraform-based**
* [simple-install](Documentation/getting-started.md) - Install Container Linux with an SSH key on all machines (beginner)
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster
* Terraform [Modules](examples/terraform/modules) - Re-usable Terraform Modules
**Manual**
* [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node self-hosted Kubernetes v1.6.4 cluster
* [Tectonic](https://coreos.com/tectonic/docs/latest/index.html) - enterprise-ready Kubernetes
* [Typhoon](https://typhoon.psdn.io/) - minimal and free Kubernetes
## Contrib
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services with docker or rkt
* [squid](contrib/squid/README.md) - Run a transparent cache proxy
* [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) - Terraform plugin which supports "matchbox" provider
## Enterprise
[Tectonic](https://coreos.com/tectonic/) is the enterprise-ready Kubernetes offering from CoreOS (free for 10 nodes!). The [Tectonic Installer](https://coreos.com/tectonic/docs/latest/install/bare-metal/#4-tectonic-installer) app integrates directly with `matchbox` through its gRPC API to provide a rich graphical client for populating `matchbox` with machine configs.
Learn more from our [docs](https://coreos.com/tectonic/docs/latest/) or [blog](https://coreos.com/blog/announcing-tectonic-1.6).
![Tectonic Installer](Documentation/img/tectonic-installer.png)
![Tectonic Console](Documentation/img/tectonic-console.png)
* [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
[docs]: https://coreos.com/matchbox/docs/latest
[terraform]: https://github.com/coreos/terraform-provider-matchbox
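
As a quick orientation for the configs that follow, the HTTP endpoints that network-booting machines hit can also be exercised by hand. A minimal sketch, assuming a matchbox instance at the hypothetical `matchbox.example.com:8080` used throughout these examples:

```sh
# Fetch the iPXE bootstrap script that chainloaded clients request
curl 'http://matchbox.example.com:8080/boot.ipxe'

# Render the Ignition config for a machine, selected here by its MAC label
curl 'http://matchbox.example.com:8080/ignition?mac=52:54:00:a1:9c:ae'

# Browse cached kernel/initrd assets, if the assets directory has been populated
curl 'http://matchbox.example.com:8080/assets/'
```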


@@ -8,8 +8,6 @@ import (
"os"
"github.com/Sirupsen/logrus"
"github.com/coreos/pkg/flagutil"
web "github.com/coreos/matchbox/matchbox/http"
"github.com/coreos/matchbox/matchbox/rpc"
"github.com/coreos/matchbox/matchbox/server"
@@ -17,6 +15,7 @@ import (
"github.com/coreos/matchbox/matchbox/storage"
"github.com/coreos/matchbox/matchbox/tlsutil"
"github.com/coreos/matchbox/matchbox/version"
"github.com/coreos/pkg/flagutil"
)
var (
@@ -26,17 +25,20 @@ var (
func main() {
flags := struct {
address string
rpcAddress string
dataPath string
assetsPath string
logLevel string
certFile string
keyFile string
caFile string
keyRingPath string
version bool
help bool
address string
rpcAddress string
dataPath string
assetsPath string
logLevel string
grpcCAFile string
grpcCertFile string
grpcKeyFile string
tlsCertFile string
tlsKeyFile string
tlsEnabled bool
keyRingPath string
version bool
help bool
}{}
flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
flag.StringVar(&flags.rpcAddress, "rpc-address", "", "RPC listen address")
@@ -47,14 +49,20 @@ func main() {
flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
// gRPC Server TLS
flag.StringVar(&flags.certFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
flag.StringVar(&flags.keyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
// TLS Client Authentication
flag.StringVar(&flags.caFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
flag.StringVar(&flags.grpcCertFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
flag.StringVar(&flags.grpcKeyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
// gRPC TLS Client Authentication
flag.StringVar(&flags.grpcCAFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
// Signing
flag.StringVar(&flags.keyRingPath, "key-ring-path", "", "Path to a private keyring file")
// SSL flags
flag.StringVar(&flags.tlsCertFile, "web-cert-file", "/etc/matchbox/ssl/server.crt", "Path to the server TLS certificate file")
flag.StringVar(&flags.tlsKeyFile, "web-key-file", "/etc/matchbox/ssl/server.key", "Path to the server TLS key file")
flag.BoolVar(&flags.tlsEnabled, "web-ssl", false, "True to enable HTTPS")
// subcommands
flag.BoolVar(&flags.version, "version", false, "print version and exit")
flag.BoolVar(&flags.help, "help", false, "print usage and exit")
@@ -87,16 +95,24 @@ func main() {
}
}
if flags.rpcAddress != "" {
if _, err := os.Stat(flags.certFile); err != nil {
if _, err := os.Stat(flags.grpcCertFile); err != nil {
log.Fatalf("Provide a valid TLS server certificate with -cert-file: %v", err)
}
if _, err := os.Stat(flags.keyFile); err != nil {
if _, err := os.Stat(flags.grpcKeyFile); err != nil {
log.Fatalf("Provide a valid TLS server key with -key-file: %v", err)
}
if _, err := os.Stat(flags.caFile); err != nil {
if _, err := os.Stat(flags.grpcCAFile); err != nil {
log.Fatalf("Provide a valid TLS certificate authority for authorizing client certificates: %v", err)
}
}
if flags.tlsEnabled {
if _, err := os.Stat(flags.tlsCertFile); err != nil {
log.Fatalf("Provide a valid SSL server certificate with -web-cert-file: %v", err)
}
if _, err := os.Stat(flags.tlsKeyFile); err != nil {
log.Fatalf("Provide a valid SSL server key with -web-key-file: %v", err)
}
}
// logging setup
lvl, err := logrus.ParseLevel(flags.logLevel)
@@ -130,17 +146,17 @@ func main() {
// gRPC Server (feature disabled by default)
if flags.rpcAddress != "" {
log.Infof("Starting matchbox gRPC server on %s", flags.rpcAddress)
log.Infof("Using TLS server certificate: %s", flags.certFile)
log.Infof("Using TLS server key: %s", flags.keyFile)
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.caFile)
log.Infof("Using TLS server certificate: %s", flags.grpcCertFile)
log.Infof("Using TLS server key: %s", flags.grpcKeyFile)
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.grpcCAFile)
lis, err := net.Listen("tcp", flags.rpcAddress)
if err != nil {
log.Fatalf("failed to start listening: %v", err)
}
tlsinfo := tlsutil.TLSInfo{
CertFile: flags.certFile,
KeyFile: flags.keyFile,
CAFile: flags.caFile,
CertFile: flags.grpcCertFile,
KeyFile: flags.grpcKeyFile,
CAFile: flags.grpcCAFile,
}
tlscfg, err := tlsinfo.ServerConfig()
if err != nil {
@@ -151,7 +167,6 @@ func main() {
defer grpcServer.Stop()
}
// HTTP Server
config := &web.Config{
Core: server,
Logger: log,
@@ -160,9 +175,23 @@ func main() {
ArmoredSigner: armoredSigner,
}
httpServer := web.NewServer(config)
log.Infof("Starting matchbox HTTP server on %s", flags.address)
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
if err != nil {
log.Fatalf("failed to start listening: %v", err)
if flags.tlsEnabled {
// HTTPS Server
log.Infof("Starting matchbox HTTPS server on %s", flags.address)
log.Infof("Using SSL server certificate: %s", flags.tlsCertFile)
log.Infof("Using SSL server key: %s", flags.tlsKeyFile)
err = http.ListenAndServeTLS(flags.address, flags.tlsCertFile, flags.tlsKeyFile, httpServer.HTTPHandler())
if err != nil {
log.Fatalf("failed to start listening: %v", err)
}
} else {
// HTTP Server
log.Infof("Starting matchbox HTTP server on %s", flags.address)
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
if err != nil {
log.Fatalf("failed to start listening: %v", err)
}
}
}
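
Taken together, the new `-web-ssl`, `-web-cert-file`, and `-web-key-file` flags let the read-only HTTP API be served over TLS alongside the existing gRPC TLS flags. A minimal launch sketch using the default certificate paths from the flag definitions above (generate real certificates out of band):

```sh
./matchbox \
  -address=0.0.0.0:8080 \
  -web-ssl \
  -web-cert-file=/etc/matchbox/ssl/server.crt \
  -web-key-file=/etc/matchbox/ssl/server.key \
  -rpc-address=0.0.0.0:8081 \
  -cert-file=/etc/matchbox/server.crt \
  -key-file=/etc/matchbox/server.key \
  -ca-file=/etc/matchbox/ca.crt \
  -log-level=debug
```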

code-of-conduct.md (new file, 61 lines)

@@ -0,0 +1,61 @@
## CoreOS Community Code of Conduct
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute through reporting issues, posting feature requests, updating
documentation, submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
project maintainers commit themselves to fairly and consistently applying these
principles to every aspect of managing this project. Project maintainers who do
not follow or enforce the Code of Conduct may be permanently removed from the
project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a project maintainer, Brandon Philips
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/
### CoreOS Events Code of Conduct
CoreOS events are working conferences intended for professional networking and
collaboration in the CoreOS community. Attendees are expected to behave
according to professional standards and in accordance with their employers
policies on appropriate workplace behavior.
While at CoreOS events or related social networking opportunities, attendees
should not engage in discriminatory or offensive speech or actions including
but not limited to gender, sexuality, race, age, disability, or religion.
Speakers should be especially aware of these concerns.
CoreOS does not condone any statements by speakers contrary to these standards.
CoreOS reserves the right to deny entrance and/or eject from an event (without
refund) any individual found to be engaging in discriminatory or offensive
speech or actions.
Please bring any concerns to the immediate attention of designated on-site
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.


@@ -2,6 +2,11 @@
Notable changes between image releases. The dnsmasq project [upstream](http://www.thekelleys.org.uk/dnsmasq/doc.html) has its own [changelog](http://www.thekelleys.org.uk/dnsmasq/CHANGELOG).
## v0.4.1
* Rebuild with alpine:3.6 base image
* Add EXPOSE ports 67 and 69 to Dockerfile
## v0.4.0
* `dnsmasq` package version 2.76


@@ -1,6 +1,6 @@
FROM alpine:3.5
FROM alpine:3.6
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
RUN apk -U add dnsmasq curl
COPY tftpboot /var/lib/tftpboot
EXPOSE 53
ENTRYPOINT ["/usr/sbin/dnsmasq"]
EXPOSE 53 67 69
ENTRYPOINT ["/usr/sbin/dnsmasq"]


@@ -1,4 +1,4 @@
VERSION=v0.4.0
VERSION=v0.5.0
IMAGE_REPO=coreos/dnsmasq
QUAY_REPO=quay.io/coreos/dnsmasq
@@ -6,12 +6,12 @@ QUAY_REPO=quay.io/coreos/dnsmasq
.PHONY: all
all: docker-image
.PHONY: undionly
undionly:
.PHONY: tftp
tftp:
@./get-tftp-files
.PHONY: docker-image
docker-image: undionly
docker-image: tftp
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest


@@ -2,7 +2,7 @@
`dnsmasq` provides a container image for running DHCP, proxy DHCP, DNS, and/or TFTP with [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html). Use it to test different network setups with clusters of network bootable machines.
The image bundles `undionly.kpxe` which chainloads PXE clients to iPXE and `grub.efi` (experimental) which chainloads UEFI architectures to GRUB2.
The image bundles `undionly.kpxe`, `ipxe.efi`, and `grub.efi` (experimental) for chainloading BIOS and UEFI clients to iPXE.
## Usage
@@ -15,8 +15,15 @@ sudo rkt run --net=host quay.io/coreos/dnsmasq \
--dhcp-range=192.168.1.3,192.168.1.254 \
--enable-tftp \
--tftp-root=/var/lib/tftpboot \
--dhcp-match=set:bios,option:client-arch,0 \
--dhcp-boot=tag:bios,undionly.kpxe \
--dhcp-match=set:efi32,option:client-arch,6 \
--dhcp-boot=tag:efi32,ipxe.efi \
--dhcp-match=set:efibc,option:client-arch,7 \
--dhcp-boot=tag:efibc,ipxe.efi \
--dhcp-match=set:efi64,option:client-arch,9 \
--dhcp-boot=tag:efi64,ipxe.efi \
--dhcp-userclass=set:ipxe,iPXE \
--dhcp-boot=tag:#ipxe,undionly.kpxe \
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
--address=/matchbox.example.com/192.168.1.2 \
--log-queries \
@@ -28,8 +35,15 @@ sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
-d -q \
--dhcp-range=192.168.1.3,192.168.1.254 \
--enable-tftp --tftp-root=/var/lib/tftpboot \
--dhcp-match=set:bios,option:client-arch,0 \
--dhcp-boot=tag:bios,undionly.kpxe \
--dhcp-match=set:efi32,option:client-arch,6 \
--dhcp-boot=tag:efi32,ipxe.efi \
--dhcp-match=set:efibc,option:client-arch,7 \
--dhcp-boot=tag:efibc,ipxe.efi \
--dhcp-match=set:efi64,option:client-arch,9 \
--dhcp-boot=tag:efi64,ipxe.efi \
--dhcp-userclass=set:ipxe,iPXE \
--dhcp-boot=tag:#ipxe,undionly.kpxe \
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
--address=/matchbox.example.com/192.168.1.2 \
--log-queries \
@@ -53,8 +67,13 @@ Configuration arguments can be provided as flags. Check the dnsmasq [man pages](
Build a container image locally.
make docker-image
```
make docker-image
```
Run the image with Docker on the `docker0` bridge (default).
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
```
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
```


@@ -1,5 +1,6 @@
# dnsmasq.conf
no-daemon
dhcp-range=172.17.0.50,172.17.0.99
dhcp-option=3,172.17.0.1
dhcp-host=52:54:00:a1:9c:ae,172.17.0.21,1h
@@ -10,15 +11,27 @@ dhcp-host=52:54:00:d7:99:c7,172.17.0.24,1h
enable-tftp
tftp-root=/var/lib/tftpboot
# Legacy PXE
dhcp-match=set:bios,option:client-arch,0
dhcp-boot=tag:bios,undionly.kpxe
# UEFI
dhcp-match=set:efi32,option:client-arch,6
dhcp-boot=tag:efi32,ipxe.efi
dhcp-match=set:efibc,option:client-arch,7
dhcp-boot=tag:efibc,ipxe.efi
dhcp-match=set:efi64,option:client-arch,9
dhcp-boot=tag:efi64,ipxe.efi
# iPXE
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:#ipxe,undionly.kpxe
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
log-queries
log-dhcp
address=/bootcfg.foo/172.18.0.2
address=/matchbox.foo/172.17.0.2
address=/matchbox.example.com/172.17.0.2
address=/node1.example.com/172.17.0.21
address=/node2.example.com/172.17.0.22


@@ -10,6 +10,7 @@ fi
curl -s -o $DEST/undionly.kpxe http://boot.ipxe.org/undionly.kpxe
cp $DEST/undionly.kpxe $DEST/undionly.kpxe.0
curl -s -o $DEST/ipxe.efi http://boot.ipxe.org/ipxe.efi
# Any vaguely recent CoreOS grub.efi is fine
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1298.7.0/coreos_production_pxe_grub.efi
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1353.7.0/coreos_production_pxe_grub.efi


@@ -13,13 +13,11 @@ tftp-root=/var/lib/tftpboot
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:#ipxe,undionly.kpxe
dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
log-queries
log-dhcp
address=/bootcfg.foo/172.18.0.2
address=/matchbox.foo/172.18.0.2
address=/matchbox.example.com/172.18.0.2
address=/node1.example.com/172.18.0.21
address=/node2.example.com/172.18.0.22


@@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: matchbox
image: quay.io/coreos/matchbox:v0.6.0
image: quay.io/coreos/matchbox:v0.7.1
env:
- name: MATCHBOX_ADDRESS
value: "0.0.0.0:8080"


@@ -2,12 +2,7 @@ apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: matchbox
annotations:
ingress.kubernetes.io/ssl-passthrough: "true"
spec:
tls:
- hosts:
- matchbox-rpc.example.com
rules:
- host: matchbox.example.com
http:
@@ -16,6 +11,18 @@ spec:
backend:
serviceName: matchbox
servicePort: 8080
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: matchbox
annotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
spec:
tls:
- hosts:
- matchbox-rpc.example.com
rules:
- host: matchbox-rpc.example.com
http:
paths:


@@ -65,7 +65,7 @@ iptables -t nat -A PREROUTING -i enp14s0 -p tcp --dport 80 -j REDIRECT --to-port
Your DHCP server should be configured so the Squid host is the default gateway for PXE, iPXE, or GRUB2 clients. For deployments that run Squid on the same host as dnsmasq, remove any DHCP option 3 settings, for example ```--dhcp-option=3,192.168.10.1```.
Update Matchbox policies to use the url of the CoreOS kernel/initrd download site:
Update Matchbox policies to use the url of the Container Linux kernel/initrd download site:
```
cat policy/etcd3.json
{


@@ -4,7 +4,7 @@ Documentation=https://github.com/coreos/matchbox
[Service]
Environment="IMAGE=quay.io/coreos/matchbox"
Environment="VERSION=v0.6.0"
Environment="VERSION=v0.7.1"
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
Environment="MATCHBOX_LOG_LEVEL=debug"


@@ -4,7 +4,7 @@ Documentation=https://github.com/coreos/matchbox
[Service]
Environment="IMAGE=quay.io/coreos/matchbox"
Environment="VERSION=v0.6.0"
Environment="VERSION=v0.7.1"
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets


@@ -8,9 +8,9 @@ These examples use [Terraform](https://www.terraform.io/intro/) as a client to M
| Name | Description |
|-------------------------------|-------------------------------|
| [simple-install](terraform/simple-install) | Install Container Linux with an SSH key |
| [etcd3-install](terraform/etcd3-install) | Install a 3-node etcd3 cluster |
| [bootkube-install](terraform/bootkube-install) | Install a 3-node self-hosted Kubernetes v1.6.4 cluster |
| [simple-install](terraform/simple-install/) | Install Container Linux with an SSH key |
| [etcd3-install](terraform/etcd3-install/) | Install a 3-node etcd3 cluster |
| [bootkube-install](terraform/bootkube-install/) | Install a 3-node Kubernetes v1.10.3 cluster |
### Customization
@@ -20,15 +20,15 @@ You are encouraged to look through the examples and Terraform modules. Implement
These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/matchbox/` directory.
| Name | Description | CoreOS Version | FS | Docs |
| Name | Description | CoreOS Container Linux Version | FS | Docs |
|------------|-------------|----------------|----|-----------|
| simple | CoreOS with autologin, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
| simple-install | CoreOS Install, using iPXE | stable/1298.7.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
| grub | CoreOS via GRUB2 Netboot | stable/1298.7.0 | RAM | NA |
| etcd3 | PXE boot 3 node etcd3 cluster with proxies | stable/1298.7.0 | RAM | None |
| etcd3-install | Install a 3 node etcd3 cluster to disk | stable/1298.7.0 | Disk | None |
| bootkube | PXE boot a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) |
| bootkube-install | Install a self-hosted Kubernetes v1.6.4 cluster | stable/1298.7.0 | Disk | [tutorial](../Documentation/bootkube.md) |
| simple | CoreOS Container Linux with autologin, using iPXE | stable/1576.5.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
| simple-install | CoreOS Container Linux Install, using iPXE | stable/1576.5.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
| grub | CoreOS Container Linux via GRUB2 Netboot | stable/1576.5.0 | RAM | NA |
| etcd3 | PXE boot a 3-node etcd3 cluster with proxies | stable/1576.5.0 | RAM | None |
| etcd3-install | Install a 3-node etcd3 cluster to disk | stable/1576.5.0 | Disk | None |
| bootkube | PXE boot a 3-node Kubernetes v1.8.5 cluster | stable/1576.5.0 | Disk | [tutorial](../Documentation/bootkube.md) |
| bootkube-install | Install a 3-node Kubernetes v1.8.5 cluster | stable/1576.5.0 | Disk | [tutorial](../Documentation/bootkube.md) |
### Customization


@@ -0,0 +1,56 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: container-linux-update-agent
namespace: kube-system
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
app: container-linux-update-agent
spec:
containers:
- name: update-agent
image: quay.io/coreos/container-linux-update-operator:v0.3.1
command:
- "/bin/update-agent"
volumeMounts:
- mountPath: /var/run/dbus
name: var-run-dbus
- mountPath: /etc/coreos
name: etc-coreos
- mountPath: /usr/share/coreos
name: usr-share-coreos
- mountPath: /etc/os-release
name: etc-os-release
env:
# read by update-agent as the node name to manage reboots for
- name: UPDATE_AGENT_NODE
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
volumes:
- name: var-run-dbus
hostPath:
path: /var/run/dbus
- name: etc-coreos
hostPath:
path: /etc/coreos
- name: usr-share-coreos
hostPath:
path: /usr/share/coreos
- name: etc-os-release
hostPath:
path: /etc/os-release


@@ -0,0 +1,22 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: container-linux-update-operator
namespace: kube-system
spec:
replicas: 1
template:
metadata:
labels:
app: container-linux-update-operator
spec:
containers:
- name: update-operator
image: quay.io/coreos/container-linux-update-operator:v0.3.1
command:
- "/bin/update-operator"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
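
To try the Container Linux Update Operator manifests above on a cluster, apply them and confirm the agent DaemonSet lands on every node. A sketch (the file names are illustrative; use wherever the manifests are saved):

```sh
kubectl apply -f update-operator.yaml
kubectl apply -f update-agent.yaml

# Expect one agent pod per node, including masters (it tolerates the master taint)
kubectl -n kube-system get pods -l app=container-linux-update-agent -o wide
```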


@@ -1,44 +0,0 @@
## gRPC API Credentials
Create FAKE TLS credentials for running the `matchbox` gRPC API examples.
**DO NOT** use these certificates for anything other than running `matchbox` examples. Use your organization's production PKI for production deployments.
Navigate to the example directory which will be mounted as `/etc/matchbox` in examples:
cd matchbox/examples/etc/matchbox
Set certificate subject alt names which should be used by exporting `SAN`. Use the DNS name or IP at which `matchbox` is hosted.
# for examples on metal0 or docker0 bridges
export SAN=IP.1:127.0.0.1,IP.2:172.18.0.2
# production example
export SAN=DNS.1:matchbox.example.com
Create a fake `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`. Type 'Y' when prompted.
$ ./cert-gen
Creating FAKE CA, server cert/key, and client cert/key...
...
...
...
******************************************************************
WARNING: Generated TLS credentials are ONLY SUITABLE FOR EXAMPLES!
Use your organization's production PKI for production deployments!
## Inspect
Inspect the generated FAKE certificates if desired.
openssl x509 -noout -text -in ca.crt
openssl x509 -noout -text -in server.crt
openssl x509 -noout -text -in client.crt
## Verify
Verify that the FAKE server and client certificates were signed by the fake CA.
openssl verify -CAfile ca.crt server.crt
openssl verify -CAfile ca.crt client.crt


@@ -1,11 +1,11 @@
{
"id": "coreos-install",
"name": "CoreOS Install",
"name": "CoreOS Container Linux Install",
"profile": "install-reboot",
"metadata": {
"coreos_channel": "stable",
"coreos_version": "1298.7.0",
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
"baseurl": "http://matchbox.foo:8080/assets/coreos"
"coreos_version": "1576.5.0",
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
}
}


@@ -8,11 +8,12 @@
},
"metadata": {
"domain_name": "node1.example.com",
"etcd_initial_cluster": "node1=http://node1.example.com:2380",
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
"etcd_name": "node1",
"k8s_dns_service_ip": "10.3.0.10",
"ssh_authorized_keys": [
"ADD ME"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
]
}
}


@@ -8,10 +8,9 @@
},
"metadata": {
"domain_name": "node2.example.com",
"etcd_endpoints": "node1.example.com:2379",
"k8s_dns_service_ip": "10.3.0.10",
"ssh_authorized_keys": [
"ADD ME"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
]
}
}


@@ -8,10 +8,9 @@
},
"metadata": {
"domain_name": "node3.example.com",
"etcd_endpoints": "node1.example.com:2379",
"k8s_dns_service_ip": "10.3.0.10",
"ssh_authorized_keys": [
"ADD ME"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
]
}
}


@@ -7,7 +7,7 @@
},
"metadata": {
"domain_name": "node1.example.com",
"etcd_initial_cluster": "node1=http://node1.example.com:2380",
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
"etcd_name": "node1",
"k8s_dns_service_ip": "10.3.0.10",
"pxe": "true",


@@ -7,7 +7,6 @@
},
"metadata": {
"domain_name": "node2.example.com",
"etcd_endpoints": "node1.example.com:2379",
"k8s_dns_service_ip": "10.3.0.10",
"pxe": "true",
"ssh_authorized_keys": [


@@ -7,7 +7,6 @@
},
"metadata": {
"domain_name": "node3.example.com",
"etcd_endpoints": "node1.example.com:2379",
"k8s_dns_service_ip": "10.3.0.10",
"pxe": "true",
"ssh_authorized_keys": [


@@ -1,11 +1,11 @@
{
"id": "coreos-install",
"name": "CoreOS Install",
"name": "CoreOS Container Linux Install",
"profile": "install-reboot",
"metadata": {
"coreos_channel": "stable",
"coreos_version": "1298.7.0",
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
"baseurl": "http://matchbox.foo:8080/assets/coreos"
"coreos_version": "1576.5.0",
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
}
}


@@ -1,5 +1,5 @@
{
"id": "default",
"name": "GRUB CoreOS alpha",
"name": "GRUB CoreOS Container Linux alpha",
"profile": "grub"
}


@@ -1,11 +1,11 @@
{
"id": "install",
"name": "Simple CoreOS Alpha Install",
"name": "Simple CoreOS Container Linux Install",
"profile": "simple-install",
"metadata": {
"coreos_channel": "stable",
"coreos_version": "1298.7.0",
"ignition_endpoint": "http://matchbox.foo:8080/ignition",
"baseurl": "http://matchbox.foo:8080/assets/coreos"
"coreos_version": "1576.5.0",
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
}
}


@@ -1,6 +1,6 @@
{
"id": "simple",
"name": "Simple CoreOS Alpha",
"name": "Simple CoreOS Container Linux Alpha",
"profile": "simple",
"selector": {
"os": "installed"


@@ -1,5 +1,5 @@
{
"id": "default",
"name": "Simple CoreOS Alpha with RAM disk",
"name": "Simple CoreOS Container Linux Alpha with RAM disk",
"profile": "simple"
}


@@ -7,22 +7,27 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
Environment="ETCD_IMAGE_TAG=v3.2.0"
Environment="ETCD_NAME={{.etcd_name}}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
Environment="ETCD_ADVERTISE_CLIENT_URLS=https://{{.domain_name}}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=https://{{.domain_name}}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379"
Environment="ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380"
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
Environment="ETCD_SSL_DIR=/etc/ssl/etcd"
Environment="ETCD_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/server-ca.crt"
Environment="ETCD_CERT_FILE=/etc/ssl/certs/etcd/server.crt"
Environment="ETCD_KEY_FILE=/etc/ssl/certs/etcd/server.key"
Environment="ETCD_CLIENT_CERT_AUTH=true"
Environment="ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/certs/etcd/peer-ca.crt"
Environment="ETCD_PEER_CERT_FILE=/etc/ssl/certs/etcd/peer.crt"
Environment="ETCD_PEER_KEY_FILE=/etc/ssl/certs/etcd/peer.key"
Environment="ETCD_PEER_CLIENT_CERT_AUTH=true"
- name: docker.service
enable: true
- name: locksmithd.service
dropins:
- name: 40-etcd-lock.conf
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
mask: true
- name: kubelet.path
enable: true
contents: |
@@ -51,37 +56,41 @@ systemd:
Description=Kubelet via Hyperkube ACI
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
--volume=resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
--mount volume=opt-cni-bin,target=/opt/cni/bin \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log"
--mount volume=var-log,target=/var/log \
--insecure-options=image"
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--client-ca-file=/etc/kubernetes/ca.crt \
--anonymous-auth=false \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--hostname-override={{.domain_name}} \
--node-labels=node-role.kubernetes.io/master \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--anonymous-auth=false \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns={{.k8s_dns_service_ip}} \
--cluster_domain=cluster.local
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
--cluster_domain=cluster.local \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--exit-on-lock-contention \
--hostname-override={{.domain_name}} \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/master \
--pod-manifest-path=/etc/kubernetes/manifests \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--require-kubeconfig
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
Restart=always
RestartSec=10
[Install]
@@ -117,8 +126,14 @@ storage:
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
KUBELET_IMAGE_TAG=v1.8.5
- path: /etc/ssl/etcd/.empty
filesystem: root
mode: 0644
contents:
inline: |
empty
- path: /etc/hostname
filesystem: root
mode: 0644
@@ -143,7 +158,7 @@ storage:
# Wrapper for bootkube start
set -e
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.4.4}"
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.9.1}"
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
exec /usr/bin/rkt run \
--trust-keys-from-https \


@@ -1,25 +1,10 @@
---
systemd:
units:
- name: etcd-member.service
enable: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
ExecStart=
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
--listen-addr=127.0.0.1:2379 \
--endpoints={{.etcd_endpoints}}
- name: docker.service
enable: true
- name: locksmithd.service
dropins:
- name: 40-etcd-lock.conf
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
mask: true
- name: kubelet.path
enable: true
contents: |
@@ -48,36 +33,40 @@ systemd:
Description=Kubelet via Hyperkube ACI
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \
--volume=resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume opt-cni-bin,kind=host,source=/opt/cni/bin \
--mount volume=opt-cni-bin,target=/opt/cni/bin \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log"
--mount volume=var-log,target=/var/log \
--insecure-options=image"
ExecStartPre=/bin/mkdir -p /opt/cni/bin
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--client-ca-file=/etc/kubernetes/ca.crt \
--anonymous-auth=false \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--hostname-override={{.domain_name}} \
--node-labels=node-role.kubernetes.io/node \
--anonymous-auth=false \
--client-ca-file=/etc/kubernetes/ca.crt \
--cluster_dns={{.k8s_dns_service_ip}} \
--cluster_domain=cluster.local
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
--cluster_domain=cluster.local \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--exit-on-lock-contention \
--hostname-override={{.domain_name}} \
--kubeconfig=/etc/kubernetes/kubeconfig \
--lock-file=/var/run/lock/kubelet.lock \
--network-plugin=cni \
--node-labels=node-role.kubernetes.io/node \
--pod-manifest-path=/etc/kubernetes/manifests \
--require-kubeconfig
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid
Restart=always
RestartSec=5
[Install]
@@ -106,8 +95,14 @@ storage:
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
KUBELET_IMAGE_URL=docker://gcr.io/google_containers/hyperkube
KUBELET_IMAGE_TAG=v1.8.5
- path: /etc/ssl/etcd/.empty
filesystem: root
mode: 0644
contents:
inline: |
empty
- path: /etc/hostname
filesystem: root
mode: 0644


@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
Environment="ETCD_IMAGE_TAG=v3.2.0"
ExecStart=
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
--listen-addr=127.0.0.1:2379 \


@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
Environment="ETCD_IMAGE_TAG=v3.2.0"
Environment="ETCD_NAME={{.etcd_name}}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"


@@ -20,7 +20,7 @@ storage:
contents:
inline: |
#!/bin/bash -ex
curl --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
curl --retry 10 --fail "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
coreos-install -d /dev/sda -C {{.coreos_channel}} -V {{.coreos_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}}
udevadm settle
systemctl reboot


@@ -2,11 +2,12 @@
"id": "bootkube-controller",
"name": "bootkube Ready Controller",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"initrd=coreos_production_pxe_image.cpio.gz",
"root=/dev/sda1",
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",


@@ -2,11 +2,12 @@
"id": "bootkube-worker",
"name": "bootkube Ready Worker",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"initrd=coreos_production_pxe_image.cpio.gz",
"root=/dev/sda1",
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",


@@ -2,10 +2,11 @@
"id": "etcd3-gateway",
"name": "etcd3-gateway",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",


@@ -2,10 +2,11 @@
"id": "etcd3",
"name": "etcd3",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
@@ -13,4 +14,4 @@
]
},
"ignition_id": "etcd3.yaml"
}
}


@@ -1,11 +1,11 @@
{
"id": "grub",
"name": "CoreOS via GRUB2",
"name": "CoreOS Container Linux via GRUB2",
"boot": {
"kernel": "(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["(http;matchbox.foo:8080)/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "(http;matchbox.example.com:8080)/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["(http;matchbox.example.com:8080)/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition",
"coreos.config.url=http://matchbox.example.com:8080/ignition",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
@@ -13,4 +13,4 @@
]
},
"ignition_id": "ssh.yaml"
}
}


@@ -1,11 +1,12 @@
{
"id": "install-reboot",
"name": "Install CoreOS and Reboot",
"name": "Install CoreOS Container Linux and Reboot",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
@@ -13,4 +14,4 @@
]
},
"ignition_id": "install-reboot.yaml"
}
}


@@ -1,11 +1,12 @@
{
"id": "simple-install",
"name": "Simple CoreOS Alpha Install",
"name": "Simple CoreOS Container Linux Alpha Install",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
@@ -13,4 +14,4 @@
]
},
"ignition_id": "install-reboot.yaml"
}
}


@@ -1,16 +1,19 @@
{
"id": "simple",
"name": "Simple CoreOS Alpha",
"boot": {
"kernel": "/assets/coreos/1298.7.0/coreos_production_pxe.vmlinuz",
"initrd": ["/assets/coreos/1298.7.0/coreos_production_pxe_image.cpio.gz"],
"args": [
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
"coreos.autologin"
]
},
"ignition_id": "ssh.yaml"
}
"id": "simple",
"name": "Simple CoreOS Container Linux Alpha",
"boot": {
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
"initrd": [
"/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"
],
"args": [
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=http://matchbox.example.com:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
"coreos.autologin"
]
},
"ignition_id": "ssh.yaml"
}


@@ -1,6 +1,6 @@
# Self-hosted Kubernetes
# Kubernetes
The self-hosted Kubernetes example shows how to use matchbox to network boot and provision a 3 node "self-hosted" Kubernetes v1.6.4 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting.
The Kubernetes example shows how to use Matchbox to network boot and provision a 3 node Kubernetes v1.10.3 cluster. This example uses [Terraform](https://www.terraform.io/intro/index.html) and a module provided by [Typhoon](https://github.com/poseidon/typhoon) to describe cluster resources. [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) is run once to bootstrap the Kubernetes control plane.
## Requirements
@@ -9,7 +9,7 @@ Follow the getting started [tutorial](../../../Documentation/getting-started.md)
* Matchbox v0.6+ [installation](../../../Documentation/deployment.md) with gRPC API enabled
* Matchbox provider credentials `client.crt`, `client.key`, and `ca.crt`
* PXE [network boot](../../../Documentation/network-setup.md) environment
* Terraform v0.9+ and [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) installed locally on your system
* Terraform v0.10.x or v0.11.x and [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) installed locally on your system
* Machines with known DNS names and MAC addresses
If you prefer to provision QEMU/KVM VMs on your local Linux machine, set up the matchbox [development environment](../../../Documentation/getting-started-rkt.md).
@@ -32,16 +32,16 @@ Copy the `terraform.tfvars.example` file to `terraform.tfvars`. Ensure `provider
```hcl
matchbox_http_endpoint = "http://matchbox.example.com:8080"
matchbox_rpc_endpoint = "matchbox.example.com:8081"
ssh_authorized_key = "ADD ME"
cluster_name = "demo"
container_linux_version = "1298.7.0"
container_linux_channel = "stable"
ssh_authorized_key = "ADD ME"
os_channel = "coreos-stable"
os_version = "1576.5.0"
```
Provide an ordered list of controller names, MAC addresses, and domain names. Provide an ordered list of worker names, MAC addresses, and domain names.
```
```hcl
controller_names = ["node1"]
controller_macs = ["52:54:00:a1:9c:ae"]
controller_domains = ["node1.example.com"]
@@ -50,48 +50,80 @@ worker_macs = ["52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
worker_domains = ["node2.example.com", "node3.example.com"]
```
Finally, provide an `assets_dir` for generated manifests and a DNS name which you've set up to resolve to controller(s) (e.g. round-robin). Worker nodes and your kubeconfig will communicate via this endpoint.
Provide an `assets_dir` for generated manifests and a DNS name which you've set up to resolve to controller(s) (e.g. round-robin). Worker nodes and your kubeconfig will communicate via this endpoint.
```
```hcl
k8s_domain_name = "cluster.example.com"
asset_dir = "assets"
```
You may set `experimental_self_hosted_etcd = "true"` to deploy "self-hosted" etcd atop Kubernetes instead of running etcd on hosts directly. Warning, this is experimental and potentially dangerous.
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
## Apply
### Optional
Fetch the [bootkube](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html) for bare-metal, which is maintained in the matchbox repo.
You may set certain optional variables to override defaults. Set `networking` to either "flannel" or "calico" to set the networking provider. [Check upstream](https://typhoon.psdn.io/bare-metal/) for the full list of options.
```sh
$ terraform get
```hcl
# Optional (defaults)
# cached_install = "false"
# install_disk = "/dev/sda"
# container_linux_oem = ""
# networking = "flannel"
```
Plan and apply to create the resources on Matchbox.
The default is to create a Kubernetes cluster with 1 controller and 2 workers as an example, but check `multi-controller.tfvars.example` for an example which defines 3 controllers and 1 worker.
## Terraform
Initialize Terraform from the `bootkube-install` directory.
```sh
terraform init
```
Get or update Terraform modules.
```sh
$ terraform get # downloads missing modules
$ terraform get --update # updates all modules
Get: git::https://github.com/poseidon/typhoon (update)
Get: git::https://github.com/poseidon/bootkube-terraform.git?ref=v0.11.0 (update)
```
Plan the resources to be created.
```sh
$ terraform plan
Plan: 37 to add, 0 to change, 0 to destroy.
Plan: 55 to add, 0 to change, 0 to destroy.
```
Terraform will configure matchbox with profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`) and add groups to match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `noe3` provision themselves as workers.
Terraform will configure matchbox with profiles (e.g. `cached-container-linux-install`, `bootkube-controller`, `bootkube-worker`) and add groups to match machines by MAC address to a profile. These resources declare that each machine should PXE boot and install Container Linux to disk. `node1` will provision itself as a controller, while `node2` and `node3` provision themselves as workers.
The module referenced in `cluster.tf` will also generate bootkube assets to `assets_dir` (exactly like the [bootkube](https://github.com/kubernetes-incubator/bootkube) binary would). These assets include Kubernetes bootstrapping and control plane manifests as well as a kubeconfig you can use to access the cluster.
### ssh-agent
Initial bootstrapping requires `bootkube.service` be started on one controller node. Terraform uses `ssh-agent` to automate this step. Add your SSH private key to `ssh-agent`, otherwise `terraform apply` will hang.
```sh
$ terraform apply
module.cluster.null_resource.copy-kubeconfig.0: Still creating... (5m0s elapsed)
module.cluster.null_resource.copy-kubeconfig.1: Still creating... (5m0s elapsed)
module.cluster.null_resource.copy-kubeconfig.2: Still creating... (5m0s elapsed)
ssh-add ~/.ssh/id_rsa
ssh-add -L
```
### Apply
Apply the changes.
```sh
module.cluster.null_resource.copy-secrets.0: Still creating... (5m0s elapsed)
module.cluster.null_resource.copy-secrets.1: Still creating... (5m0s elapsed)
module.cluster.null_resource.copy-secrets.2: Still creating... (5m0s elapsed)
...
module.cluster.null_resource.bootkube-start: Still creating... (8m40s elapsed)
...
Apply complete! Resources: 37 added, 0 changed, 0 destroyed.
```
You can now move on to the "Machines" section. Apply will loop until it can successfully copy the kubeconfig to each node and start the one-time Kubernetes bootstrapping process on a controller. In practice, you may see `apply` fail if it connects before the disk install has completed. Run terraform apply until it reconciles successfully.
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
Apply will then loop until it can successfully copy credentials to each machine and start the one-time Kubernetes bootstrap service. Proceed to the next step while this loops.
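
If `apply` errors out before a node finishes its disk install, it is safe to simply re-run it; you can also watch the one-time bootstrap from the first controller. A sketch, assuming SSH access as the default `core` user:

```sh
terraform apply

# Watch the one-time control plane bootstrap (bootkube.service) on the first controller
ssh core@node1.example.com 'journalctl -u bootkube -f'
```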
## Machines
@@ -114,32 +146,34 @@ $ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
```sh
$ KUBECONFIG=assets/auth/kubeconfig
$ export KUBECONFIG=assets/auth/kubeconfig
$ kubectl get nodes
NAME STATUS AGE
node1.example.com Ready 3m
node2.example.com Ready 3m
node3.example.com Ready 3m
NAME STATUS AGE VERSION
node1.example.com Ready 11m v1.10.0
node2.example.com Ready 11m v1.10.0
node3.example.com Ready 11m v1.10.0
$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system checkpoint-installer-p8g8r 1/1 Running 1 13m
kube-system kube-apiserver-s5gnx 1/1 Running 1 41s
kube-system kube-controller-manager-3438979800-jrlnd 1/1 Running 1 13m
kube-system kube-controller-manager-3438979800-tkjx7 1/1 Running 1 13m
kube-system kube-dns-4101612645-xt55f 4/4 Running 4 13m
kube-system kube-flannel-pl5c2 2/2 Running 0 13m
kube-system kube-flannel-r9t5r 2/2 Running 3 13m
kube-system kube-flannel-vfb0s 2/2 Running 4 13m
kube-system kube-proxy-cvhmj 1/1 Running 0 13m
kube-system kube-proxy-hf9mh 1/1 Running 1 13m
kube-system kube-proxy-kpl73 1/1 Running 1 13m
kube-system kube-scheduler-694795526-1l23b 1/1 Running 1 13m
kube-system kube-scheduler-694795526-fks0b 1/1 Running 1 13m
kube-system pod-checkpointer-node1.example.com 1/1 Running 2 10m
kube-system kube-flannel-fqp7f 2/2 Running 1 11m
kube-system kube-flannel-gnjrm 2/2 Running 0 11m
kube-system kube-flannel-llbgt 2/2 Running 0 11m
kube-system kube-apiserver-7336w 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-b9chx 1/1 Running 0 11m
kube-system kube-controller-manager-3271970485-v30js 1/1 Running 1 11m
kube-system kube-dns-1187388186-mx9rt 3/3 Running 0 11m
kube-system kube-proxy-50sd4 1/1 Running 0 11m
kube-system kube-proxy-bczhp 1/1 Running 0 11m
kube-system kube-proxy-mp2fw 1/1 Running 0 11m
kube-system kube-scheduler-3895335239-fd3l7 1/1 Running 1 11m
kube-system kube-scheduler-3895335239-hfjv0 1/1 Running 0 11m
kube-system pod-checkpointer-wf65d 1/1 Running 0 11m
kube-system pod-checkpointer-wf65d-node1.example.com 1/1 Running 0 11m
```
Try restarting machines or deleting pods to see that the cluster is resilient to failures.
## Addons
Install **important** cluster [addons](../../../Documentation/cluster-addons.md).
## Going Further


@@ -1,15 +1,26 @@
// Self-hosted Kubernetes cluster
// Kubernetes cluster
module "cluster" {
source = "../modules/bootkube"
source = "git::https://github.com/poseidon/typhoon//bare-metal/container-linux/kubernetes?ref=v1.10.3"
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
ssh_authorized_key = "${var.ssh_authorized_key}"
providers = {
local = "local.default"
null = "null.default"
template = "template.default"
tls = "tls.default"
}
# bare-metal
cluster_name = "${var.cluster_name}"
container_linux_channel = "${var.container_linux_channel}"
container_linux_version = "${var.container_linux_version}"
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
os_channel = "${var.os_channel}"
os_version = "${var.os_version}"
# Machines
# configuration
k8s_domain_name = "${var.k8s_domain_name}"
ssh_authorized_key = "${var.ssh_authorized_key}"
asset_dir = "${var.asset_dir}"
# machines
controller_names = "${var.controller_names}"
controller_macs = "${var.controller_macs}"
controller_domains = "${var.controller_domains}"
@@ -17,11 +28,10 @@ module "cluster" {
worker_macs = "${var.worker_macs}"
worker_domains = "${var.worker_domains}"
# bootkube assets
k8s_domain_name = "${var.k8s_domain_name}"
asset_dir = "${var.asset_dir}"
# Optional
container_linux_oem = "${var.container_linux_oem}"
experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
# optional
networking = "${var.networking}"
cached_install = "${var.cached_install}"
install_disk = "${var.install_disk}"
container_linux_oem = "${var.container_linux_oem}"
kernel_args = "${var.kernel_args}"
}


@@ -0,0 +1,22 @@
matchbox_http_endpoint = "http://matchbox.example.com:8080"
matchbox_rpc_endpoint = "matchbox.example.com:8081"
# ssh_authorized_key = "ADD ME"
cluster_name = "example"
os_channel = "coreos-stable"
os_version = "1576.5.0"
# Machines
controller_names = ["node1", "node2", "node3"]
controller_macs = ["52:54:00:a1:9c:ae", "52:54:00:b2:2f:86", "52:54:00:c3:61:77"]
controller_domains = ["node1.example.com", "node2.example.com", "node3.example.com"]
worker_names = ["node4"]
worker_macs = ["52:54:00:d7:99:c7"]
worker_domains = ["node4.example.com"]
# Bootkube
k8s_domain_name = "cluster.example.com"
asset_dir = "assets"
# Optional
# container_linux_oem = ""


@@ -5,3 +5,23 @@ provider "matchbox" {
client_key = "${file("~/.matchbox/client.key")}"
ca = "${file("~/.matchbox/ca.crt")}"
}
provider "local" {
version = "~> 1.0"
alias = "default"
}
provider "null" {
version = "~> 1.0"
alias = "default"
}
provider "template" {
version = "~> 1.0"
alias = "default"
}
provider "tls" {
version = "~> 1.0"
alias = "default"
}
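
After adding the aliased providers, Terraform needs to install the corresponding plugins before a plan will work. A quick sketch:

```sh
terraform init

# Confirm the matchbox, local, null, template, and tls providers are registered
terraform providers
```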


@@ -3,8 +3,8 @@ matchbox_rpc_endpoint = "matchbox.example.com:8081"
# ssh_authorized_key = "ADD ME"
cluster_name = "example"
container_linux_version = "1298.7.0"
container_linux_channel = "stable"
os_channel = "coreos-stable"
os_version = "1576.5.0"
# Machines
controller_names = ["node1"]
@@ -18,6 +18,8 @@ worker_domains = ["node2.example.com", "node3.example.com"]
k8s_domain_name = "cluster.example.com"
asset_dir = "assets"
# Optional
# Optional (defaults)
cached_install = "true"
# install_disk = "/dev/sda"
# container_linux_oem = ""
# experimental_self_hosted_etcd = "true"
# networking = "flannel"


@@ -8,14 +8,14 @@ variable "matchbox_rpc_endpoint" {
description = "Matchbox gRPC API endpoint, without the protocol (e.g. matchbox.example.com:8081)"
}
variable "container_linux_channel" {
variable "os_channel" {
type = "string"
description = "Container Linux channel corresponding to the container_linux_version"
description = "Channel for a Container Linux derivative"
}
variable "container_linux_version" {
variable "os_version" {
type = "string"
description = "Container Linux version of the kernel/initrd to PXE or the image to install"
description = "Version for a Container Linux to PXE and install"
}
variable "cluster_name" {
@@ -62,6 +62,12 @@ variable "k8s_domain_name" {
type = "string"
}
variable "networking" {
description = "Choice of networking provider (flannel or calico)"
type = "string"
default = "flannel"
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
@@ -78,8 +84,23 @@ variable "service_cidr" {
CIDR IP range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
EOD
type = "string"
default = "10.3.0.0/16"
}
# optional
variable "cached_install" {
type = "string"
default = "10.3.0.0/16"
default = "false"
description = "Whether Container Linux should PXE boot and install from matchbox /assets cache. Note that the admin must have downloaded the os_version into matchbox assets."
}
variable "install_disk" {
type = "string"
default = "/dev/sda"
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
}
variable "container_linux_oem" {
@@ -88,7 +109,8 @@ variable "container_linux_oem" {
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
}
variable "experimental_self_hosted_etcd" {
default = "false"
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on-hosts"
variable "kernel_args" {
description = "Additional kernel arguments to provide at PXE boot."
type = "list"
default = []
}
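# A usage sketch (illustrative values only): extra args can be set in
# terraform.tfvars, for example a serial console and Container Linux autologin:
#
#   kernel_args = [
#     "console=ttyS0,115200n8",
#     "coreos.autologin",
#   ]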

View File

@@ -37,6 +37,19 @@ ssh_authorized_key = "ADD ME"
Configs in `etcd3-install` configure the matchbox provider, define profiles (e.g. `cached-container-linux-install`, `etcd3`), and define 3 groups which match machines by MAC address to a profile. These resources declare that the machines should PXE boot, install Container Linux to disk, and provision themselves as peers in a 3-node etcd3 cluster.
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
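For orientation, each group in those configs pairs one machine's MAC address with a profile and per-machine metadata. A minimal sketch (the node name, MAC address, and metadata below are illustrative, not the exact contents of the example configs):
```hcl
// Match one machine by MAC to the cached install profile (illustrative values).
resource "matchbox_group" "node1-install" {
  name    = "install-node1"
  profile = "${module.profiles.cached-container-linux-install}"

  selector {
    mac = "52:54:00:a1:9c:ae"
  }

  metadata {
    ssh_authorized_key = "${var.ssh_authorized_key}"
  }
}
```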
### Optional
You may set certain optional variables to override defaults.
```hcl
# install_disk = "/dev/sda"
# container_linux_oem = ""
```
## Apply
Fetch the [profiles](../README.md#modules) Terraform [module](https://www.terraform.io/docs/modules/index.html), which lets you use common machine profiles maintained in the matchbox repo (like `etcd3`).
```sh
@@ -52,8 +65,6 @@ $ terraform apply
Apply complete! Resources: 10 added, 0 changed, 0 destroyed.
```
Note: The `cached-container-linux-install` profile will PXE boot and install Container Linux from matchbox [assets](https://github.com/coreos/matchbox/blob/master/Documentation/api.md#assets). If you have not populated the assets cache, use the `container-linux-install` profile to use public images (slower).
## Machines
Power on each machine (with PXE boot device on next boot). Machines should network boot, install Container Linux to disk, reboot, and provision themselves as a 3-node etcd3 cluster.
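If your machines expose a BMC, one way to do this is with `ipmitool`. This is a sketch only; the BMC hostnames and credentials below are placeholders:
```sh
for bmc in node1-bmc node2-bmc node3-bmc; do
  # request PXE for the next boot only, then power cycle
  ipmitool -H $bmc -U admin -P changeme chassis bootdev pxe
  ipmitool -H $bmc -U admin -P changeme power cycle
done
```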
@@ -82,7 +93,6 @@ $ systemctl status etcd-member
Verify that etcd3 peers are healthy and communicating.
```sh
$ ETCDCTL_API=3
$ etcdctl cluster-health
$ etcdctl set /message hello
$ etcdctl get /message
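# Note: the commands above are etcdctl v2 style; with ETCDCTL_API=3 the
# equivalent health check and key read/write are (default endpoints assumed):
$ ETCDCTL_API=3 etcdctl endpoint health
$ ETCDCTL_API=3 etcdctl put message hello
$ ETCDCTL_API=3 etcdctl get message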

View File

@@ -2,8 +2,10 @@
module "profiles" {
source = "../modules/profiles"
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
container_linux_version = "1298.7.0"
container_linux_version = "1576.5.0"
container_linux_channel = "stable"
install_disk = "${var.install_disk}"
container_linux_oem = "${var.container_linux_oem}"
}
// Install Container Linux to disk before provisioning
@@ -12,13 +14,9 @@ resource "matchbox_group" "default" {
profile = "${module.profiles.cached-container-linux-install}"
// No selector, matches all nodes
metadata {
container_linux_channel = "stable"
container_linux_version = "1298.7.0"
container_linux_oem = "${var.container_linux_oem}"
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
baseurl = "${var.matchbox_http_endpoint}/assets/coreos"
ssh_authorized_key = "${var.ssh_authorized_key}"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}

View File

@@ -2,5 +2,6 @@ matchbox_http_endpoint = "http://matchbox.example.com:8080"
matchbox_rpc_endpoint = "matchbox.example.com:8081"
# ssh_authorized_key = "ADD ME"
# Optional
# Optional (defaults)
# install_disk = "/dev/sda"
# container_linux_oem = ""

View File

@@ -13,8 +13,16 @@ variable "ssh_authorized_key" {
description = "SSH public key to set as an authorized_key on machines"
}
# optional
variable "install_disk" {
type = "string"
default = "/dev/sda"
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
}
variable "container_linux_oem" {
type = "string"
default = ""
type = "string"
default = ""
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
}

View File

@@ -4,7 +4,7 @@ Matchbox provides Terraform [modules](https://www.terraform.io/docs/modules/usag
```hcl
module "profiles" {
source = "git::https://github.com/coreos/matchbox.git//examples/terraform/modules/profiles?ref=4451425db8f230012c36de6e6628c72aa34e1c10"
source = "git::https://github.com/coreos/matchbox.git//examples/terraform/modules/profiles?ref=08f4e9908b167fba608e60169ec6a803df9db37f"
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
container_linux_version = "${var.container_linux_version}"
container_linux_channel = "${var.container_linux_channel}"
@@ -27,9 +27,6 @@ Available modules:
| | cached-container-linux-install | Install Container Linux to disk from matchbox assets cache |
| | etcd3 | Provision an etcd3 peer node |
| | etcd3-gateway | Provision an etcd3 gateway node |
| | bootkube-controller | Provision a self-hosted Kubernetes controller/master node |
| | bootkube-worker | Provision a self-hosted Kubernetes worker node |
| bootkube | | Creates a multi-controller, multi-worker self-hosted Kubernetes cluster |
## Customization

View File

@@ -1,12 +0,0 @@
# Self-hosted Kubernetes assets (kubeconfig, manifests)
module "bootkube" {
source = "git::https://github.com/dghubble/bootkube-terraform.git?ref=3720aff28a465987e079dcd74fe3b6d5046d7010"
cluster_name = "${var.cluster_name}"
api_servers = ["${var.k8s_domain_name}"]
etcd_servers = ["http://127.0.0.1:2379"]
asset_dir = "${var.asset_dir}"
pod_cidr = "${var.pod_cidr}"
service_cidr = "${var.service_cidr}"
experimental_self_hosted_etcd = "${var.experimental_self_hosted_etcd}"
}

View File

@@ -1,61 +0,0 @@
// Install Container Linux to disk
resource "matchbox_group" "container-linux-install" {
count = "${length(var.controller_names) + length(var.worker_names)}"
name = "${format("container-linux-install-%s", element(concat(var.controller_names, var.worker_names), count.index))}"
profile = "${module.profiles.cached-container-linux-install}"
selector {
mac = "${element(concat(var.controller_macs, var.worker_macs), count.index)}"
}
metadata {
container_linux_channel = "${var.container_linux_channel}"
container_linux_version = "${var.container_linux_version}"
container_linux_oem = "${var.container_linux_oem}"
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
baseurl = "${var.matchbox_http_endpoint}/assets/coreos"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}
resource "matchbox_group" "controller" {
count = "${length(var.controller_names)}"
name = "${format("%s-%s", var.cluster_name, element(var.controller_names, count.index))}"
profile = "${module.profiles.bootkube-controller}"
selector {
mac = "${element(var.controller_macs, count.index)}"
os = "installed"
}
metadata {
domain_name = "${element(var.controller_domains, count.index)}"
etcd_name = "${element(var.controller_names, count.index)}"
etcd_initial_cluster = "${join(",", formatlist("%s=http://%s:2380", var.controller_names, var.controller_domains))}"
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
k8s_etcd_service_ip = "${module.bootkube.etcd_service_ip}"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}
resource "matchbox_group" "worker" {
count = "${length(var.worker_names)}"
name = "${format("%s-%s", var.cluster_name, element(var.worker_names, count.index))}"
profile = "${module.profiles.bootkube-worker}"
selector {
mac = "${element(var.worker_macs, count.index)}"
os = "installed"
}
metadata {
domain_name = "${element(var.worker_domains, count.index)}"
etcd_endpoints = "${join(",", formatlist("%s:2379", var.controller_domains))}"
etcd_on_host = "${var.experimental_self_hosted_etcd ? "false" : "true"}"
k8s_dns_service_ip = "${module.bootkube.kube_dns_service_ip}"
k8s_etcd_service_ip = "${module.bootkube.etcd_service_ip}"
ssh_authorized_key = "${var.ssh_authorized_key}"
}
}

View File

@@ -1,7 +0,0 @@
// Create common profiles
module "profiles" {
source = "../profiles"
matchbox_http_endpoint = "${var.matchbox_http_endpoint}"
container_linux_version = "${var.container_linux_version}"
container_linux_channel = "${var.container_linux_channel}"
}

View File

@@ -1,51 +0,0 @@
# Secure copy kubeconfig to all nodes to activate kubelet.service
resource "null_resource" "copy-kubeconfig" {
count = "${length(var.controller_names) + length(var.worker_names)}"
connection {
type = "ssh"
host = "${element(concat(var.controller_domains, var.worker_domains), count.index)}"
user = "core"
timeout = "60m"
}
provisioner "file" {
content = "${module.bootkube.kubeconfig}"
destination = "$HOME/kubeconfig"
}
provisioner "remote-exec" {
inline = [
"sudo mv /home/core/kubeconfig /etc/kubernetes/kubeconfig",
]
}
}
# Secure copy bootkube assets to ONE controller and start bootkube to perform
# one-time self-hosted cluster bootstrapping.
resource "null_resource" "bootkube-start" {
# Without depends_on, this remote-exec may start before the kubeconfig copy.
# Terraform only does one task at a time, so it would try to bootstrap
# Kubernetes and Tectonic while no Kubelets are running. Ensure all nodes
# receive a kubeconfig before proceeding with bootkube and tectonic.
depends_on = ["null_resource.copy-kubeconfig"]
connection {
type = "ssh"
host = "${element(var.controller_domains, 0)}"
user = "core"
timeout = "60m"
}
provisioner "file" {
source = "${var.asset_dir}"
destination = "$HOME/assets"
}
provisioner "remote-exec" {
inline = [
"sudo mv /home/core/assets /opt/bootkube",
"sudo systemctl start bootkube",
]
}
}

View File

@@ -1,89 +0,0 @@
variable "matchbox_http_endpoint" {
type = "string"
description = "Matchbox HTTP read-only endpoint (e.g. http://matchbox.example.com:8080)"
}
variable "container_linux_channel" {
type = "string"
description = "Container Linux channel corresponding to the container_linux_version"
}
variable "container_linux_version" {
type = "string"
description = "Container Linux version of the kernel/initrd to PXE or the image to install"
}
variable "cluster_name" {
type = "string"
description = "Cluster name"
}
variable "ssh_authorized_key" {
type = "string"
description = "SSH public key to set as an authorized_key on machines"
}
# Machines
# Terraform's crude "type system" does not properly support lists of maps, so we do this.
variable "controller_names" {
type = "list"
}
variable "controller_macs" {
type = "list"
}
variable "controller_domains" {
type = "list"
}
variable "worker_names" {
type = "list"
}
variable "worker_macs" {
type = "list"
}
variable "worker_domains" {
type = "list"
}
# bootkube assets
variable "k8s_domain_name" {
description = "Controller DNS name which resolves to a controller instance. Workers and kubeconfig's will communicate with this endpoint (e.g. cluster.example.com)"
type = "string"
}
variable "asset_dir" {
description = "Path to a directory where generated assets should be placed (contains secrets)"
type = "string"
}
variable "pod_cidr" {
description = "CIDR IP range to assign Kubernetes pods"
type = "string"
default = "10.2.0.0/16"
}
variable "service_cidr" {
description = <<EOD
CIDR IP range to assign Kubernetes services.
The 1st IP will be reserved for kube_apiserver, the 10th IP will be reserved for kube-dns, the 15th IP will be reserved for self-hosted etcd, and the 200th IP will be reserved for bootstrap self-hosted etcd.
EOD
type = "string"
default = "10.3.0.0/16"
}
variable "container_linux_oem" {
type = "string"
default = ""
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
}
variable "experimental_self_hosted_etcd" {
default = "false"
description = "Create self-hosted etcd cluster as pods on Kubernetes, instead of on-hosts"
}

View File

@@ -1,174 +0,0 @@
---
systemd:
units:
{{ if eq .etcd_on_host "true" }}
- name: etcd-member.service
enable: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
Environment="ETCD_NAME={{.etcd_name}}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379"
Environment="ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380"
Environment="ETCD_INITIAL_CLUSTER={{.etcd_initial_cluster}}"
Environment="ETCD_STRICT_RECONFIG_CHECK=true"
{{ end }}
- name: docker.service
enable: true
- name: locksmithd.service
dropins:
- name: 40-etcd-lock.conf
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
{{ if eq .etcd_on_host "false" -}}
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
{{ end }}
- name: kubelet.path
enable: true
contents: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- name: wait-for-dns.service
enable: true
contents: |
[Unit]
Description=Wait for DNS entries
Wants=systemd-resolved.service
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
contents: |
[Unit]
Description=Kubelet via Hyperkube ACI
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
--volume=resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log"
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--client-ca-file=/etc/kubernetes/ca.crt \
--anonymous-auth=false \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--hostname-override={{.domain_name}} \
--node-labels=node-role.kubernetes.io/master \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--cluster_dns={{.k8s_dns_service_ip}} \
--cluster_domain=cluster.local
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
- name: bootkube.service
contents: |
[Unit]
Description=Bootstrap a Kubernetes control plane with a temp api-server
ConditionPathExists=!/opt/bootkube/init_bootkube.done
[Service]
Type=oneshot
RemainAfterExit=true
WorkingDirectory=/opt/bootkube
ExecStart=/opt/bootkube/bootkube-start
ExecStartPost=/bin/touch /opt/bootkube/init_bootkube.done
storage:
{{ if index . "pxe" }}
disks:
- device: /dev/sda
wipe_table: true
partitions:
- label: ROOT
filesystems:
- name: root
mount:
device: "/dev/sda1"
format: "ext4"
create:
force: true
options:
- "-LROOT"
{{end}}
files:
- path: /etc/kubernetes/kubelet.env
filesystem: root
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
- path: /etc/hostname
filesystem: root
mode: 0644
contents:
inline:
{{.domain_name}}
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
- path: /opt/bootkube/bootkube-start
filesystem: root
mode: 0544
user:
id: 500
group:
id: 500
contents:
inline: |
#!/bin/bash
# Wrapper for bootkube start
set -e
# Move experimental manifests
[ -d /opt/bootkube/assets/experimental/manifests ] && mv /opt/bootkube/assets/experimental/manifests/* /opt/bootkube/assets/manifests && rm -r /opt/bootkube/assets/experimental/manifests
[ -d /opt/bootkube/assets/experimental/bootstrap-manifests ] && mv /opt/bootkube/assets/experimental/bootstrap-manifests/* /opt/bootkube/assets/bootstrap-manifests && rm -r /opt/bootkube/assets/experimental/bootstrap-manifests
BOOTKUBE_ACI="${BOOTKUBE_ACI:-quay.io/coreos/bootkube}"
BOOTKUBE_VERSION="${BOOTKUBE_VERSION:-v0.4.4}"
BOOTKUBE_ASSETS="${BOOTKUBE_ASSETS:-/opt/bootkube/assets}"
exec /usr/bin/rkt run \
--trust-keys-from-https \
--volume assets,kind=host,source=$BOOTKUBE_ASSETS \
--mount volume=assets,target=/assets \
--volume bootstrap,kind=host,source=/etc/kubernetes \
--mount volume=bootstrap,target=/etc/kubernetes \
$RKT_OPTS \
${BOOTKUBE_ACI}:${BOOTKUBE_VERSION} \
--net=host \
--dns=host \
--exec=/bootkube -- start --asset-dir=/assets "$@"
passwd:
users:
- name: core
ssh_authorized_keys:
- {{.ssh_authorized_key}}

View File

@@ -1,131 +0,0 @@
---
systemd:
units:
{{ if eq .etcd_on_host "true" }}
- name: etcd-member.service
enable: true
dropins:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
ExecStart=
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
--listen-addr=127.0.0.1:2379 \
--endpoints={{.etcd_endpoints}}
{{ end }}
- name: docker.service
enable: true
- name: locksmithd.service
dropins:
- name: 40-etcd-lock.conf
contents: |
[Service]
Environment="REBOOT_STRATEGY=etcd-lock"
{{ if eq .etcd_on_host "false" -}}
Environment="LOCKSMITHD_ENDPOINT=http://{{.k8s_etcd_service_ip}}:2379"
{{ end }}
- name: kubelet.path
enable: true
contents: |
[Unit]
Description=Watch for kubeconfig
[Path]
PathExists=/etc/kubernetes/kubeconfig
[Install]
WantedBy=multi-user.target
- name: wait-for-dns.service
enable: true
contents: |
[Unit]
Description=Wait for DNS entries
Wants=systemd-resolved.service
Before=kubelet.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/bin/sh -c 'while ! /usr/bin/grep '^[^#[:space:]]' /etc/resolv.conf > /dev/null; do sleep 1; done'
[Install]
RequiredBy=kubelet.service
- name: kubelet.service
contents: |
[Unit]
Description=Kubelet via Hyperkube ACI
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.env
Environment="RKT_RUN_ARGS=--uuid-file-save=/var/run/kubelet-pod.uuid \
--volume=resolv,kind=host,source=/etc/resolv.conf \
--mount volume=resolv,target=/etc/resolv.conf \
--volume var-lib-cni,kind=host,source=/var/lib/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
--volume var-log,kind=host,source=/var/log \
--mount volume=var-log,target=/var/log"
ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
ExecStartPre=/bin/mkdir -p /etc/kubernetes/inactive-manifests
ExecStartPre=/bin/mkdir -p /var/lib/cni
ExecStartPre=/usr/bin/bash -c "grep 'certificate-authority-data' /etc/kubernetes/kubeconfig | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt"
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet-pod.uuid
ExecStart=/usr/lib/coreos/kubelet-wrapper \
--kubeconfig=/etc/kubernetes/kubeconfig \
--require-kubeconfig \
--client-ca-file=/etc/kubernetes/ca.crt \
--anonymous-auth=false \
--cni-conf-dir=/etc/kubernetes/cni/net.d \
--network-plugin=cni \
--lock-file=/var/run/lock/kubelet.lock \
--exit-on-lock-contention \
--pod-manifest-path=/etc/kubernetes/manifests \
--allow-privileged \
--hostname-override={{.domain_name}} \
--node-labels=node-role.kubernetes.io/node \
--cluster_dns={{.k8s_dns_service_ip}} \
--cluster_domain=cluster.local
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet-pod.uuid
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
storage:
{{ if index . "pxe" }}
disks:
- device: /dev/sda
wipe_table: true
partitions:
- label: ROOT
filesystems:
- name: root
mount:
device: "/dev/sda1"
format: "ext4"
create:
force: true
options:
- "-LROOT"
{{end}}
files:
- path: /etc/kubernetes/kubelet.env
filesystem: root
mode: 0644
contents:
inline: |
KUBELET_IMAGE_URL=quay.io/coreos/hyperkube
KUBELET_IMAGE_TAG=v1.6.4_coreos.0
- path: /etc/hostname
filesystem: root
mode: 0644
contents:
inline:
{{.domain_name}}
- path: /etc/sysctl.d/max-user-watches.conf
filesystem: root
contents:
inline: |
fs.inotify.max_user_watches=16184
passwd:
users:
- name: core
ssh_authorized_keys:
- {{.ssh_authorized_key}}

View File

@@ -20,8 +20,14 @@ storage:
contents:
inline: |
#!/bin/bash -ex
curl "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
coreos-install -d /dev/sda -C {{.container_linux_channel}} -V {{.container_linux_version}} -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}} {{if index . "container_linux_oem"}}-o {{.container_linux_oem}}{{end}}
curl --retry 10 "${ignition_endpoint}?{{.request.raw_query}}&os=installed" -o ignition.json
coreos-install \
-d ${install_disk} \
-C ${container_linux_channel} \
-V ${container_linux_version} \
-o "${container_linux_oem}" \
${baseurl_flag} \
-i ignition.json
udevadm settle
systemctl reboot
passwd:

View File

@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
Environment="ETCD_IMAGE_TAG=v3.2.0"
ExecStart=
ExecStart=/usr/lib/coreos/etcd-wrapper gateway start \
--listen-addr=127.0.0.1:2379 \

View File

@@ -7,7 +7,7 @@ systemd:
- name: 40-etcd-cluster.conf
contents: |
[Service]
Environment="ETCD_IMAGE_TAG=v3.1.6"
Environment="ETCD_IMAGE_TAG=v3.2.0"
Environment="ETCD_NAME={{.etcd_name}}"
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{.domain_name}}:2379"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{.domain_name}}:2380"

View File

@@ -13,11 +13,3 @@ output "etcd3" {
output "etcd3-gateway" {
value = "${matchbox_profile.etcd3-gateway.name}"
}
output "bootkube-controller" {
value = "${matchbox_profile.bootkube-controller.name}"
}
output "bootkube-worker" {
value = "${matchbox_profile.bootkube-worker.name}"
}

View File

@@ -8,13 +8,29 @@ resource "matchbox_profile" "container-linux-install" {
]
args = [
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
]
container_linux_config = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
container_linux_config = "${data.template_file.container-linux-install-config.rendered}"
}
data "template_file" "container-linux-install-config" {
template = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
vars {
container_linux_channel = "${var.container_linux_channel}"
container_linux_version = "${var.container_linux_version}"
ignition_endpoint = "${format("%s/ignition", var.matchbox_http_endpoint)}"
install_disk = "${var.install_disk}"
container_linux_oem = "${var.container_linux_oem}"
# only cached-container-linux profile adds -b baseurl
baseurl_flag = ""
}
}
// Container Linux Install profile (from matchbox /assets cache)
@@ -28,13 +44,29 @@ resource "matchbox_profile" "cached-container-linux-install" {
]
args = [
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",
"console=ttyS0",
]
container_linux_config = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
container_linux_config = "${data.template_file.cached-container-linux-install-config.rendered}"
}
data "template_file" "cached-container-linux-install-config" {
template = "${file("${path.module}/cl/container-linux-install.yaml.tmpl")}"
vars {
container_linux_channel = "${var.container_linux_channel}"
container_linux_version = "${var.container_linux_version}"
ignition_endpoint = "${format("%s/ignition", var.matchbox_http_endpoint)}"
install_disk = "${var.install_disk}"
container_linux_oem = "${var.container_linux_oem}"
# profile uses -b baseurl to install from matchbox cache
baseurl_flag = "-b ${var.matchbox_http_endpoint}/assets/coreos"
}
}
// etcd3 profile
@@ -48,15 +80,3 @@ resource "matchbox_profile" "etcd3-gateway" {
name = "etcd3-gateway"
container_linux_config = "${file("${path.module}/cl/etcd3-gateway.yaml.tmpl")}"
}
// Self-hosted Kubernetes (bootkube) Controller profile
resource "matchbox_profile" "bootkube-controller" {
name = "bootkube-controller"
container_linux_config = "${file("${path.module}/cl/bootkube-controller.yaml.tmpl")}"
}
// Self-hosted Kubernetes (bootkube) Worker profile
resource "matchbox_profile" "bootkube-worker" {
name = "bootkube-worker"
container_linux_config = "${file("${path.module}/cl/bootkube-worker.yaml.tmpl")}"
}

View File

@@ -12,3 +12,17 @@ variable "container_linux_channel" {
type = "string"
description = "Container Linux channel corresponding to the container_linux_version"
}
# optional
variable "install_disk" {
type = "string"
default = "/dev/sda"
description = "Disk device to which the install profiles should install Container Linux (e.g. /dev/sda)"
}
variable "container_linux_oem" {
type = "string"
default = ""
description = "Specify an OEM image id to use as base for the installation (e.g. ami, vmware_raw, xen) or leave blank for the default image"
}

View File

@@ -20,7 +20,7 @@ storage:
contents:
inline: |
#!/bin/bash -ex
curl "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
curl --retry 10 "{{.ignition_endpoint}}?{{.request.raw_query}}&os=installed" -o ignition.json
coreos-install -d /dev/sda -C stable -V current -i ignition.json {{if index . "baseurl"}}-b {{.baseurl}}{{end}}
udevadm settle
systemctl reboot

View File

@@ -10,7 +10,7 @@ resource "matchbox_group" "default" {
}
}
// Match machines which have CoreOS installed
// Match machines which have CoreOS Container Linux installed
resource "matchbox_group" "node1" {
name = "node1"
profile = "${matchbox_profile.simple.name}"

View File

@@ -8,6 +8,7 @@ resource "matchbox_profile" "coreos-install" {
]
args = [
"initrd=coreos_production_pxe_image.cpio.gz",
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
"coreos.first_boot=yes",
"console=tty0",

20
glide.lock generated
View File

@@ -1,20 +1,23 @@
hash: 205de0b66ed059a1f10d3fb36c7d465439818123940a9aaa68ddc71cc3bbfddd
updated: 2017-04-17T17:09:48.864562358-07:00
hash: b404b094b7ff5d83fac658393148a51f2b3f74ce1026502524be71772c30e9b2
updated: 2017-11-06T13:24:02.819805752-08:00
imports:
- name: github.com/ajeddeloh/go-json
version: 73d058cf8437a1989030afe571eeab9f90eebbbd
- name: github.com/ajeddeloh/yaml
version: 1072abfea31191db507785e2e0c1b8d1440d35a5
version: 6b94386aeefd8c4b8470aee72bfca084c2f91da9
- name: github.com/alecthomas/units
version: 6b4e7dc5e3143b85ea77909c72caf89416fc2915
- name: github.com/camlistore/camlistore
version: 9106ce829629773474c689b34aacd7d3aaa99426
- name: github.com/coreos/container-linux-config-transpiler
version: 12554ca0a5ce8ea4a6c594242ccb23d8b9bff493
version: be4cb16b0aaf0f6b4fdf63b8b2a081397276bf0f
subpackages:
- config
- config/astyaml
- config/platform
- config/templating
- config/types
- config/types/util
- name: github.com/coreos/coreos-cloudinit
version: 5be99bf577f2768193c7fb587ef5a8806c1503cf
subpackages:
@@ -29,7 +32,7 @@ imports:
- journal
- unit
- name: github.com/coreos/ignition
version: d75d0aa3bf307f0954ce4ea8cac56dacec8d16ce
version: 01c039a5ce59acd39e5741713e59abfcb74d0782
subpackages:
- config
- config/types
@@ -37,8 +40,11 @@ imports:
- config/v1/types
- config/v2_0
- config/v2_0/types
- config/v2_1
- config/v2_1/types
- config/validate
- config/validate/astjson
- config/validate/astnode
- config/validate/report
- name: github.com/coreos/pkg
version: 66fe44ad037ccb80329115cb4db0dbe8e9beb03a
@@ -80,7 +86,7 @@ imports:
subpackages:
- errorutil
- name: golang.org/x/crypto
version: 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
version: 7e9105388ebff089b3f99f0ef676ea55a6da3a7e
subpackages:
- cast5
- openpgp
@@ -98,7 +104,7 @@ imports:
- internal/timeseries
- trace
- name: golang.org/x/sys
version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
subpackages:
- unix
- name: google.golang.org/grpc

View File

@@ -19,13 +19,13 @@ import:
- transport
# Container Linux Config Transpiler and Ignition
- package: github.com/coreos/container-linux-config-transpiler
version: v0.2.2
version: v0.5.0
subpackages:
- config
- config/types
- config/templating
- package: github.com/coreos/ignition
version: d75d0aa3bf307f0954ce4ea8cac56dacec8d16ce
version: v0.19.0
subpackages:
- config
- config/types
@@ -35,7 +35,7 @@ import:
- config/validate/astjson
- config/validate/report
- package: github.com/ajeddeloh/yaml
version: 1072abfea31191db507785e2e0c1b8d1440d35a5
version: 6b94386aeefd8c4b8470aee72bfca084c2f91da9
- package: github.com/vincent-petithory/dataurl
version: 9a301d65acbb728fcc3ace14f45f511a4cfeea9c
- package: github.com/alecthomas/units
@@ -59,7 +59,7 @@ import:
- package: github.com/spf13/cobra
version: 65a708cee0a4424f4e353d031ce440643e312f92
- package: golang.org/x/crypto
version: 5dc8cb4b8a8eb076cbb5a06bc3b8682c15bdbbd3
version: 7e9105388ebff089b3f99f0ef676ea55a6da3a7e
subpackages:
- cast5
- openpgp

16
matchbox/cli/generic.go Normal file
View File

@@ -0,0 +1,16 @@
package cli
import (
"github.com/spf13/cobra"
)
// genericCmd represents the generic command
var genericCmd = &cobra.Command{
Use: "generic",
Short: "Manage Generic templates",
Long: `Manage Generic templates`,
}
func init() {
RootCmd.AddCommand(genericCmd)
}

View File

@@ -0,0 +1,48 @@
package cli
import (
"io/ioutil"
"path/filepath"
"context"
"github.com/spf13/cobra"
pb "github.com/coreos/matchbox/matchbox/server/serverpb"
)
// genericPutCmd creates and updates Generic templates.
var (
genericPutCmd = &cobra.Command{
Use: "create --file FILENAME",
Short: "Create an Generic template",
Long: `Create an Generic template`,
Run: runGenericPutCmd,
}
)
func init() {
genericCmd.AddCommand(genericPutCmd)
genericPutCmd.Flags().StringVarP(&flagFilename, "filename", "f", "", "filename to use to create a Generic template")
genericPutCmd.MarkFlagRequired("filename")
}
func runGenericPutCmd(cmd *cobra.Command, args []string) {
if len(flagFilename) == 0 {
cmd.Help()
return
}
if err := validateArgs(cmd, args); err != nil {
return
}
client := mustClientFromCmd(cmd)
config, err := ioutil.ReadFile(flagFilename)
if err != nil {
exitWithError(ExitError, err)
}
req := &pb.GenericPutRequest{Name: filepath.Base(flagFilename), Config: config}
_, err = client.Generic.GenericPut(context.TODO(), req)
if err != nil {
exitWithError(ExitError, err)
}
}

View File

@@ -31,7 +31,7 @@ func runProfileDescribeCmd(cmd *cobra.Command, args []string) {
tw := newTabWriter(os.Stdout)
defer tw.Flush()
// legend
fmt.Fprintf(tw, "ID\tNAME\tIGNITION\tCLOUD\tKERNEL\tINITRD\tCMDLINE\n")
fmt.Fprintf(tw, "ID\tNAME\tIGNITION\tCLOUD\tKERNEL\tINITRD\tARGS\n")
client := mustClientFromCmd(cmd)
request := &pb.ProfileGetRequest{
@@ -42,5 +42,5 @@ func runProfileDescribeCmd(cmd *cobra.Command, args []string) {
return
}
p := resp.Profile
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\t%#v\n", p.Id, p.Name, p.IgnitionId, p.CloudId, p.Boot.Kernel, p.Boot.Initrd, p.Boot.Cmdline)
fmt.Fprintf(tw, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", p.Id, p.Name, p.IgnitionId, p.CloudId, p.Boot.Kernel, p.Boot.Initrd, p.Boot.Args)
}

View File

@@ -3,6 +3,8 @@ package client
import (
"crypto/tls"
"errors"
"fmt"
"net"
"time"
"google.golang.org/grpc"
@@ -31,6 +33,8 @@ type Client struct {
Groups rpcpb.GroupsClient
Profiles rpcpb.ProfilesClient
Ignition rpcpb.IgnitionClient
Generic rpcpb.GenericClient
Select rpcpb.SelectClient
conn *grpc.ClientConn
}
@@ -39,6 +43,11 @@ func New(config *Config) (*Client, error) {
if len(config.Endpoints) == 0 {
return nil, errNoEndpoints
}
for _, endpoint := range config.Endpoints {
if _, _, err := net.SplitHostPort(endpoint); err != nil {
return nil, fmt.Errorf("client: invalid host:port endpoint: %v", err)
}
}
return newClient(config)
}
@@ -57,6 +66,8 @@ func newClient(config *Config) (*Client, error) {
Groups: rpcpb.NewGroupsClient(conn),
Profiles: rpcpb.NewProfilesClient(conn),
Ignition: rpcpb.NewIgnitionClient(conn),
Generic: rpcpb.NewGenericClient(conn),
Select: rpcpb.NewSelectClient(conn),
}
return client, nil
}

View File

@@ -14,3 +14,20 @@ func TestNew_MissingEndpoints(t *testing.T) {
assert.Nil(t, client)
assert.Equal(t, errNoEndpoints, err)
}
// gRPC expects host:port with no scheme (e.g. matchbox.example.com:8081)
func TestNew_InvalidEndpoints(t *testing.T) {
invalid := []string{
"matchbox.example.com",
"http://matchbox.example.com:8081",
"https://matchbox.example.com:8081",
}
for _, endpoint := range invalid {
client, err := New(&Config{
Endpoints: []string{endpoint},
})
assert.Nil(t, client)
assert.Error(t, err)
}
}

Some files were not shown because too many files have changed in this diff.