mirror of
https://github.com/poseidon/matchbox.git
synced 2026-03-18 06:30:42 +00:00
Compare commits
585 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e78150218f | ||
|
|
cccb588855 | ||
|
|
9a177e83d7 | ||
|
|
dfd0457e03 | ||
|
|
9de30aea59 | ||
|
|
910ee6f18c | ||
|
|
0994b860b5 | ||
|
|
78f7e8d492 | ||
|
|
e804ace9e2 | ||
|
|
0012d691f4 | ||
|
|
e170c600b3 | ||
|
|
4f229d5d9a | ||
|
|
3cd8ba0a05 | ||
|
|
74f13a2f86 | ||
|
|
4eee84b17d | ||
|
|
845d1d0adc | ||
|
|
5b1c790d0c | ||
|
|
70400b7dd0 | ||
|
|
c6ebdfeb92 | ||
|
|
99acdf4c6b | ||
|
|
be057ed9c8 | ||
|
|
8bb99143e8 | ||
|
|
c802ce5805 | ||
|
|
c4e82c03a4 | ||
|
|
29c93046ef | ||
|
|
34e981dc7c | ||
|
|
3a88a663c3 | ||
|
|
572c8d26eb | ||
|
|
c22b273548 | ||
|
|
c3ef870ce5 | ||
|
|
e9ce7325ab | ||
|
|
948bdee165 | ||
|
|
50e923730e | ||
|
|
1799c8e23e | ||
|
|
454ae972a1 | ||
|
|
fe0c3438fd | ||
|
|
65b410e20b | ||
|
|
dced573acb | ||
|
|
4888c04dee | ||
|
|
4e9d542a87 | ||
|
|
08f4e9908b | ||
|
|
dd96f58417 | ||
|
|
f5ef2d156b | ||
|
|
f673d48007 | ||
|
|
7a58d944d8 | ||
|
|
5d975ec42a | ||
|
|
2404d34b0e | ||
|
|
c9b9711bca | ||
|
|
ae524f57f2 | ||
|
|
f26224c57d | ||
|
|
2c063a4674 | ||
|
|
7d5656ffe3 | ||
|
|
a683e8261e | ||
|
|
c75fc8f88e | ||
|
|
b10c777729 | ||
|
|
5992ba6ad5 | ||
|
|
ca223f800b | ||
|
|
1246d5a0db | ||
|
|
4f7dd0942c | ||
|
|
3e6aa4ee73 | ||
|
|
9c39221b71 | ||
|
|
4103461778 | ||
|
|
9a6d815978 | ||
|
|
6aa8759bfd | ||
|
|
d5027950e2 | ||
|
|
85a2a6b252 | ||
|
|
4bc5fcdc5e | ||
|
|
2f4d5b95e4 | ||
|
|
257f2fa553 | ||
|
|
7829c14d52 | ||
|
|
ce72fb72a0 | ||
|
|
41d5db4723 | ||
|
|
dfd08e48e5 | ||
|
|
347e142db9 | ||
|
|
b63e9b2589 | ||
|
|
4a32b0cd59 | ||
|
|
b0b8d97539 | ||
|
|
581be69da7 | ||
|
|
dc75fcc869 | ||
|
|
fc3e688c97 | ||
|
|
f07dc758c4 | ||
|
|
d2827d7ed0 | ||
|
|
692bf81df8 | ||
|
|
cfcec6ac03 | ||
|
|
592969134c | ||
|
|
2b605c8d9c | ||
|
|
63a95188be | ||
|
|
5aa301b72d | ||
|
|
7647a5d095 | ||
|
|
06f80fa003 | ||
|
|
01a767ab3e | ||
|
|
6be5c0f59c | ||
|
|
5efc514097 | ||
|
|
757f46e96f | ||
|
|
5aeb2d1d3d | ||
|
|
1119bb22f0 | ||
|
|
6195ae377e | ||
|
|
d7783a94e9 | ||
|
|
4228ccb330 | ||
|
|
e5d5280658 | ||
|
|
46f0477614 | ||
|
|
0e4265b2bc | ||
|
|
18de74e85b | ||
|
|
31040e9729 | ||
|
|
f0a4cfd1cb | ||
|
|
aeca5b08f9 | ||
|
|
7c1b9b17dc | ||
|
|
0e6ce19172 | ||
|
|
281fd5226a | ||
|
|
fb0ee0f05a | ||
|
|
7def0d7e86 | ||
|
|
1c076875c2 | ||
|
|
7ba0f1476b | ||
|
|
ec6844a43a | ||
|
|
6857c1319a | ||
|
|
cb6bb3c90d | ||
|
|
5c5be5ce5b | ||
|
|
4cbf2b7448 | ||
|
|
d781e43212 | ||
|
|
3ca88334d2 | ||
|
|
c7a649c731 | ||
|
|
d03f256976 | ||
|
|
9ecfcac0b9 | ||
|
|
035b01634f | ||
|
|
e8d3e8c70c | ||
|
|
cc490ff55d | ||
|
|
df6354ad45 | ||
|
|
3d8a3777f0 | ||
|
|
dfee550522 | ||
|
|
07e9676457 | ||
|
|
a69f6dd2d8 | ||
|
|
26d8b7d480 | ||
|
|
2c02549cd6 | ||
|
|
3c999d27e9 | ||
|
|
52b317dff9 | ||
|
|
97985b213b | ||
|
|
1ba353e5b6 | ||
|
|
398d12e148 | ||
|
|
be8fd3d488 | ||
|
|
27d1139a07 | ||
|
|
ee3445454e | ||
|
|
170f8c09ec | ||
|
|
e10525ded0 | ||
|
|
4c47adf390 | ||
|
|
ce3154cae9 | ||
|
|
5e54960a92 | ||
|
|
e008b8ea5e | ||
|
|
b636fc7a3d | ||
|
|
30cf06853d | ||
|
|
61377d2955 | ||
|
|
a7ba7714f5 | ||
|
|
ff916686e7 | ||
|
|
fbc4b39c59 | ||
|
|
be46b389bf | ||
|
|
a14e6c8bb9 | ||
|
|
c03b7a9627 | ||
|
|
ac40eeedb5 | ||
|
|
9e23f3a86d | ||
|
|
d1baa3fb65 | ||
|
|
c915fc2b52 | ||
|
|
6f02107448 | ||
|
|
ff06990edb | ||
|
|
9bc6edc65b | ||
|
|
5b8006ae35 | ||
|
|
ff5cd0468e | ||
|
|
4d9bd82c12 | ||
|
|
882793f230 | ||
|
|
858e1bda73 | ||
|
|
cfbb9cebd0 | ||
|
|
edbe5bab20 | ||
|
|
299701e7ea | ||
|
|
a20720a0d4 | ||
|
|
5a9c24ceb3 | ||
|
|
82af3f747d | ||
|
|
e955fecd30 | ||
|
|
0c1e20db27 | ||
|
|
8d6d0397ff | ||
|
|
abc7eb8dfb | ||
|
|
149f441ad8 | ||
|
|
cf43908a72 | ||
|
|
523b15ed13 | ||
|
|
aac270e937 | ||
|
|
1cfdce2970 | ||
|
|
9d3d08a26f | ||
|
|
b176de805e | ||
|
|
009b44b25d | ||
|
|
57e473b6f5 | ||
|
|
66cd8da417 | ||
|
|
50a3d11414 | ||
|
|
6fa13007c8 | ||
|
|
500a7b25e1 | ||
|
|
951e5ec4a3 | ||
|
|
f92743fa57 | ||
|
|
d84bb8e398 | ||
|
|
d54562f429 | ||
|
|
395494c1d9 | ||
|
|
ddbe17cd31 | ||
|
|
b1a866370a | ||
|
|
b8326e6db6 | ||
|
|
7864e64fd2 | ||
|
|
89bb5125b5 | ||
|
|
cff053328d | ||
|
|
698b6f6118 | ||
|
|
23f23c1dcb | ||
|
|
51cf859587 | ||
|
|
8061f57346 | ||
|
|
8000c323b6 | ||
|
|
314a317271 | ||
|
|
d437167ebf | ||
|
|
4067702641 | ||
|
|
86c07da76e | ||
|
|
be00fdbca0 | ||
|
|
abbf7faf56 | ||
|
|
76cc8cb13c | ||
|
|
ed6dde528a | ||
|
|
1e095661ad | ||
|
|
3f70f9f2e5 | ||
|
|
dabba64850 | ||
|
|
7a2764b17b | ||
|
|
9de41e29ab | ||
|
|
0592503652 | ||
|
|
40926b6d0f | ||
|
|
859ea5888b | ||
|
|
1736af5024 | ||
|
|
c476cf8928 | ||
|
|
a47087ec6a | ||
|
|
0961e50f64 | ||
|
|
7a017c2d7d | ||
|
|
41aaad3d6f | ||
|
|
ddf1f88cb9 | ||
|
|
af8abc7dc2 | ||
|
|
0d2173e446 | ||
|
|
e9bf13963c | ||
|
|
dbba1316b2 | ||
|
|
34d0f5003a | ||
|
|
79e5240d3f | ||
|
|
46dd95da0c | ||
|
|
f6522a561b | ||
|
|
e4fdcb204e | ||
|
|
81e00d7e79 | ||
|
|
06a9a28d7c | ||
|
|
756c28f2fc | ||
|
|
cc240286f3 | ||
|
|
75e428aece | ||
|
|
51c4371e39 | ||
|
|
ef85730d69 | ||
|
|
3752ee78d5 | ||
|
|
ea9042e86e | ||
|
|
d4e33efb38 | ||
|
|
459ce2d8bc | ||
|
|
31ed8dba2f | ||
|
|
2d69b2d734 | ||
|
|
2aea18e048 | ||
|
|
c2e5196d1a | ||
|
|
47d3dbacb1 | ||
|
|
5e2adb1eda | ||
|
|
7ee68aa1a4 | ||
|
|
e1cabcf8e8 | ||
|
|
6500ed51f3 | ||
|
|
4fb3ea2c7e | ||
|
|
b1beebe855 | ||
|
|
6743944390 | ||
|
|
4451425db8 | ||
|
|
23959a4dd2 | ||
|
|
0825fd2492 | ||
|
|
bb08cd5087 | ||
|
|
a117af6500 | ||
|
|
4304ee2aa5 | ||
|
|
6d6879ca4a | ||
|
|
cf301eed45 | ||
|
|
7bbd1f651f | ||
|
|
6455528f3c | ||
|
|
a6fde5a0c6 | ||
|
|
32baac329d | ||
|
|
73d40db168 | ||
|
|
96259aa5da | ||
|
|
fed01db5a6 | ||
|
|
c8af40108f | ||
|
|
bcae94efc7 | ||
|
|
348b48d886 | ||
|
|
2bc6934e44 | ||
|
|
6a53726119 | ||
|
|
64168bc42e | ||
|
|
37b050db3e | ||
|
|
4e544a8f39 | ||
|
|
c0c43abf49 | ||
|
|
12fc4f37cc | ||
|
|
1cefbe5d97 | ||
|
|
2b96139ff7 | ||
|
|
fa5a76d9de | ||
|
|
e30f800b2b | ||
|
|
7dfb04c4af | ||
|
|
45bece3cf7 | ||
|
|
fa31b0a58c | ||
|
|
fbbd1b88f7 | ||
|
|
8aac29bdf1 | ||
|
|
d2fdc8bfab | ||
|
|
c66360bee0 | ||
|
|
c9d8fcfbc1 | ||
|
|
311f1ec7cd | ||
|
|
32d48018e1 | ||
|
|
a948a97339 | ||
|
|
3f43e4ecb6 | ||
|
|
2a83612ffb | ||
|
|
bf7c6abc1d | ||
|
|
ed57a2a04a | ||
|
|
fd2c5e303d | ||
|
|
2eed5fdf58 | ||
|
|
54f0cc51ba | ||
|
|
bd17dd07a3 | ||
|
|
f162ab8943 | ||
|
|
370790804b | ||
|
|
9a42fb0701 | ||
|
|
a93a7f12bb | ||
|
|
5eb257f2eb | ||
|
|
43ce5c1d91 | ||
|
|
6bbf4a30a6 | ||
|
|
d65b1b58ec | ||
|
|
7aaf0bce1e | ||
|
|
a0a508b16b | ||
|
|
e5f428d412 | ||
|
|
585ce50284 | ||
|
|
fcdabd2f23 | ||
|
|
ebfc9b3f57 | ||
|
|
3464e38c85 | ||
|
|
81989cc64e | ||
|
|
7e05672ee7 | ||
|
|
5cd275bdc1 | ||
|
|
1537676484 | ||
|
|
9a3347f1b5 | ||
|
|
7787c6b787 | ||
|
|
630026a1ae | ||
|
|
ca4ab1a230 | ||
|
|
1a48a51253 | ||
|
|
07e22ca6ed | ||
|
|
a79d94947f | ||
|
|
31993b2e69 | ||
|
|
00dbbd9588 | ||
|
|
c498665bdd | ||
|
|
ad5caa1eee | ||
|
|
66fb51f006 | ||
|
|
7c53dc5b60 | ||
|
|
020768834c | ||
|
|
aaa29ec6b2 | ||
|
|
d465d97201 | ||
|
|
fa23c0706f | ||
|
|
3946d9ee66 | ||
|
|
b03f62814d | ||
|
|
0d1beeb632 | ||
|
|
3ff11fad17 | ||
|
|
08504aabc5 | ||
|
|
05cc9d8f1b | ||
|
|
30600915c6 | ||
|
|
fb80af3fe5 | ||
|
|
ce34cc8fa4 | ||
|
|
033efb5ebf | ||
|
|
f3f20104aa | ||
|
|
b84e92a05a | ||
|
|
26957d6fb3 | ||
|
|
90532afa3d | ||
|
|
2849f94cd9 | ||
|
|
fa14cf8c9c | ||
|
|
d2ec4f1ced | ||
|
|
9a1d87b143 | ||
|
|
1e6b8ece14 | ||
|
|
1b86919bac | ||
|
|
58fa667888 | ||
|
|
e47227750a | ||
|
|
66f2e35616 | ||
|
|
6a12032f51 | ||
|
|
2e5375f495 | ||
|
|
5fad9943da | ||
|
|
dde6a8972f | ||
|
|
8fd4bea89b | ||
|
|
dabf0eae54 | ||
|
|
e02f8f7a9e | ||
|
|
b27e1a8afa | ||
|
|
1a93282cb5 | ||
|
|
fd2b6e1cb1 | ||
|
|
2d5bce04c1 | ||
|
|
189f790a7e | ||
|
|
cfdec8cea0 | ||
|
|
0419b2f327 | ||
|
|
e1fda6b22b | ||
|
|
697d7ec73d | ||
|
|
7fa2c96d5d | ||
|
|
1526a2edaf | ||
|
|
e024a6b7b0 | ||
|
|
5812cab050 | ||
|
|
40f13e0587 | ||
|
|
cedeb868f9 | ||
|
|
354b434ffc | ||
|
|
88f9d637b1 | ||
|
|
a0d578d547 | ||
|
|
e739b98305 | ||
|
|
ea10f41886 | ||
|
|
ed017d7a86 | ||
|
|
3337f9ef60 | ||
|
|
23216b2a97 | ||
|
|
f609b85d30 | ||
|
|
3ac3063995 | ||
|
|
11c739949f | ||
|
|
6c6e2aadaf | ||
|
|
219da4d934 | ||
|
|
b9d73c58ee | ||
|
|
c749a28662 | ||
|
|
9e3efa5229 | ||
|
|
e54418633c | ||
|
|
e56e5a3a03 | ||
|
|
e4b4f82177 | ||
|
|
79a2bf2326 | ||
|
|
2b23f4a17c | ||
|
|
89accb281f | ||
|
|
fd61297a43 | ||
|
|
f064d72c28 | ||
|
|
9165dc202b | ||
|
|
2d8977b2e1 | ||
|
|
27427dbd1b | ||
|
|
b7377f54bc | ||
|
|
d496192032 | ||
|
|
86f737ff93 | ||
|
|
50432159d7 | ||
|
|
88da59560d | ||
|
|
3073e1ed22 | ||
|
|
6320cae91e | ||
|
|
0e06878714 | ||
|
|
5ee63aebe5 | ||
|
|
4a72802a32 | ||
|
|
da95af5625 | ||
|
|
8b50a84c4a | ||
|
|
b9f0f61bd5 | ||
|
|
8823e0fc30 | ||
|
|
dcb37f1acc | ||
|
|
e1334730ce | ||
|
|
6963994942 | ||
|
|
9f27efba9b | ||
|
|
b2317ec35e | ||
|
|
eb9809ee86 | ||
|
|
112c2949fa | ||
|
|
54f0be2b8a | ||
|
|
1e367634f3 | ||
|
|
0e495c5720 | ||
|
|
377ca3b1e8 | ||
|
|
d48b8e884f | ||
|
|
6cd016d019 | ||
|
|
d654c525dd | ||
|
|
3e2593c673 | ||
|
|
8005c51d56 | ||
|
|
9124f3f461 | ||
|
|
485f7bcc99 | ||
|
|
ce381ff788 | ||
|
|
a77dd0f55b | ||
|
|
2a73345e0f | ||
|
|
de093cb7aa | ||
|
|
e62e8419cd | ||
|
|
37a3fd9b3a | ||
|
|
61eafcb861 | ||
|
|
74e5e884ec | ||
|
|
1394ee4fd8 | ||
|
|
a78c3a0f75 | ||
|
|
2bdffc7569 | ||
|
|
43cf9cba66 | ||
|
|
b492b1a23a | ||
|
|
a00437c8c4 | ||
|
|
002412df2e | ||
|
|
dc36b7858d | ||
|
|
d5c5dde2e4 | ||
|
|
7edf503807 | ||
|
|
3a07ea3ac2 | ||
|
|
e1727e6cb3 | ||
|
|
afa5068dd6 | ||
|
|
cc07099687 | ||
|
|
962474e667 | ||
|
|
91d42b9e1f | ||
|
|
60842c155c | ||
|
|
dbc081913e | ||
|
|
7a13843b21 | ||
|
|
b2735d1f41 | ||
|
|
2c68b4a0d9 | ||
|
|
f97df85304 | ||
|
|
056a29ad0f | ||
|
|
eb0e109f09 | ||
|
|
cee750cf3e | ||
|
|
faf8e37938 | ||
|
|
fb2ab2a5d9 | ||
|
|
cef7c97945 | ||
|
|
f3cb1db4bc | ||
|
|
279ede31c7 | ||
|
|
ddd78bd2e0 | ||
|
|
6692148b87 | ||
|
|
621d6fce7d | ||
|
|
82c2ec62d1 | ||
|
|
ee15dae003 | ||
|
|
e739c3adfa | ||
|
|
2a11b387fb | ||
|
|
1ed777edb2 | ||
|
|
4daf997a73 | ||
|
|
640f734e50 | ||
|
|
c53062d491 | ||
|
|
abdb74f3b2 | ||
|
|
9f791af195 | ||
|
|
eae23dc30c | ||
|
|
197501c04a | ||
|
|
cbbd2a4a8a | ||
|
|
f0465c0d0b | ||
|
|
7b1640b1c6 | ||
|
|
b3dab0aa98 | ||
|
|
32b8a1108d | ||
|
|
bd131fc60d | ||
|
|
14c1a37e71 | ||
|
|
f6aec67eb8 | ||
|
|
dde306dec2 | ||
|
|
c4f46f1db2 | ||
|
|
27bd21eefa | ||
|
|
37ef166c15 | ||
|
|
34c7d01997 | ||
|
|
9b364b8efa | ||
|
|
94db98d854 | ||
|
|
cc675906c7 | ||
|
|
83d3d90b3e | ||
|
|
4b12a21acf | ||
|
|
e185a1e86c | ||
|
|
85d0d194fe | ||
|
|
099f3dbf2d | ||
|
|
549727aae9 | ||
|
|
d56bf78e58 | ||
|
|
3b389cc524 | ||
|
|
9c241ad384 | ||
|
|
bbadbc582e | ||
|
|
dbbbc228b5 | ||
|
|
55b3b06c00 | ||
|
|
ee788bf077 | ||
|
|
c23824075c | ||
|
|
5342e28754 | ||
|
|
0cd039811a | ||
|
|
4760763401 | ||
|
|
75ca3bca90 | ||
|
|
cd2f7d4bfb | ||
|
|
87d41c1a7f | ||
|
|
90d9ca588a | ||
|
|
7d2f6b8b04 | ||
|
|
54a16ffda4 | ||
|
|
d6ce07021f | ||
|
|
128f5d9b36 | ||
|
|
6ff16e0813 | ||
|
|
bd420ba25e | ||
|
|
9d35698b9b | ||
|
|
e194bf0355 | ||
|
|
22ae896c85 | ||
|
|
88fa2341e5 | ||
|
|
747245c2f8 | ||
|
|
cae3135ef6 | ||
|
|
28c95f3255 | ||
|
|
f9d9bb2367 | ||
|
|
796ae7e82c | ||
|
|
e179256194 | ||
|
|
af3ce324cc | ||
|
|
0a486cb991 | ||
|
|
e46bab96c7 | ||
|
|
f8663ea36f | ||
|
|
2a61827a6b | ||
|
|
db2ea9704f | ||
|
|
5325f104d3 | ||
|
|
bfc58161d5 | ||
|
|
62d269f92f | ||
|
|
f5441e35a1 | ||
|
|
6222fcf802 | ||
|
|
05da923fc3 | ||
|
|
724615f539 | ||
|
|
db9141ec1e | ||
|
|
43e1637c18 | ||
|
|
183285a03b | ||
|
|
8705f78aee | ||
|
|
8349b25587 | ||
|
|
82b12d227b | ||
|
|
aa31228f7b | ||
|
|
4d46848417 | ||
|
|
dc5b3e24e5 | ||
|
|
6157217f6b | ||
|
|
ed0f54da27 | ||
|
|
07e8289282 | ||
|
|
dcfc6dae96 | ||
|
|
0e4a809600 | ||
|
|
4681c227f9 | ||
|
|
d33945bfad |
@@ -1,8 +1,2 @@
|
||||
contrib/
|
||||
Documentation/
|
||||
examples/
|
||||
Godeps/
|
||||
scripts/
|
||||
vendor/
|
||||
vagrant/
|
||||
*.aci
|
||||
*
|
||||
!bin/matchbox
|
||||
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -26,7 +26,10 @@ _testmain.go
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
_output/
|
||||
bin/
|
||||
assets/
|
||||
*.aci
|
||||
assets/
|
||||
bin/
|
||||
_output/
|
||||
tools/
|
||||
contrib/registry/data
|
||||
contrib/rpm/*.tar.gz
|
||||
|
||||
18
.travis.yml
18
.travis.yml
@@ -3,27 +3,23 @@ sudo: required
|
||||
services:
|
||||
- docker
|
||||
go:
|
||||
- 1.5.4
|
||||
- 1.6.2
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.11.1
|
||||
- tip
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
env:
|
||||
global:
|
||||
- GO15VENDOREXPERIMENT="1"
|
||||
install:
|
||||
- go get github.com/golang/lint/golint
|
||||
- go get golang.org/x/lint/golint
|
||||
script:
|
||||
- ./test
|
||||
- make test
|
||||
deploy:
|
||||
provider: script
|
||||
script: scripts/travis-docker-push
|
||||
script: scripts/dev/travis-docker-push
|
||||
skip_cleanup: true
|
||||
on:
|
||||
branch: master
|
||||
go: '1.6.2'
|
||||
condition: "$TRAVIS_PULL_REQUEST = false"
|
||||
go: '1.11.1'
|
||||
notifications:
|
||||
email: change
|
||||
|
||||
|
||||
136
CHANGES.md
136
CHANGES.md
@@ -1,8 +1,140 @@
|
||||
# coreos-baremetal bootcfg
|
||||
# Matchbox
|
||||
|
||||
Notable changes between releases.
|
||||
|
||||
## Latest
|
||||
|
||||
## v0.4.0 (2016-06-21)
|
||||
## v0.7.1 (2018-11-01)
|
||||
|
||||
* Add `kernel_args` variable to the terraform bootkube-install cluster definition
|
||||
* Add `get-flatcar` helper script
|
||||
* Add optional TLS support to read-only HTTP API
|
||||
* Build Matchbox with Go 1.11.1 for images and binaries
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade Kubernetes example clusters to v1.10.0 (Terraform-based)
|
||||
* Upgrade Kubernetes example clusters to v1.8.5
|
||||
|
||||
## v0.7.0 (2017-12-12)
|
||||
|
||||
* Add gRPC API endpoints for managing generic (experimental) templates
|
||||
* Update Container Linux config transpiler to v0.5.0
|
||||
* Update Ignition to v0.19.0, render v2.1.0 Ignition configs
|
||||
* Drop support for Container Linux versions below 1465.0.0 (breaking)
|
||||
* Build Matchbox with Go 1.8.5 for images and binaries
|
||||
* Remove Profile `Cmdline` map (deprecated in v0.5.0), use `Args` slice instead
|
||||
* Remove pixiecore support (deprecated in v0.5.0)
|
||||
* Remove `ContextHandler`, `ContextHandlerFunc`, and `NewHandler` from the `matchbox/http` package.
|
||||
|
||||
### Examples / Modules
|
||||
|
||||
* Upgrade Kubernetes example clusters to v1.8.4
|
||||
* Kubernetes examples clusters enable etcd TLS
|
||||
* Deploy the Container Linux Update Operator (CLUO) to coordinate reboots of Container Linux nodes in Kubernetes clusters. See the cluster [addon docs](Documentation/cluster-addons.md).
|
||||
* Kubernetes examples (terraform and non-terraform) mask locksmithd
|
||||
* Terraform modules `bootkube` and `profiles` (Kubernetes) mask locksmithd
|
||||
|
||||
## v0.6.1 (2017-05-25)
|
||||
|
||||
* Improve the installation documentation
|
||||
* Move examples/etc/matchbox/cert-gen to scripts/tls
|
||||
* Build Matchbox with Go 1.8.3 for images and binaries
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade self-hosted Kubernetes cluster examples to v1.6.4
|
||||
* Add NoSchedule taint to self-hosted Kubernetes controllers
|
||||
* Remove static Kubernetes and rktnetes cluster examples
|
||||
|
||||
## v0.6.0 (2017-04-25)
|
||||
|
||||
* New [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) plugin for Terraform users!
|
||||
* New hosted [documentation](https://coreos.com/matchbox/docs/latest) on coreos.com
|
||||
* Add `ProfileDelete`, `GroupDelete`, `IgnitionGet` and `IgnitionDelete` gRPC endpoints
|
||||
* Build matchbox with Go 1.8 for container images and binaries
|
||||
* Generate code with gRPC v1.2.1 and matching Go protoc-gen-go plugin
|
||||
* Update Ignition to v0.14.0 and coreos-cloudinit to v1.13.0
|
||||
* Update "fuze" docs to the new name [Container Linux Configs](https://coreos.com/os/docs/latest/configuration.html)
|
||||
* Remove `bootcmd` binary from release tarballs
|
||||
|
||||
### Examples
|
||||
|
||||
* Upgrade Kubernetes v1.5.5 (static) example clusters
|
||||
* Upgrade Kubernetes v1.6.1 (self-hosted) example cluster
|
||||
* Use etcd3 by default in all clusters (remove etcd2 clusters)
|
||||
* Add Terraform examples for etcd3 and self-hosted Kubernetes 1.6.1
|
||||
|
||||
## v0.5.0 (2017-01-23)
|
||||
|
||||
* Rename project to CoreOS `matchbox`!
|
||||
* Add Profile `args` field to list kernel args
|
||||
* Update [Fuze](https://github.com/coreos/container-linux-config-transpiler) and [Ignition](https://github.com/coreos/ignition) to v0.11.2
|
||||
* Switch from `golang.org/x/net/context` to `context`
|
||||
* Deprecate Profile `cmd` field map of kernel args
|
||||
* Deprecate Pixiecore support
|
||||
* Drop build support for Go 1.6
|
||||
|
||||
#### Rename
|
||||
|
||||
* Move repo from github.com/coreos/coreos-baremetal to github.com/coreos/matchbox
|
||||
* Rename `bootcfg` binary to `matchbox`
|
||||
* Rename `bootcfg` packages to `matchbox`
|
||||
* Publish a `quay.io/coreos/matchbox` container image. The `quay.io/coreos/bootcfg` image will no longer be updated.
|
||||
* Rename environment variable prefix from `BOOTCFG*` to `MATCHBOX*`
|
||||
* Change config directory to `/etc/matchbox`
|
||||
* Change default `-data-path` to `/var/lib/matchbox`
|
||||
* Change default `-assets-path` to `/var/lib/matchbox/assets`
|
||||
|
||||
#### Examples
|
||||
|
||||
* Upgrade Kubernetes v1.5.1 (static) example clusters
|
||||
* Upgrade Kubernetes v1.5.1 (self-hosted) example cluster
|
||||
* Switch Kubernetes (self-hosted) to run flannel as pods
|
||||
* Combine rktnetes Ignition into Kubernetes static cluster
|
||||
|
||||
#### Migration
|
||||
|
||||
* binary users should install the `matchbox` binary (see [installation](Documentation/deployment.md))
|
||||
* rkt/docker users should start using `quay.io/coreos/matchbox` (see [installation](Documentation/deployment.md))
|
||||
* RPM users should uninstall bootcfg and install matchbox (see [installation](Documentation/deployment.md))
|
||||
* Move `/etc/bootcfg` configs and certificates to `/etc/matchbox`
|
||||
* Move `/var/lib/bootcfg` data to `/var/lib/matchbox`
|
||||
* See the new [contrib/systemd](contrib/systemd) service examples
|
||||
* Remove the old `bootcfg` user if you created one
|
||||
|
||||
## v0.4.2 (2016-12-7)
|
||||
|
||||
#### Improvements
|
||||
|
||||
* Add RPM packages to Copr
|
||||
* Fix packaged `contrib/systemd` units
|
||||
* Update Go version to 1.7.4
|
||||
|
||||
#### Examples
|
||||
|
||||
* Upgrade Kubernetes v1.4.6 (static manifest) example clusters
|
||||
* Upgrade Kubernetes v1.4.6 (rktnetes) example clusters
|
||||
* Upgrade Kubernetes v1.4.6 (self-hosted) example cluster
|
||||
|
||||
## v0.4.1 (2016-10-17)
|
||||
|
||||
#### Improvements
|
||||
|
||||
* Add ARM and ARM64 release architectures (#309)
|
||||
* Add guide for installing bootcfg on CoreOS (#306)
|
||||
* Improvements to the bootcfg cert-gen script (#310)
|
||||
|
||||
#### Examples
|
||||
|
||||
* Add Kubernetes example with rkt container runtime (i.e. rktnetes)
|
||||
* Upgrade Kubernetes v1.4.1 (static manifest) example clusters
|
||||
* Upgrade Kubernetes v1.4.1 (rktnetes) example clusters
|
||||
* Upgrade Kubernetes v1.4.1 (self-hosted) example cluster
|
||||
* Add etcd3 example cluster (PXE in-RAM or install to disk)
|
||||
* Use DNS names (instead of IPs) in example clusters (except bootkube)
|
||||
|
||||
## v0.4.0 (2016-07-21)
|
||||
|
||||
#### Features
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
FROM alpine:latest
|
||||
FROM alpine:3.6
|
||||
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
|
||||
COPY bin/bootcfg /bootcfg
|
||||
COPY bin/matchbox /matchbox
|
||||
EXPOSE 8080
|
||||
ENTRYPOINT ["/bootcfg"]
|
||||
ENTRYPOINT ["/matchbox"]
|
||||
|
||||
@@ -1,17 +1,21 @@
|
||||
|
||||
# HTTP API
|
||||
|
||||
## iPXE Script
|
||||
## iPXE script
|
||||
|
||||
Serves a static iPXE boot script which gathers client machine attributes and chainloads to the iPXE endpoint. Use DHCP/TFTP to point iPXE clients to this endpoint as the next-server.
|
||||
|
||||
GET http://bootcfg.foo/boot.ipxe
|
||||
GET http://bootcfg.foo/boot.ipxe.0 // for dnsmasq
|
||||
```
|
||||
GET http://matchbox.foo/boot.ipxe
|
||||
GET http://matchbox.foo/boot.ipxe.0 // for dnsmasq
|
||||
```
|
||||
|
||||
**Response**
|
||||
|
||||
#!ipxe
|
||||
chain ipxe?uuid=${uuid}&mac=${net0/mac:hexhyp}&domain=${domain}&hostname=${hostname}&serial=${serial}
|
||||
```
|
||||
#!ipxe
|
||||
chain ipxe?uuid=${uuid}&mac=${mac:hexhyp}&domain=${domain}&hostname=${hostname}&serial=${serial}
|
||||
```
|
||||
|
||||
Client's booted with the `/ipxe.boot` endpoint will introspect and make a request to `/ipxe` with the `uuid`, `mac`, `hostname`, and `serial` value as query arguments.
|
||||
|
||||
@@ -19,9 +23,11 @@ Client's booted with the `/ipxe.boot` endpoint will introspect and make a reques
|
||||
|
||||
Finds the profile for the machine and renders the network boot config (kernel, options, initrd) as an iPXE script.
|
||||
|
||||
GET http://bootcfg.foo/ipxe?label=value
|
||||
```
|
||||
GET http://matchbox.foo/ipxe?label=value
|
||||
```
|
||||
|
||||
**Query Parameters**
|
||||
**Query parameters**
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|--------|-----------------|
|
||||
@@ -31,16 +37,49 @@ Finds the profile for the machine and renders the network boot config (kernel, o
|
||||
|
||||
**Response**
|
||||
|
||||
#!ipxe
|
||||
kernel /assets/coreos/1053.2.0/coreos_production_pxe.vmlinuz coreos.config.url=http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp} coreos.first_boot=1 coreos.autologin
|
||||
initrd /assets/coreos/1053.2.0/coreos_production_pxe_image.cpio.gz
|
||||
boot
|
||||
```
|
||||
#!ipxe
|
||||
kernel /assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp} coreos.first_boot=1 coreos.autologin
|
||||
initrd /assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz
|
||||
boot
|
||||
```
|
||||
|
||||
## GRUB2
|
||||
|
||||
Finds the profile for the machine and renders the network boot config as a GRUB config. Use DHCP/TFTP to point GRUB clients to this endpoint as the next-server.
|
||||
|
||||
GET http://bootcfg.foo/grub?label=value
|
||||
```
|
||||
GET http://matchbox.foo/grub?label=value
|
||||
```
|
||||
|
||||
**Query parameters**
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|--------|-----------------|
|
||||
| uuid | string | Hardware UUID |
|
||||
| mac | string | MAC address |
|
||||
| * | string | Arbitrary label |
|
||||
|
||||
**Response**
|
||||
|
||||
```
|
||||
default=0
|
||||
timeout=1
|
||||
menuentry "CoreOS" {
|
||||
echo "Loading kernel"
|
||||
linuxefi "(http;matchbox.foo:8080)/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://matchbox.foo:8080/ignition" "coreos.first_boot"
|
||||
echo "Loading initrd"
|
||||
initrdefi "(http;matchbox.foo:8080)/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"
|
||||
}
|
||||
```
|
||||
|
||||
## Cloud config
|
||||
|
||||
DEPRECATED: Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
|
||||
|
||||
```
|
||||
GET http://matchbox.foo/cloud?label=value
|
||||
```
|
||||
|
||||
**Query Parameters**
|
||||
|
||||
@@ -52,69 +91,25 @@ Finds the profile for the machine and renders the network boot config as a GRUB
|
||||
|
||||
**Response**
|
||||
|
||||
default=0
|
||||
timeout=1
|
||||
menuentry "CoreOS" {
|
||||
echo "Loading kernel"
|
||||
linuxefi "(http;bootcfg.foo:8080)/assets/coreos/1053.2.0/coreos_production_pxe.vmlinuz" "coreos.autologin" "coreos.config.url=http://bootcfg.foo:8080/ignition" "coreos.first_boot"
|
||||
echo "Loading initrd"
|
||||
initrdefi "(http;bootcfg.foo:8080)/assets/coreos/1053.2.0/coreos_production_pxe_image.cpio.gz"
|
||||
}
|
||||
```yaml
|
||||
#cloud-config
|
||||
coreos:
|
||||
units:
|
||||
- name: etcd2.service
|
||||
command: start
|
||||
- name: fleet.service
|
||||
command: start
|
||||
```
|
||||
|
||||
## Pixiecore
|
||||
|
||||
Finds the profile matching the machine and renders the network boot config as JSON to implement the [Pixiecore API](https://github.com/danderson/pixiecore/blob/master/README.api.md). Currently, Pixiecore only provides the machine's MAC address for matching.
|
||||
|
||||
GET http://bootcfg.foo/pixiecore/v1/boot/:MAC
|
||||
|
||||
**URL Parameters**
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|--------|-------------|
|
||||
| mac | string | MAC address |
|
||||
|
||||
**Response**
|
||||
|
||||
{
|
||||
"kernel":"/assets/coreos/1032.0.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd":["/assets/coreos/1032.0.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"cmdline":{
|
||||
"cloud-config-url":"http://bootcfg.foo/cloud?mac=ADDRESS",
|
||||
"coreos.autologin":""
|
||||
}
|
||||
}
|
||||
|
||||
## Cloud Config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding Cloud-Config with group metadata, selectors, and query params.
|
||||
|
||||
GET http://bootcfg.foo/cloud?label=value
|
||||
|
||||
**Query Parameters**
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|--------|-----------------|
|
||||
| uuid | string | Hardware UUID |
|
||||
| mac | string | MAC address |
|
||||
| * | string | Arbitrary label |
|
||||
|
||||
**Response**
|
||||
|
||||
#cloud-config
|
||||
coreos:
|
||||
units:
|
||||
- name: etcd2.service
|
||||
command: start
|
||||
- name: fleet.service
|
||||
command: start
|
||||
|
||||
## Ignition Config
|
||||
## Container Linux Config / Ignition Config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding Ignition Config with group metadata, selectors, and query params.
|
||||
|
||||
GET http://bootcfg.foo/ignition?label=value
|
||||
```
|
||||
GET http://matchbox.foo/ignition?label=value
|
||||
```
|
||||
|
||||
**Query Parameters**
|
||||
**Query parameters**
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|--------|-----------------|
|
||||
@@ -124,24 +119,28 @@ Finds the profile matching the machine and renders the corresponding Ignition Co
|
||||
|
||||
**Response**
|
||||
|
||||
{
|
||||
"ignition": { "version": "2.0.0" },
|
||||
"systemd": {
|
||||
"units": [{
|
||||
"name": "example.service",
|
||||
"enable": true,
|
||||
"contents": "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target"
|
||||
}]
|
||||
}
|
||||
}
|
||||
```json
|
||||
{
|
||||
"ignition": { "version": "2.0.0" },
|
||||
"systemd": {
|
||||
"units": [{
|
||||
"name": "example.service",
|
||||
"enable": true,
|
||||
"contents": "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target"
|
||||
}]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Generic Config
|
||||
## Generic config
|
||||
|
||||
Finds the profile matching the machine and renders the corresponding generic config with group metadata, selectors, and query params.
|
||||
|
||||
GET http://bootcfg.foo/generic?label=value
|
||||
```
|
||||
GET http://matchbox.foo/generic?label=value
|
||||
```
|
||||
|
||||
**Query Parameters**
|
||||
**Query parameters**
|
||||
|
||||
| Name | Type | Description |
|
||||
|------|--------|-----------------|
|
||||
@@ -151,19 +150,22 @@ Finds the profile matching the machine and renders the corresponding generic con
|
||||
|
||||
**Response**
|
||||
|
||||
{
|
||||
“uuid”: “”,
|
||||
“mac”: “52:54:00:a1:9c:ae”,
|
||||
“osInstalled”: true,
|
||||
“rawQuery”: “mac=52:54:00:a1:9c:ae&os=installed”
|
||||
}
|
||||
|
||||
```
|
||||
{
|
||||
“uuid”: “”,
|
||||
“mac”: “52:54:00:a1:9c:ae”,
|
||||
“osInstalled”: true,
|
||||
“rawQuery”: “mac=52:54:00:a1:9c:ae&os=installed”
|
||||
}
|
||||
```
|
||||
|
||||
## Metadata
|
||||
|
||||
Finds the matching machine group and renders the group metadata, selectors, and query params in an "env file" style response.
|
||||
|
||||
GET http://bootcfg.foo/metadata?mac=52-54-00-a1-9c-ae&foo=bar&count=3&gate=true
|
||||
```
|
||||
GET http://matchbox.foo/metadata?mac=52-54-00-a1-9c-ae&foo=bar&count=3&gate=true
|
||||
```
|
||||
|
||||
**Query Parameters**
|
||||
|
||||
@@ -175,34 +177,37 @@ Finds the matching machine group and renders the group metadata, selectors, and
|
||||
|
||||
**Response**
|
||||
|
||||
META=data
|
||||
ETCD_NAME=node1
|
||||
SOME_NESTED_DATA=some-value
|
||||
MAC=52:54:00:a1:9c:ae
|
||||
REQUEST_QUERY_MAC=52:54:00:a1:9c:ae
|
||||
REQUEST_QUERY_FOO=bar
|
||||
REQUEST_QUERY_COUNT=3
|
||||
REQUEST_QUERY_GATE=true
|
||||
REQUEST_RAW_QUERY=mac=52-54-00-a1-9c-ae&foo=bar&count=3&gate=true
|
||||
```
|
||||
META=data
|
||||
ETCD_NAME=node1
|
||||
SOME_NESTED_DATA=some-value
|
||||
MAC=52:54:00:a1:9c:ae
|
||||
REQUEST_QUERY_MAC=52:54:00:a1:9c:ae
|
||||
REQUEST_QUERY_FOO=bar
|
||||
REQUEST_QUERY_COUNT=3
|
||||
REQUEST_QUERY_GATE=true
|
||||
REQUEST_RAW_QUERY=mac=52-54-00-a1-9c-ae&foo=bar&count=3&gate=true
|
||||
```
|
||||
|
||||
## OpenPGP Signatures
|
||||
## OpenPGP signatures
|
||||
|
||||
OpenPGPG signature endpoints serve detached binary and ASCII armored signatures of rendered configs, if enabled. See [OpenPGP Signing](openpgp.md).
|
||||
|
||||
| Endpoint | Signature Endpoint | ASCII Signature Endpoint |
|
||||
|------------|--------------------|-------------------------|
|
||||
| iPXE | `http://bootcfg.foo/ipxe.sig` | `http://bootcfg.foo/ipxe.asc` |
|
||||
| Pixiecore | `http://bootcfg/pixiecore/v1/boot.sig/:MAC` | `http://bootcfg/pixiecore/v1/boot.asc/:MAC` |
|
||||
| GRUB2 | `http://bootcf.foo/grub.sig` | `http://bootcfg.foo/grub.asc` |
|
||||
| Ignition | `http://bootcfg.foo/ignition.sig` | `http://bootcfg.foo/ignition.asc` |
|
||||
| Cloud-Config | `http://bootcfg.foo/cloud.sig` | `http://bootcfg.foo/cloud.asc` |
|
||||
| Generic | `http://bootcfg.foo/generic.sig` | `http://bootcfg.foo/generic.asc` |
|
||||
| Metadata | `http://bootcfg.foo/metadata.sig` | `http://bootcfg.foo/metadata.asc` |
|
||||
| iPXE | `http://matchbox.foo/ipxe.sig` | `http://matchbox.foo/ipxe.asc` |
|
||||
| GRUB2 | `http://bootcf.foo/grub.sig` | `http://matchbox.foo/grub.asc` |
|
||||
| Ignition | `http://matchbox.foo/ignition.sig` | `http://matchbox.foo/ignition.asc` |
|
||||
| Cloud-Config | `http://matchbox.foo/cloud.sig` | `http://matchbox.foo/cloud.asc` |
|
||||
| Generic | `http://matchbox.foo/generic.sig` | `http://matchbox.foo/generic.asc` |
|
||||
| Metadata | `http://matchbox.foo/metadata.sig` | `http://matchbox.foo/metadata.asc` |
|
||||
|
||||
Get a config and its detached ASCII armored signature.
|
||||
|
||||
GET http://bootcfg.foo/ipxe?label=value
|
||||
GET http://bootcfg.foo/ipxe.asc?label=value
|
||||
```
|
||||
GET http://matchbox.foo/ipxe?label=value
|
||||
GET http://matchbox.foo/ipxe.asc?label=value
|
||||
```
|
||||
|
||||
**Response**
|
||||
|
||||
@@ -221,14 +226,15 @@ NO+p24BL3PHZyKw0nsrm275C913OxEVgnNZX7TQltaweW23Cd1YBNjcfb3zv+Zo=
|
||||
|
||||
## Assets
|
||||
|
||||
If you need to serve static assets (e.g. kernel, initrd), `bootcfg` can serve arbitrary assets from the `-assets-path`.
|
||||
|
||||
bootcfg.foo/assets/
|
||||
└── coreos
|
||||
└── 1053.2.0
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
└── 1032.0.0
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
If you need to serve static assets (e.g. kernel, initrd), `matchbox` can serve arbitrary assets from the `-assets-path`.
|
||||
|
||||
```
|
||||
matchbox.foo/assets/
|
||||
└── coreos
|
||||
└── 1576.5.0
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
└── 1153.0.0
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
```
|
||||
|
||||
@@ -1,176 +0,0 @@
|
||||
|
||||
# bootcfg
|
||||
|
||||
`bootcfg` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS clusters. `bootcfg` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use CoreOS Linux's early-boot capabilities to provision CoreOS machines.
|
||||
|
||||
Network boot endpoints provide iPXE, GRUB, and [Pixiecore](https://github.com/danderson/pixiecore/blob/master/README.api.md) support. `bootcfg` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with rkt, or as a Docker container.
|
||||
|
||||
<img src='img/overview.png' class="img-center" alt="Bootcfg Overview"/>
|
||||
|
||||
## Getting Started
|
||||
|
||||
Get started running `bootcfg` on your Linux machine, with rkt or Docker.
|
||||
|
||||
* [bootcfg with rkt](getting-started-rkt.md)
|
||||
* [bootcfg with Docker](getting-started-docker.md)
|
||||
|
||||
## Flags
|
||||
|
||||
See [configuration](config.md) flags and variables.
|
||||
|
||||
## API
|
||||
|
||||
* [HTTP API](api.md)
|
||||
* [gRPC API](https://godoc.org/github.com/coreos/coreos-baremetal/bootcfg/client)
|
||||
|
||||
## Data
|
||||
|
||||
A `Store` stores machine Groups, Profiles, and associated Ignition configs, cloud-configs, and generic configs. By default, `bootcfg` uses a `FileStore` to search a `-data-path` for these resources.
|
||||
|
||||
Prepare `/var/lib/bootcfg` with `groups`, `profiles`, `ignition`, `cloud`, and `generic` subdirectories. You may wish to keep these files under version control.
|
||||
|
||||
/var/lib/bootcfg
|
||||
├── cloud
|
||||
│ ├── cloud.yaml.tmpl
|
||||
│ └── worker.sh.tmpl
|
||||
├── ignition
|
||||
│ └── raw.ign
|
||||
│ └── etcd.yaml.tmpl
|
||||
│ └── simple.yaml.tmpl
|
||||
├── generic
|
||||
│ └── config.yaml
|
||||
│ └── setup.cfg
|
||||
│ └── datacenter-1.tmpl
|
||||
├── groups
|
||||
│ └── default.json
|
||||
│ └── node1.json
|
||||
│ └── us-central1-a.json
|
||||
└── profiles
|
||||
└── etcd.json
|
||||
└── worker.json
|
||||
|
||||
The [examples](../examples) directory is a valid data directory with some pre-defined configs. Note that `examples/groups` contains many possible groups in nested directories for demo purposes (tutorials pick one to mount). Your machine groups should be kept directly inside the `groups` directory as shown above.
|
||||
|
||||
### Profiles
|
||||
|
||||
Profiles reference an Ignition config, Cloud-Config, and/or generic config by name and define network boot settings.
|
||||
|
||||
{
|
||||
"id": "etcd",
|
||||
"name": "CoreOS with etcd2",
|
||||
"cloud_id": "",
|
||||
"ignition_id": "etcd.yaml",
|
||||
"generic_id": "some-service.cfg",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1053.2.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1053.2.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"cmdline": {
|
||||
"coreos.config.url": "http://bootcfg.foo:8080/ignition?uuid=${uuid}&mac=${net0/mac:hexhyp}",
|
||||
"coreos.autologin": "",
|
||||
"coreos.first_boot": "1"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE, GRUB, or Pixiecore. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `bootcfg` [Ignition endpoint](api.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
|
||||
To use cloud-config, set the `cloud-config-url` kernel option to reference the `bootcfg` [Cloud-Config endpoint](api.md#cloud-config), which will render the `cloud_id` file.
|
||||
|
||||
### Groups
|
||||
|
||||
Groups define selectors which match zero or more machines. Machine(s) matching a group will boot and provision according to the group's `Profile`.
|
||||
|
||||
Create a group definition with a `Profile` to be applied, selectors for matching machines, and any `metadata` needed to render templated configs. For example `/var/lib/bootcfg/groups/node1.json` matches a single machine with MAC address `52:54:00:89:d8:10`.
|
||||
|
||||
# /var/lib/bootcfg/groups/node1.json
|
||||
{
|
||||
"name": "node1",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:89:d8:10"
|
||||
},
|
||||
"metadata": {
|
||||
"fleet_metadata": "role=etcd,name=node1",
|
||||
"etcd_name": "node1",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
|
||||
Meanwhile, `/var/lib/bootcfg/groups/proxy.json` acts as the default machine group since it has no selectors.
|
||||
|
||||
{
|
||||
"name": "etcd-proxy",
|
||||
"profile": "etcd-proxy",
|
||||
"metadata": {
|
||||
"fleet_metadata": "role=etcd-proxy",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
|
||||
For example, a request to `/ignition?mac=52:54:00:89:d8:10` would render the Ignition template in the "etcd" `Profile`, with the machine group's metadata. A request to `/ignition` would match the default group (which has no selectors) and render the Ignition in the "etcd-proxy" Profile. Avoid defining multiple default groups as resolution will not be deterministic.
|
||||
|
||||
#### Reserved Selectors
|
||||
|
||||
Group selectors can use any key/value pairs you find useful. However, several labels have a defined purpose and will be normalized or parsed specially.
|
||||
|
||||
* `uuid` - machine UUID
|
||||
* `mac` - network interface physical address (normalized MAC address)
|
||||
* `hostname` - hostname reported by a network boot program
|
||||
* `serial` - serial reported by a network boot program
|
||||
|
||||
### Config Templates
|
||||
|
||||
Profiles can reference various templated configs. Ignition JSON configs can be generated from [Fuze config](https://github.com/coreos/fuze/blob/master/doc/configuration.md) template files. Cloud-Config templates files can be used to render a script or Cloud-Config. Generic template files can be used to render arbitrary untyped configs (experimental). Each template may contain [Go template](https://golang.org/pkg/text/template/) elements which will be rendered with machine group metadata, selectors, and query params.
|
||||
|
||||
For details and examples:
|
||||
|
||||
* [Ignition Config](ignition.md)
|
||||
* [Cloud-Config](cloud-config.md)
|
||||
|
||||
#### Variables
|
||||
|
||||
Within Ignition/Fuze templates, Cloud-Config templates, or generic templates, you can use group metadata, selectors, or request-scoped query params. For example, a request `/generic?mac=52-54-00-89-d8-10&foo=some-param&bar=b` would match the `node1.json` machine group shown above. If the group's profile ("etcd") referenced a generic template, the following variables could be used.
|
||||
|
||||
# Untyped generic config file
|
||||
# Selector
|
||||
{{.mac}} # 52:54:00:89:d8:10 (normalized)
|
||||
# Metadata
|
||||
{{.etcd_name}} # node1
|
||||
{{.fleet_metadata}} # role=etcd,name=node1
|
||||
# Query
|
||||
{{.request.query.mac}} # 52:54:00:89:d8:10 (normalized)
|
||||
{{.request.query.foo}} # some-param
|
||||
{{.request.query.bar}} # b
|
||||
# Special Addition
|
||||
{{.request.raw_query}} # mac=52:54:00:89:d8:10&foo=some-param&bar=b
|
||||
|
||||
Note that `.request` is reserved for these purposes so group metadata with data nested under a top level "request" key will be overwritten.
|
||||
|
||||
## Assets
|
||||
|
||||
`bootcfg` can serve `-assets-path` static assets at `/assets`. This is helpful for reducing bandwidth usage when serving the kernel and initrd to network booted machines. The default assets-path is `/var/lib/bootcfg/assets` or you can pass `-assets-path=""` to disable asset serving.
|
||||
|
||||
bootcfg.foo/assets/
|
||||
└── coreos
|
||||
└── VERSION
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
|
||||
For example, a `Profile` might refer to a local asset `/assets/coreos/VERSION/coreos_production_pxe.vmlinuz` instead of `http://stable.release.core-os.net/amd64-usr/VERSION/coreos_production_pxe.vmlinuz`.
|
||||
|
||||
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place CoreOS assets.
|
||||
|
||||
## Network
|
||||
|
||||
`bootcfg` does not implement or exec a DHCP/TFTP server. Read [network setup](network-setup.md) or use the [coreos/dnsmasq](../contrib/dnsmasq) image if you need a quick DHCP, proxyDHCP, TFTP, or DNS setup.
|
||||
|
||||
## Going Further
|
||||
|
||||
* [gRPC API Usage](config.md#grpc-api)
|
||||
* [Metadata](api.md#metadata)
|
||||
* OpenPGP [Signing](api.md#openpgp-signatures)
|
||||
|
||||
|
||||
|
||||
147
Documentation/bootkube-upgrades.md
Normal file
147
Documentation/bootkube-upgrades.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Upgrading self-hosted Kubernetes
|
||||
|
||||
CoreOS Kubernetes clusters "self-host" the apiserver, scheduler, controller-manager, flannel, kube-dns, and kube-proxy as Kubernetes pods, like ordinary applications (except with taint tolerations). This allows upgrades to be performed in-place using (mostly) `kubectl` as an alternative to re-provisioning.
|
||||
|
||||
Let's upgrade a Kubernetes v1.6.6 cluster to v1.6.7 as an example.
|
||||
|
||||
## Stability
|
||||
|
||||
This guide shows how to attempt an in-place upgrade of a Kubernetes cluster set up via the [examples](../examples). It does not provide exact diffs, migrations between breaking changes, the stability of a fresh re-provision, or any guarantees. Evaluate whether in-place updates are appropriate for your Kubernetes cluster and be prepared to perform a fresh re-provision if something goes wrong, especially between Kubernetes minor releases (e.g. 1.6 to 1.7).
|
||||
|
||||
Matchbox Kubernetes examples provide a vanilla Kubernetes cluster with only free (as in freedom and cost) software components. If you require curated updates, migrations, or guarantees for production, consider [Tectonic](https://coreos.com/tectonic/) by CoreOS.
|
||||
|
||||
**Note: Tectonic users should NOT manually upgrade. Follow the [Tectonic docs](https://coreos.com/tectonic/docs/latest/admin/upgrade.html)**
|
||||
|
||||
## Inspect
|
||||
|
||||
Show the control plane daemonsets and deployments which will need to be updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get daemonsets -n=kube-system
|
||||
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE-SELECTOR AGE
|
||||
kube-apiserver 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
kube-etcd-network-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
kube-flannel 4 4 4 4 4 <none> 21d
|
||||
kube-proxy 4 4 4 4 4 <none> 21d
|
||||
pod-checkpointer 1 1 1 1 1 node-role.kubernetes.io/master= 21d
|
||||
|
||||
$ kubectl get deployments -n=kube-system
|
||||
kube-controller-manager 2 2 2 2 21d
|
||||
kube-dns 1 1 1 1 21d
|
||||
kube-scheduler 2 2 2 2 21d
|
||||
```
|
||||
|
||||
Check the current Kubernetes version.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.6+coreos.1", GitCommit:"42a5c8b99c994a51d9ceaed5d0254f177e97d419", GitTreeState:"clean", BuildDate:"2017-06-21T01:10:07Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 21d v1.6.6+coreos.1
|
||||
node2.example.com Ready 21d v1.6.6+coreos.1
|
||||
node3.example.com Ready 21d v1.6.6+coreos.1
|
||||
node4.example.com Ready 21d v1.6.6+coreos.1
|
||||
```
|
||||
|
||||
## Strategy
|
||||
|
||||
Update control plane components with `kubectl`. Then update the `kubelet` systemd unit on each host.
|
||||
|
||||
Prepare the changes to the Kubernetes manifests by generating assets for a target Kubernetes cluster (e.g. bootkube `v0.5.0` produces Kubernetes 1.6.6 and bootkube `v0.5.1` produces Kubernetes 1.6.7). Choose the tool used during creation of the cluster:
|
||||
|
||||
* [kubernetes-incubator/bootkube](https://github.com/kubernetes-incubator/bootkube) - install the `bootkube` binary for the target version and render assets
|
||||
* [poseidon/bootkube-terraform](https://github.com/poseidon/bootkube-terraform) - checkout the tag for the target version and `terraform apply` to render assets
|
||||
|
||||
Diff the generated assets against the assets used when originally creating the cluster. In simple cases, you may only need to bump the hyperkube image. In more complex cases, some manifests may have new flags or configuration.
|
||||
|
||||
## Control Plane
|
||||
|
||||
### kube-apiserver
|
||||
|
||||
Edit the `kube-apiserver` daemonset to rolling update the apiserver.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kube-apiserver -n=kube-system
|
||||
```
|
||||
|
||||
If you only have one apiserver, the cluster may be momentarily unavailable.
|
||||
|
||||
### kube-scheduler
|
||||
|
||||
Edit the `kube-scheduler` deployment to rolling update the scheduler.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-scheduler -n=kube-system
|
||||
```
|
||||
|
||||
### kube-controller-manager
|
||||
|
||||
Edit the `kube-controller-manager` deployment to rolling update the controller manager.
|
||||
|
||||
```sh
|
||||
$ kubectl edit deployments kube-controller-manager -n=kube-system
|
||||
```
|
||||
|
||||
### kube-proxy
|
||||
|
||||
Edit the `kube-proxy` daemonset to rolling update the proxy.
|
||||
|
||||
```sh
|
||||
$ kubectl edit daemonset kube-proxy -n=kube-system
|
||||
```
|
||||
|
||||
### Others
|
||||
|
||||
If there are changes between the prior version and target version manifests, update the `kube-dns` deployment, `kube-flannel` daemonset, or `pod-checkpointer` daemonset.
|
||||
|
||||
### Verify
|
||||
|
||||
Verify the control plane components updated.
|
||||
|
||||
```sh
|
||||
$ kubectl version
|
||||
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.2", GitCommit:"477efc3cbe6a7effca06bd1452fa356e2201e1ee", GitTreeState:"clean", BuildDate:"2017-04-19T20:33:11Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.7+coreos.0", GitCommit:"c8c505ee26ac3ab4d1dff506c46bc5538bc66733", GitTreeState:"clean", BuildDate:"2017-07-06T17:38:33Z", GoVersion:"go1.7.6", Compiler:"gc", Platform:"linux/amd64"}
|
||||
```
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 21d v1.6.7+coreos.0
|
||||
node2.example.com Ready 21d v1.6.7+coreos.0
|
||||
node3.example.com Ready 21d v1.6.7+coreos.0
|
||||
node4.example.com Ready 21d v1.6.7+coreos.0
|
||||
```
|
||||
|
||||
## kubelet
|
||||
|
||||
SSH to each node and update `/etc/kubernetes/kubelet.env`. Restart the `kubelet.service`.
|
||||
|
||||
```sh
|
||||
ssh core@node1.example.com
|
||||
sudo vim /etc/kubernetes/kubelet.env
|
||||
sudo systemctl restart kubelet
|
||||
```
|
||||
|
||||
### Verify
|
||||
|
||||
Verify the kubelet and kube-proxy of each node updated.
|
||||
|
||||
```sh
|
||||
$ kubectl get nodes -o yaml | grep 'kubeletVersion\|kubeProxyVersion'
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
kubeProxyVersion: v1.6.7+coreos.0
|
||||
kubeletVersion: v1.6.7+coreos.0
|
||||
```
|
||||
|
||||
Kubernetes control plane components have been successfully updated!
|
||||
@@ -1,106 +1,139 @@
|
||||
# Kubernetes
|
||||
|
||||
# Self-Hosted Kubernetes
|
||||
|
||||
The self-hosted Kubernetes example provisions a 3 node Kubernetes v1.3.0-beta.2 cluster with etcd, flannel, and a special "runonce" host Kubelet. The CoreOS [bootkube](https://github.com/coreos/bootkube) tool is used to bootstrap kubelet, apiserver, scheduler, and controller-manager as pods, which can be managed via kubectl. `bootkube start` is run on any controller (master) to create a temporary control-plane and start Kubernetes components initially. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
|
||||
|
||||
## Experimental
|
||||
|
||||
Self-hosted Kubernetes is under very active development by CoreOS.
|
||||
The Kubernetes example provisions a 3 node Kubernetes v1.8.5 cluster. [bootkube](https://github.com/kubernetes-incubator/bootkube) is run once on a controller node to bootstrap Kubernetes control plane components as pods before exiting. An etcd3 cluster across controllers is used to back Kubernetes.
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [bootcfg with rkt](getting-started-rkt.md) guide and understand the basics. In particular, you should be able to:
|
||||
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) or [matchbox with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use rkt to start `bootcfg`
|
||||
* Use rkt or Docker to start `matchbox`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* `/etc/hosts` entries for `node[1-3].example.com`
|
||||
|
||||
Build and install [bootkube](https://github.com/coreos/bootkube/releases) v0.1.1.
|
||||
Install [bootkube](https://github.com/kubernetes-incubator/bootkube/releases) v0.9.1 and add it on your $PATH.
|
||||
|
||||
```sh
|
||||
$ bootkube version
|
||||
Version: v0.9.1
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. The examples can be used for physical machines if you update the MAC/IP addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. The examples can be used for physical machines if you update the MAC addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [bootkube](../examples/groups/bootkube) - iPXE boot a bootkube-ready cluster (use rkt)
|
||||
* [bootkube-install](../examples/groups/bootkube-install) - Install a bootkube-ready cluster (use rkt)
|
||||
* [bootkube](../examples/groups/bootkube) - iPXE boot a self-hosted Kubernetes cluster
|
||||
* [bootkube-install](../examples/groups/bootkube-install) - Install a self-hosted Kubernetes cluster
|
||||
|
||||
### Assets
|
||||
## Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
Download the CoreOS Container Linux image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
./scripts/get-coreos alpha 1053.2.0 ./examples/assets
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
|
||||
```
|
||||
|
||||
Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
|
||||
{
|
||||
"profile": "bootkube-worker",
|
||||
"metadata": {
|
||||
"ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
|
||||
}
|
||||
```json
|
||||
{
|
||||
"profile": "bootkube-worker",
|
||||
"metadata": {
|
||||
"ssh_authorized_keys": ["ssh-rsa pub-key-goes-here"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Later, `bootkube` will schedule these manifests during bootstrapping and the credentials will be used to access your cluster.
|
||||
Use the `bootkube` tool to render Kubernetes manifests and credentials into an `--asset-dir`. Set the `--network-provider` to `flannel` (default) or `experimental-calico` if desired.
|
||||
|
||||
bootkube render --asset-dir=assets --api-servers=https://172.15.0.21:443 --etcd-servers=http://172.15.0.21:2379 --api-server-alt-names=IP=172.15.0.21
|
||||
```sh
|
||||
bootkube render --asset-dir=assets --api-servers=https://node1.example.com:443 --api-server-alt-names=DNS=node1.example.com --etcd-servers=https://node1.example.com:2379
|
||||
```
|
||||
|
||||
Later, a controller will use `bootkube` to bootstrap these manifests and the credentials will be used to access your cluster.
|
||||
|
||||
## Containers
|
||||
|
||||
Run the latest `bootcfg` ACI with rkt and the `bootkube` example (or `bootkube-install`).
|
||||
Use rkt or docker to start `matchbox` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [matchbox with rkt](getting-started-rkt.md) or [matchbox with Docker](getting-started-docker.md) for help.
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/bootkube quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
|
||||
Create a network boot environment and power-on your machines. Revisit [bootcfg with rkt](getting-started-rkt.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot Container Linux and become available via SSH in about 1 minute. If you chose `bootkube-install`, notice that machines install Container Linux and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## bootkube
|
||||
|
||||
We're ready to use [bootkube](https://github.com/coreos/bootkube) to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
|
||||
We're ready to use bootkube to create a temporary control plane and bootstrap a self-hosted Kubernetes cluster.
|
||||
|
||||
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every** node (i.e. repeat for 172.15.0.22, 172.15.0.23).
|
||||
Secure copy the etcd TLS assets to `/etc/ssl/etcd/*` on **every controller** node.
|
||||
|
||||
scp assets/auth/kubeconfig core@172.15.0.21:/home/core/kubeconfig
|
||||
ssh core@172.15.0.21
|
||||
sudo mv kubeconfig /etc/kubernetes/kubeconfig
|
||||
```sh
|
||||
for node in 'node1'; do
|
||||
scp -r assets/tls/etcd-* assets/tls/etcd core@$node.example.com:/home/core/
|
||||
ssh core@$node.example.com 'sudo mkdir -p /etc/ssl/etcd && sudo mv etcd-* etcd /etc/ssl/etcd/ && sudo chown -R etcd:etcd /etc/ssl/etcd && sudo chmod -R 500 /etc/ssl/etcd/'
|
||||
done
|
||||
```
|
||||
|
||||
Secure copy the `bootkube` generated assets to any one of the master nodes.
|
||||
Secure copy the `kubeconfig` to `/etc/kubernetes/kubeconfig` on **every node** to path activate the `kubelet.service`.
|
||||
|
||||
scp -r assets core@172.15.0.21:/home/core/assets
|
||||
```sh
|
||||
for node in 'node1' 'node2' 'node3'; do
|
||||
scp assets/auth/kubeconfig core@$node.example.com:/home/core/kubeconfig
|
||||
ssh core@$node.example.com 'sudo mv kubeconfig /etc/kubernetes/kubeconfig'
|
||||
done
|
||||
```
|
||||
|
||||
SSH to the chosen master node and bootstrap the cluster with `bootkube-start`.
|
||||
Secure copy the `bootkube` generated assets to **any controller** node and run `bootkube-start` (takes ~10 minutes).
|
||||
|
||||
ssh core@172.15.0.21 'sudo ./bootkube-start'
|
||||
```sh
|
||||
scp -r assets core@node1.example.com:/home/core
|
||||
ssh core@node1.example.com 'sudo mv assets /opt/bootkube/assets && sudo systemctl start bootkube'
|
||||
```
|
||||
|
||||
Watch the temporary control plane logs until the scheduled kubelet takes over in place of the runonce host kubelet.
|
||||
Watch the Kubernetes control plane bootstrapping with the bootkube temporary api-server. You will see quite a bit of output.
|
||||
|
||||
I0425 12:38:23.746330 29538 status.go:87] Pod status kubelet: Running
|
||||
I0425 12:38:23.746361 29538 status.go:87] Pod status kube-apiserver: Running
|
||||
I0425 12:38:23.746370 29538 status.go:87] Pod status kube-scheduler: Running
|
||||
I0425 12:38:23.746378 29538 status.go:87] Pod status kube-controller-manager: Running
|
||||
```sh
|
||||
$ ssh core@node1.example.com 'journalctl -f -u bootkube'
|
||||
[ 299.241291] bootkube[5]: Pod Status: kube-api-checkpoint Running
|
||||
[ 299.241618] bootkube[5]: Pod Status: kube-apiserver Running
|
||||
[ 299.241804] bootkube[5]: Pod Status: kube-scheduler Running
|
||||
[ 299.241993] bootkube[5]: Pod Status: kube-controller-manager Running
|
||||
[ 299.311743] bootkube[5]: All self-hosted control plane components successfully started
|
||||
```
|
||||
|
||||
You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. They contain a `kubeconfig` and may need to be re-used if the last apiserver were to fail and bootstrapping were needed.
|
||||
[Verify](#verify) the Kubernetes cluster is accessible once complete. Then install **important** cluster [addons](cluster-addons.md). You may cleanup the `bootkube` assets on the node, but you should keep the copy on your laptop. It contains a `kubeconfig` used to access the cluster.
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the kubelet, apiserver, scheduler, and controller-manager are running as pods.
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster. Verify that the cluster is accessible and that the apiserver, scheduler, and controller-manager are running as pods.
|
||||
|
||||
$ kubectl --kubeconfig=assets/auth/kubeconfig get nodes
|
||||
NAME STATUS AGE
|
||||
172.15.0.21 Ready 3m
|
||||
172.15.0.22 Ready 3m
|
||||
172.15.0.23 Ready 3m
|
||||
```sh
|
||||
$ export KUBECONFIG=assets/auth/kubeconfig
|
||||
$ kubectl get nodes
|
||||
NAME STATUS AGE VERSION
|
||||
node1.example.com Ready 11m v1.8.5
|
||||
node2.example.com Ready 11m v1.8.5
|
||||
node3.example.com Ready 11m v1.8.5
|
||||
|
||||
$ kubectl --kubeconfig=assets/auth/kubeconfig get pods --all-namespaces
|
||||
kube-system kube-api-checkpoint-172.15.0.21 1/1 Running 0 2m
|
||||
kube-system kube-apiserver-wq4mh 2/2 Running 0 2m
|
||||
kube-system kube-controller-manager-2834499578-y9cnl 1/1 Running 0 2m
|
||||
kube-system kube-dns-v11-2259792283-5tpld 4/4 Running 0 2m
|
||||
kube-system kube-proxy-8zr1b 1/1 Running 0 2m
|
||||
kube-system kube-proxy-i9cgw 1/1 Running 0 2m
|
||||
kube-system kube-proxy-n6qg3 1/1 Running 0 2m
|
||||
kube-system kube-scheduler-4136156790-v9892 1/1 Running 0 2m
|
||||
kube-system kubelet-9wilx 1/1 Running 0 2m
|
||||
kube-system kubelet-a6mmj 1/1 Running 0 2m
|
||||
kube-system kubelet-eomnb 1/1 Running 0 2m
|
||||
$ kubectl get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system kube-apiserver-zd1k3 1/1 Running 0 7m
|
||||
kube-system kube-controller-manager-762207937-2ztxb 1/1 Running 0 7m
|
||||
kube-system kube-controller-manager-762207937-vf6bk 1/1 Running 1 7m
|
||||
kube-system kube-dns-2431531914-qc752 3/3 Running 0 7m
|
||||
kube-system kube-flannel-180mz 2/2 Running 1 7m
|
||||
kube-system kube-flannel-jjr0x 2/2 Running 0 7m
|
||||
kube-system kube-flannel-mlr9w 2/2 Running 0 7m
|
||||
kube-system kube-proxy-0jlq7 1/1 Running 0 7m
|
||||
kube-system kube-proxy-k4mjl 1/1 Running 0 7m
|
||||
kube-system kube-proxy-l4xrd 1/1 Running 0 7m
|
||||
kube-system kube-scheduler-1873228005-5d2mk 1/1 Running 0 7m
|
||||
kube-system kube-scheduler-1873228005-s4w27 1/1 Running 0 7m
|
||||
kube-system pod-checkpointer-hb960 1/1 Running 0 7m
|
||||
kube-system pod-checkpointer-hb960-node1.example.com 1/1 Running 0 6m
|
||||
```
|
||||
|
||||
Try deleting pods to see that the cluster is resilient to failures and machine restarts (CoreOS auto-updates).
|
||||
## Addons
|
||||
|
||||
Install **important** cluster [addons](cluster-addons.md).
|
||||
|
||||
## Going further
|
||||
|
||||
[Learn](bootkube-upgrades.md) to upgrade a self-hosted Kubernetes cluster.
|
||||
|
||||
@@ -1,40 +1,46 @@
|
||||
|
||||
# Cloud Config
|
||||
# Cloud config
|
||||
|
||||
**Note:** We recommend migrating to [Ignition](ignition.md) for hardware provisioning.
|
||||
**Note:** Please migrate to [Container Linux Configs](container-linux-config.md). Cloud-Config support will be removed in the future.
|
||||
|
||||
CoreOS Cloud-Config is a system for configuring machines with a Cloud-Config file or executable script from user-data. Cloud-Config runs in userspace on each boot and implements a subset of the [cloud-init spec](http://cloudinit.readthedocs.org/en/latest/topics/format.html#cloud-config-data). See the cloud-config [docs](https://coreos.com/os/docs/latest/cloud-config.html) for details.
|
||||
|
||||
Cloud-Config template files can be added in `/var/lib/bootcfg/cloud` or in a `cloud` subdirectory of a custom `-data-path`. Template files may contain [Go template](https://golang.org/pkg/text/template/) elements which will be evaluated with group metadata, selectors, and query params.
|
||||
Cloud-Config template files can be added in `/var/lib/matchbox/cloud` or in a `cloud` subdirectory of a custom `-data-path`. Template files may contain [Go template](https://golang.org/pkg/text/template/) elements which will be evaluated with group metadata, selectors, and query params.
|
||||
|
||||
/var/lib/bootcfg
|
||||
├── cloud
|
||||
│ ├── cloud.yaml
|
||||
│ └── script.sh
|
||||
├── ignition
|
||||
└── profiles
|
||||
```
|
||||
/var/lib/matchbox
|
||||
├── cloud
|
||||
│ ├── cloud.yaml
|
||||
│ └── script.sh
|
||||
├── ignition
|
||||
└── profiles
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
Reference a Cloud-Config in a [Profile](bootcfg.md#profiles) with `cloud_id`. When PXE booting, use the kernel option `cloud-config-url` to point to `bootcfg` [cloud-config endpoint](api.md#cloud-config).
|
||||
Reference a Cloud-Config in a [Profile](matchbox.md#profiles) with `cloud_id`. When PXE booting, use the kernel option `cloud-config-url` to point to `matchbox` [cloud-config endpoint](api.md#cloud-config).
|
||||
|
||||
## Examples
|
||||
|
||||
Here is an example Cloud-Config which starts some units and writes a file.
|
||||
|
||||
#cloud-config
|
||||
coreos:
|
||||
units:
|
||||
- name: etcd2.service
|
||||
command: start
|
||||
- name: fleet.service
|
||||
command: start
|
||||
write_files:
|
||||
- path: "/home/core/welcome"
|
||||
owner: "core"
|
||||
permissions: "0644"
|
||||
content: |
|
||||
{{.greeting}}
|
||||
<!-- {% raw %} -->
|
||||
```yaml
|
||||
#cloud-config
|
||||
coreos:
|
||||
units:
|
||||
- name: etcd2.service
|
||||
command: start
|
||||
- name: fleet.service
|
||||
command: start
|
||||
write_files:
|
||||
- path: "/home/core/welcome"
|
||||
owner: "core"
|
||||
permissions: "0644"
|
||||
content: |
|
||||
{{.greeting}}
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
The Cloud-Config [Validator](https://coreos.com/validate/) is also useful for checking your Cloud-Config files for errors.
|
||||
|
||||
|
||||
30
Documentation/cluster-addons.md
Normal file
30
Documentation/cluster-addons.md
Normal file
@@ -0,0 +1,30 @@
|
||||
## Cluster Addons
|
||||
|
||||
Kubernetes clusters run cluster addons atop Kubernetes itself. Addons may be considered essential for bootstrapping (non-optional), important (highly recommended), or optional.
|
||||
|
||||
## Essential
|
||||
|
||||
Several addons are considered essential. CoreOS cluster creation tools ensure these addons are included. Kubernetes clusters deployed via the Matchbox examples or using our Terraform Modules include these addons as well.
|
||||
|
||||
### kube-proxy
|
||||
|
||||
`kube-proxy` is deployed as a DaemonSet.
|
||||
|
||||
### kube-dns
|
||||
|
||||
`kube-dns` is deployed as a Deployment.
|
||||
|
||||
## Important
|
||||
|
||||
### Container Linux Update Operator
|
||||
|
||||
The [Container Linux Update Operator](https://github.com/coreos/container-linux-update-operator) (i.e. CLUO) coordinates reboots of auto-updating Container Linux nodes so that one node reboots at a time and nodes are drained before reboot. CLUO enables the auto-update behavior Container Linux clusters are known for, but does it in a Kubernetes native way. Deploying CLUO is strongly recommended.
|
||||
|
||||
Create the `update-operator` deployment and `update-agent` DaemonSet.
|
||||
|
||||
```
|
||||
kubectl apply -f examples/addons/cluo/update-operator.yaml
|
||||
kubectl apply -f examples/addons/cluo/update-agent.yaml
|
||||
```
|
||||
|
||||
*Note, CLUO replaces `locksmithd` reboot coordination. The `update_engine` systemd unit on hosts still performs the Container Linux update check, download, and install to the inactive partition.*
|
||||
@@ -1,109 +1,139 @@
|
||||
|
||||
# Flags and Variables
|
||||
# Flags and variables
|
||||
|
||||
Configuration arguments can be provided as flags or as environment variables.
|
||||
|
||||
| flag | variable | default | example |
|
||||
|------|----------|---------|---------|
|
||||
| -address | BOOTCFG_ADDRESS | 127.0.0.1:8080 | 0.0.0.0:8080 |
|
||||
| -log-level | BOOTCFG_LOG_LEVEL | info | critical, error, warning, notice, info, debug |
|
||||
| -data-path | BOOTCFG_DATA_PATH | /var/lib/bootcfg | ./examples |
|
||||
| -assets-path | BOOTCFG_ASSETS_PATH | /var/lib/bootcfg/assets | ./examples/assets |
|
||||
| -rpc-address | BOOTCFG_RPC_ADDRESS | (gRPC API disabled) | 0.0.0.0:8081 |
|
||||
| -cert-file | BOOTCFG_CERT_FILE | /etc/bootcfg/server.crt | ./examples/etc/bootcfg/server.crt |
|
||||
| -key-file | BOOTCFG_KEY_FILE | /etc/bootcfg/server.key | ./examples/etc/bootcfg/server.key
|
||||
| -ca-file | BOOTCFG_CA_FILE | /etc/bootcfg/ca.crt | ./examples/etc/bootcfg/ca.crt |
|
||||
| -key-ring-path | BOOTCFG_KEY_RING_PATH | (no key ring) | ~/.secrets/vault/bootcfg/secring.gpg |
|
||||
| (no flag) | BOOTCFG_PASSPHRASE | (no passphrase) | "secret passphrase" |
|
||||
| -address | MATCHBOX_ADDRESS | 127.0.0.1:8080 | 0.0.0.0:8080 |
|
||||
| -log-level | MATCHBOX_LOG_LEVEL | info | critical, error, warning, notice, info, debug |
|
||||
| -data-path | MATCHBOX_DATA_PATH | /var/lib/matchbox | ./examples |
|
||||
| -assets-path | MATCHBOX_ASSETS_PATH | /var/lib/matchbox/assets | ./examples/assets |
|
||||
| -rpc-address | MATCHBOX_RPC_ADDRESS | (gRPC API disabled) | 0.0.0.0:8081 |
|
||||
| -cert-file | MATCHBOX_CERT_FILE | /etc/matchbox/server.crt | ./examples/etc/matchbox/server.crt |
|
||||
| -key-file | MATCHBOX_KEY_FILE | /etc/matchbox/server.key | ./examples/etc/matchbox/server.key
|
||||
| -ca-file | MATCHBOX_CA_FILE | /etc/matchbox/ca.crt | ./examples/etc/matchbox/ca.crt |
|
||||
| -key-ring-path | MATCHBOX_KEY_RING_PATH | (no key ring) | ~/.secrets/vault/matchbox/secring.gpg |
|
||||
| (no flag) | MATCHBOX_PASSPHRASE | (no passphrase) | "secret passphrase" |
|
||||
|
||||
## Files and Directories
|
||||
## Files and directories
|
||||
|
||||
| Data | Default Location |
|
||||
|:---------|:--------------------------------------------------|
|
||||
| data | /var/lib/bootcfg/{profiles,groups,ignition,cloud,generic} |
|
||||
| assets | /var/lib/bootcfg/assets |
|
||||
| data | /var/lib/matchbox/{profiles,groups,ignition,cloud,generic} |
|
||||
| assets | /var/lib/matchbox/assets |
|
||||
|
||||
| gRPC API TLS Credentials | Default Location |
|
||||
|:---------|:--------------------------------------------------|
|
||||
| CA certificate | /etc/bootcfg/ca.crt |
|
||||
| Server certificate | /etc/bootcfg/server.crt |
|
||||
| Server private key | /etc/bootcfg/server.key |
|
||||
| Client certificate | /etc/bootcfg/client.crt |
|
||||
| Client private key | /etc/bootcfg/client.key |
|
||||
| CA certificate | /etc/matchbox/ca.crt |
|
||||
| Server certificate | /etc/matchbox/server.crt |
|
||||
| Server private key | /etc/matchbox/server.key |
|
||||
| Client certificate | /etc/matchbox/client.crt |
|
||||
| Client private key | /etc/matchbox/client.key |
|
||||
|
||||
## Version
|
||||
|
||||
./bin/bootcfg -version
|
||||
sudo rkt run quay.io/coreos/bootcfg:latest -- -version
|
||||
sudo docker run quay.io/coreos/bootcfg:latest -version
|
||||
```sh
|
||||
$ ./bin/matchbox -version
|
||||
$ sudo rkt run quay.io/coreos/matchbox:latest -- -version
|
||||
$ sudo docker run quay.io/coreos/matchbox:latest -version
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Run the binary.
|
||||
|
||||
./bin/bootcfg -address=0.0.0.0:8080 -log-level=debug -data-path=examples -assets-path=examples/assets
|
||||
```sh
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -log-level=debug -data-path=examples -assets-path=examples/assets
|
||||
```
|
||||
|
||||
Run the latest ACI with rkt.
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --mount volume=assets,target=/var/lib/bootcfg/assets --volume assets,kind=host,source=$PWD/examples/assets quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```sh
|
||||
$ sudo rkt run --mount volume=assets,target=/var/lib/matchbox/assets --volume assets,kind=host,source=$PWD/examples/assets quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
Run the latest Docker image.
|
||||
|
||||
sudo docker run -p 8080:8080 --rm -v $PWD/examples/assets:/var/lib/bootcfg/assets:Z quay.io/coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples/assets:/var/lib/matchbox/assets:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
#### With Examples
|
||||
### With examples
|
||||
|
||||
Mount `examples` to pre-load the [example](../examples/README.md) machine groups and profiles. Run the container with rkt,
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
or with Docker.
|
||||
|
||||
sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/groups/etcd:/var/lib/bootcfg/groups:Z quay.io/coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
### gRPC API
|
||||
### With gRPC API
|
||||
|
||||
The gRPC API allows clients with a TLS client certificate and key to make RPC requests to programmatically create or update `bootcfg` resources. The API can be enabled with the `-rpc-address` flag and by providing a TLS server certificate and key with `-cert-file` and `-key-file` and a CA certificate for authenticating clients with `-ca-file`.
|
||||
The gRPC API allows clients with a TLS client certificate and key to make RPC requests to programmatically create or update `matchbox` resources. The API can be enabled with the `-rpc-address` flag and by providing a TLS server certificate and key with `-cert-file` and `-key-file` and a CA certificate for authenticating clients with `-ca-file`.
|
||||
|
||||
Run the binary with TLS credentials from `examples/etc/bootcfg`.
|
||||
Run the binary with TLS credentials from `examples/etc/matchbox`.
|
||||
|
||||
./bin/bootcfg -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug -data-path=examples -assets-path=examples/assets -cert-file examples/etc/bootcfg/server.crt -key-file examples/etc/bootcfg/server.key -ca-file examples/etc/bootcfg/ca.crt
|
||||
```sh
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug -data-path=examples -assets-path=examples/assets -cert-file examples/etc/matchbox/server.crt -key-file examples/etc/matchbox/server.key -ca-file examples/etc/matchbox/ca.crt
|
||||
```
|
||||
|
||||
Clients, such as `bootcmd`, verify the server's certificate with a CA bundle passed via `-ca-file` and present a client certificate and key via `-cert-file` and `-key-file` to call the gRPC API.
|
||||
|
||||
./bin/bootcmd profile list --endpoints 127.0.0.1:8081 --ca-file examples/etc/bootcfg/ca.crt --cert-file examples/etc/bootcfg/client.crt --key-file examples/etc/bootcfg/client.key
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 127.0.0.1:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
#### With rkt
|
||||
### With rkt
|
||||
|
||||
Run the ACI with rkt and TLS credentials from `examples/etc/bootcfg`.
|
||||
Run the ACI with rkt and TLS credentials from `examples/etc/matchbox`.
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples,readOnly=true --mount volume=config,target=/etc/bootcfg --volume config,kind=host,source=$PWD/examples/etc/bootcfg --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples,readOnly=true --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=$PWD/examples/etc/matchbox --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
A `bootcmd` client can call the gRPC API running at the IP used in the rkt example.
|
||||
|
||||
./bin/bootcmd profile list --endpoints 172.15.0.2:8081 --ca-file examples/etc/bootcfg/ca.crt --cert-file examples/etc/bootcfg/client.crt --key-file examples/etc/bootcfg/client.key
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
#### With docker
|
||||
### With docker
|
||||
|
||||
Run the Docker image with TLS credentials from `examples/etc/bootcfg`.
|
||||
Run the Docker image with TLS credentials from `examples/etc/matchbox`.
|
||||
|
||||
sudo docker run -p 8080:8080 -p 8081:8081 --rm -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/etc/bootcfg:/etc/bootcfg:Z,ro -v $PWD/examples/groups/etcd:/var/lib/bootcfg/groups:Z quay.io/coreos/bootcfg:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 -p 8081:8081 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/etc/matchbox:/etc/matchbox:Z,ro -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
A `bootcmd` client can call the gRPC API running at the IP used in the Docker example.
|
||||
|
||||
./bin/bootcmd profile list --endpoints 127.0.0.1:8081 --ca-file examples/etc/bootcfg/ca.crt --cert-file examples/etc/bootcfg/client.crt --key-file examples/etc/bootcfg/client.key
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 127.0.0.1:8081 --ca-file examples/etc/matchbox/ca.crt --cert-file examples/etc/matchbox/client.crt --key-file examples/etc/matchbox/client.key
|
||||
```
|
||||
|
||||
### OpenPGP [Signing](openpgp.md)
|
||||
### With OpenPGP [Signing](openpgp.md)
|
||||
|
||||
Run the binary with a test key.
|
||||
|
||||
export BOOTCFG_PASSPHRASE=test
|
||||
./bin/bootcfg -address=0.0.0.0:8080 -key-ring-path bootcfg/sign/fixtures/secring.gpg -data-path=examples -assets-path=examples/assets
|
||||
```sh
|
||||
$ export MATCHBOX_PASSPHRASE=test
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -key-ring-path matchbox/sign/fixtures/secring.gpg -data-path=examples -assets-path=examples/assets
|
||||
```
|
||||
|
||||
Run the ACI with a test key.
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --set-env=BOOTCFG_PASSPHRASE=test --mount volume=secrets,target=/secrets --volume secrets,kind=host,source=$PWD/bootcfg/sign/fixtures --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -key-ring-path secrets/secring.gpg
|
||||
```sh
|
||||
$ sudo rkt run --net=metal0:IP=172.18.0.2 --set-env=MATCHBOX_PASSPHRASE=test --mount volume=secrets,target=/secrets --volume secrets,kind=host,source=$PWD/matchbox/sign/fixtures --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/matchbox:latest -- -address=0.0.0.0:8080 -key-ring-path secrets/secring.gpg
|
||||
```
|
||||
|
||||
Run the Docker image with a test key.
|
||||
|
||||
sudo docker run -p 8080:8080 --rm --env BOOTCFG_PASSPHRASE=test -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/groups/etcd:/var/lib/bootcfg/groups:Z -v $PWD/bootcfg/sign/fixtures:/secrets:Z quay.io/coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug -key-ring-path secrets/secring.gpg
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm --env MATCHBOX_PASSPHRASE=test -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z -v $PWD/matchbox/sign/fixtures:/secrets:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug -key-ring-path secrets/secring.gpg
|
||||
```
|
||||
|
||||
144
Documentation/container-linux-config.md
Normal file
144
Documentation/container-linux-config.md
Normal file
@@ -0,0 +1,144 @@
|
||||
# Container Linux Configs
|
||||
|
||||
A Container Linux Config is a YAML document which declares how Container Linux instances' disks should be provisioned on network boot and first-boot from disk. Configs can declare disk partitions, write files (regular files, systemd units, networkd units, etc.), and configure users. See the Container Linux Config [spec](https://coreos.com/os/docs/latest/configuration.html).
|
||||
|
||||
### Ignition
|
||||
|
||||
Container Linux Configs are validated and converted to *machine-friendly* Ignition configs (JSON) by matchbox when serving to booting machines. [Ignition](https://coreos.com/ignition/docs/latest/), the provisioning utility shipped in Container Linux, will parse and execute the Ignition config to realize the desired configuration. Matchbox users usually only need to write Container Linux Configs.
|
||||
|
||||
*Note: Container Linux directory names are still named "ignition" for historical reasons as outlined below. A future breaking change will rename to "container-linux-config".*
|
||||
|
||||
## Adding Container Linux Configs
|
||||
|
||||
Container Linux Config templates can be added to the `/var/lib/matchbox/ignition` directory or in an `ignition` subdirectory of a custom `-data-path`. Template files may contain [Go template](https://golang.org/pkg/text/template/) elements which will be evaluated with group metadata, selectors, and query params.
|
||||
|
||||
```
|
||||
/var/lib/matchbox
|
||||
├── cloud
|
||||
├── ignition
|
||||
│ └── k8s-controller.yaml
|
||||
│ └── etcd.yaml
|
||||
│ └── k8s-worker.yaml
|
||||
│ └── raw.ign
|
||||
└── profiles
|
||||
```
|
||||
|
||||
## Referencing in Profiles
|
||||
|
||||
Profiles can include a Container Linux Config for provisioning machines. Specify the Container Linux Config in a [Profile](matchbox.md#profiles) with `ignition_id`. When PXE booting, use the kernel option `coreos.first_boot=1` and `coreos.config.url` to point to the `matchbox` [Ignition endpoint](api.md#ignition-config).
|
||||
|
||||
## Examples
|
||||
|
||||
Here is an example Container Linux Config template. Variables will be interpreted using group metadata, selectors, and query params. Matchbox will convert the config to Ignition to serve Container Linux machines.
|
||||
|
||||
ignition/format-disk.yaml.tmpl:
|
||||
|
||||
<!-- {% raw %} -->
|
||||
```yaml
|
||||
|
||||
---
|
||||
storage:
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: root
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
files:
|
||||
- filesystem: root
|
||||
path: /home/core/foo
|
||||
mode: 0644
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
{{.example_contents}}
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
The Ignition config response (formatted) to a query `/ignition?label=value` for a Container Linux instance supporting Ignition 2.0.0 would be:
|
||||
|
||||
```json
|
||||
{
|
||||
"ignition": {
|
||||
"version": "2.0.0",
|
||||
"config": {}
|
||||
},
|
||||
"storage": {
|
||||
"disks": [
|
||||
{
|
||||
"device": "/dev/sda",
|
||||
"wipeTable": true,
|
||||
"partitions": [
|
||||
{
|
||||
"label": "ROOT",
|
||||
"number": 0,
|
||||
"size": 0,
|
||||
"start": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"filesystems": [
|
||||
{
|
||||
"name": "root",
|
||||
"mount": {
|
||||
"device": "/dev/sda1",
|
||||
"format": "ext4",
|
||||
"create": {
|
||||
"force": true,
|
||||
"options": [
|
||||
"-LROOT"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"files": [
|
||||
{
|
||||
"filesystem": "root",
|
||||
"path": "/home/core/foo",
|
||||
"contents": {
|
||||
"source": "data:,Example%20file%20contents%0A",
|
||||
"verification": {}
|
||||
},
|
||||
"mode": 420,
|
||||
"user": {
|
||||
"id": 500
|
||||
},
|
||||
"group": {
|
||||
"id": 500
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"systemd": {},
|
||||
"networkd": {},
|
||||
"passwd": {}
|
||||
}
|
||||
```
|
||||
|
||||
See [examples/ignition](../examples/ignition) for numerous Container Linux Config template examples.
|
||||
|
||||
### Raw Ignition
|
||||
|
||||
If you prefer to design your own templating solution, raw Ignition files (suffixed with `.ign` or `.ignition`) are served directly.
|
||||
@@ -1,118 +1,337 @@
|
||||
# Installation
|
||||
|
||||
# Deployment
|
||||
This guide walks through deploying the `matchbox` service on a Linux host (via RPM, rkt, docker, or binary) or on a Kubernetes cluster.
|
||||
|
||||
## Provisioner
|
||||
|
||||
`matchbox` is a service for network booting and provisioning machines to create CoreOS Container Linux clusters. `matchbox` should be installed on a provisioner machine (Container Linux or any Linux distribution) or cluster (Kubernetes) which can serve configs to client machines in a lab or datacenter.
|
||||
|
||||
Choose one of the supported installation options:
|
||||
|
||||
* [CoreOS Container Linux (rkt)](#coreos-container-linux)
|
||||
* [RPM-based](#rpm-based-distro)
|
||||
* [Generic Linux (binary)](#generic-linux)
|
||||
* [With rkt](#rkt)
|
||||
* [With docker](#docker)
|
||||
* [Kubernetes Service](#kubernetes)
|
||||
|
||||
## Download
|
||||
|
||||
Download the latest matchbox [release](https://github.com/coreos/matchbox/releases) to the provisioner host.
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.7.1/matchbox-v0.7.1-linux-amd64.tar.gz
|
||||
$ wget https://github.com/coreos/matchbox/releases/download/v0.7.1/matchbox-v0.7.1-linux-amd64.tar.gz.asc
|
||||
```
|
||||
|
||||
Verify the release has been signed by the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/).
|
||||
|
||||
```sh
|
||||
$ gpg --keyserver pgp.mit.edu --recv-key 18AD5014C99EF7E3BA5F6CE950BDD3E0FC8A365E
|
||||
$ gpg --verify matchbox-v0.7.1-linux-amd64.tar.gz.asc matchbox-v0.7.1-linux-amd64.tar.gz
|
||||
# gpg: Good signature from "CoreOS Application Signing Key <security@coreos.com>"
|
||||
```
|
||||
|
||||
Untar the release.
|
||||
|
||||
```sh
|
||||
$ tar xzvf matchbox-v0.7.1-linux-amd64.tar.gz
|
||||
$ cd matchbox-v0.7.1-linux-amd64
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
### RPM-based distro
|
||||
|
||||
On an RPM-based provisioner (Fedora 24+), install the `matchbox` RPM from the Copr [repository](https://copr.fedorainfracloud.org/coprs/g/CoreOS/matchbox/) using `dnf`.
|
||||
|
||||
```sh
|
||||
dnf copr enable @CoreOS/matchbox
|
||||
dnf install matchbox
|
||||
```
|
||||
|
||||
RPMs are not currently available for CentOS and RHEL (due to Go version). CentOS and RHEL users should follow the Generic Linux section below.
|
||||
|
||||
### CoreOS Container Linux
|
||||
|
||||
On a Container Linux provisioner, rkt run `matchbox` image with the provided systemd unit.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-on-coreos.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
### Generic Linux
|
||||
|
||||
Pre-built binaries are available for generic Linux distributions. Copy the `matchbox` static binary to an appropriate location on the host.
|
||||
|
||||
```sh
|
||||
$ sudo cp matchbox /usr/local/bin
|
||||
```
|
||||
|
||||
#### Set up User/Group
|
||||
|
||||
The `matchbox` service should be run by a non-root user with access to the `matchbox` data directory (`/var/lib/matchbox`). Create a `matchbox` user and group.
|
||||
|
||||
```sh
|
||||
$ sudo useradd -U matchbox
|
||||
$ sudo mkdir -p /var/lib/matchbox/assets
|
||||
$ sudo chown -R matchbox:matchbox /var/lib/matchbox
|
||||
```
|
||||
|
||||
#### Create systemd service
|
||||
|
||||
Copy the provided `matchbox` systemd unit file.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-local.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
## Customization
|
||||
|
||||
Customize matchbox by editing the systemd unit or adding a systemd dropin. Find the complete set of `matchbox` flags and environment variables at [config](config.md).
|
||||
|
||||
```sh
|
||||
$ sudo systemctl edit matchbox
|
||||
```
|
||||
|
||||
By default, the read-only HTTP machine endpoint will be exposed on port **8080**.
|
||||
|
||||
```ini
|
||||
# /etc/systemd/system/matchbox.service.d/override.conf
|
||||
[Service]
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_LOG_LEVEL=debug"
|
||||
```
|
||||
|
||||
A common customization is enabling the gRPC API to allow clients with a TLS client certificate to change machine configs.
|
||||
|
||||
```ini
|
||||
# /etc/systemd/system/matchbox.service.d/override.conf
|
||||
[Service]
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
```
|
||||
|
||||
The Tectonic [Installer](https://tectonic.com/enterprise/docs/latest/install/bare-metal/index.html) uses this API. Tectonic users with a Container Linux provisioner can start with an example that enables it.
|
||||
|
||||
```sh
|
||||
$ sudo cp contrib/systemd/matchbox-for-tectonic.service /etc/systemd/system/matchbox.service
|
||||
```
|
||||
|
||||
Customize `matchbox` to suit your preferences.
|
||||
|
||||
## Firewall
|
||||
|
||||
Allow your port choices on the provisioner's firewall so the clients can access the service. Here are the commands for those using `firewalld`:
|
||||
|
||||
```sh
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8080/tcp --permanent
|
||||
$ sudo firewall-cmd --zone=MYZONE --add-port=8081/tcp --permanent
|
||||
```
|
||||
|
||||
## Generate TLS Certificates
|
||||
|
||||
The Matchbox gRPC API allows clients (terraform-provider-matchbox) to create and update Matchbox resources. TLS credentials are needed for client authentication and to establish a secure communication channel. Client machines (those PXE booting) read from the HTTP endpoints and do not require this setup.
|
||||
|
||||
The `cert-gen` helper script generates a self-signed CA, server certificate, and client certificate. **Prefer your organization's PKI, if possible**
|
||||
|
||||
Navigate to the `scripts/tls` directory.
|
||||
|
||||
```sh
|
||||
$ cd scripts/tls
|
||||
```
|
||||
|
||||
Export `SAN` to set the Subject Alt Names which should be used in certificates. Provide the fully qualified domain name or IP (discouraged) where Matchbox will be installed.
|
||||
|
||||
```sh
|
||||
# DNS or IP Subject Alt Names where matchbox runs
|
||||
$ export SAN=DNS.1:matchbox.example.com,IP.1:172.18.0.2
|
||||
```
|
||||
|
||||
Generate a `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`.
|
||||
|
||||
```sh
|
||||
$ ./cert-gen
|
||||
```
|
||||
|
||||
Move TLS credentials to the matchbox server's default location.
|
||||
|
||||
```sh
|
||||
$ sudo mkdir -p /etc/matchbox
|
||||
$ sudo cp ca.crt server.crt server.key /etc/matchbox
|
||||
```
|
||||
|
||||
Save `client.crt`, `client.key`, and `ca.crt` for later use (e.g. `~/.matchbox`).
|
||||
|
||||
```sh
|
||||
$ mkdir -p ~/.matchbox
|
||||
$ cp client.crt client.key ca.crt ~/.matchbox/
|
||||
```
|
||||
|
||||
## Start matchbox
|
||||
|
||||
Start the `matchbox` service and enable it if you'd like it to start on every boot.
|
||||
|
||||
```sh
|
||||
$ sudo systemctl daemon-reload
|
||||
$ sudo systemctl start matchbox
|
||||
$ sudo systemctl enable matchbox
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
Verify the matchbox service is running and can be reached by client machines (those being provisioned).
|
||||
|
||||
```sh
|
||||
$ systemctl status matchbox
|
||||
$ dig matchbox.example.com
|
||||
```
|
||||
|
||||
Verify you receive a response from the HTTP and API endpoints.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080
|
||||
matchbox
|
||||
```
|
||||
|
||||
If you enabled the gRPC API,
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 -CAfile /etc/matchbox/ca.crt -cert scripts/tls/client.crt -key scripts/tls/client.key
|
||||
CONNECTED(00000003)
|
||||
depth=1 CN = fake-ca
|
||||
verify return:1
|
||||
depth=0 CN = fake-server
|
||||
verify return:1
|
||||
---
|
||||
Certificate chain
|
||||
0 s:/CN=fake-server
|
||||
i:/CN=fake-ca
|
||||
---
|
||||
....
|
||||
```
|
||||
|
||||
## Download Container Linux (optional)
|
||||
|
||||
`matchbox` can serve Container Linux images in development or lab environments to reduce bandwidth usage and increase the speed of Container Linux PXE boots and installs to disk.
|
||||
|
||||
Download a recent Container Linux [release](https://coreos.com/releases/) with signatures.
|
||||
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1576.5.0 . # note the "." 3rd argument
|
||||
```
|
||||
|
||||
Move the images to `/var/lib/matchbox/assets`,
|
||||
|
||||
```sh
|
||||
$ sudo cp -r coreos /var/lib/matchbox/assets
|
||||
```
|
||||
|
||||
```
|
||||
/var/lib/matchbox/assets/
|
||||
├── coreos
|
||||
│ └── 1576.5.0
|
||||
│ ├── CoreOS_Image_Signing_Key.asc
|
||||
│ ├── coreos_production_image.bin.bz2
|
||||
│ ├── coreos_production_image.bin.bz2.sig
|
||||
│ ├── coreos_production_pxe_image.cpio.gz
|
||||
│ ├── coreos_production_pxe_image.cpio.gz.sig
|
||||
│ ├── coreos_production_pxe.vmlinuz
|
||||
│ └── coreos_production_pxe.vmlinuz.sig
|
||||
```
|
||||
|
||||
and verify the images are accessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080/assets/coreos/1576.5.0/
|
||||
<pre>...
|
||||
```
|
||||
|
||||
For large production environments, use a cache proxy or mirror suitable for your environment to serve Container Linux images. See [contrib/squid](../contrib/squid/README.md) for details.
|
||||
|
||||
## Network
|
||||
|
||||
Review [network setup](https://github.com/coreos/matchbox/blob/master/Documentation/network-setup.md) with your network administrator to set up DHCP, TFTP, and DNS services on your network. At a high level, your goals are to:
|
||||
|
||||
* Chainload PXE firmwares to iPXE
|
||||
* Point iPXE client machines to the `matchbox` iPXE HTTP endpoint `http://matchbox.example.com:8080/boot.ipxe`
|
||||
* Ensure `matchbox.example.com` resolves to your `matchbox` deployment
|
||||
|
||||
CoreOS provides [dnsmasq](https://github.com/coreos/matchbox/tree/master/contrib/dnsmasq) as `quay.io/coreos/dnsmasq`, if you wish to use rkt or Docker.
|
||||
|
||||
## rkt
|
||||
|
||||
Run the most recent tagged and signed `bootcfg` [release](https://github.com/coreos/coreos-baremetal/releases) ACI. Trust the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/) for image signature verification.
|
||||
Run the container image with rkt.
|
||||
|
||||
sudo rkt trust --prefix coreos.com/bootcfg
|
||||
# gpg key fingerprint is: 18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E
|
||||
sudo rkt run --net=host --mount volume=assets,target=/var/lib/bootcfg/assets --volume assets,kind=host,source=$PWD/examples/assets quay.io/coreos/bootcfg:v0.4.0 -- -address=0.0.0.0:8080 -log-level=debug
|
||||
latest or most recent tagged `matchbox` [release](https://github.com/coreos/matchbox/releases) ACI. Trust the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/) for image signature verification.
|
||||
|
||||
Create machine profiles, groups, or Ignition configs at runtime with `bootcmd` or by using your own `/var/lib/bootcfg` volume mounts.
|
||||
```sh
|
||||
$ mkdir -p /var/lib/matchbox/assets
|
||||
$ sudo rkt run --net=host --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=/var/lib/matchbox quay.io/coreos/matchbox:latest --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=/etc/matchbox,readOnly=true -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Create machine profiles, groups, or Ignition configs by adding files to `/var/lib/matchbox`.
|
||||
|
||||
## Docker
|
||||
|
||||
Run the latest or the most recently tagged `bootcfg` [release](https://github.com/coreos/coreos-baremetal/releases) Docker image.
|
||||
Run the container image with docker.
|
||||
|
||||
sudo docker run --net=host --rm -v $PWD/examples/assets:/var/lib/bootcfg/assets:Z quay.io/coreos/bootcfg:v0.4.0 -address=0.0.0.0:8080 -log-level=debug
|
||||
```sh
|
||||
$ mkdir -p /var/lib/matchbox/assets
|
||||
$ sudo docker run --net=host --rm -v /var/lib/matchbox:/var/lib/matchbox:Z -v /etc/matchbox:/etc/matchbox:Z,ro quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Create machine profiles, groups, or Ignition configs at runtime with `bootcmd` or by using your own `/var/lib/bootcfg` volume mounts.
|
||||
Create machine profiles, groups, or Ignition configs by adding files to `/var/lib/matchbox`.
|
||||
|
||||
## Kubernetes
|
||||
|
||||
*Note: Enhancements to the gRPC API, CLI, and `EtcdStore` backend will improve this deployment strategy in the future.*
|
||||
Install `matchbox` on a Kubernetes cluster by creating a deployment and service.
|
||||
|
||||
Create a `bootcfg` Kubernetes `Deployment` and `Service` based on the example manifests provided in [contrib/k8s](../contrib/k8s).
|
||||
```sh
|
||||
$ kubectl apply -f contrib/k8s/matchbox-deployment.yaml
|
||||
$ kubectl apply -f contrib/k8s/matchbox-service.yaml
|
||||
$ kubectl get services
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
matchbox 10.3.0.145 <none> 8080/TCP,8081/TCP 46m
|
||||
```
|
||||
|
||||
kubectl apply -f contrib/k8s/bootcfg-deployment.yaml
|
||||
kubectl apply -f contrib/k8s/bootcfg-service.yaml
|
||||
Example manifests in [contrib/k8s](../contrib/k8s) enable the gRPC API to allow client apps to update matchbox objects. Generate TLS server credentials for `matchbox-rpc.example.com` [as shown](#generate-tls-credentials) and create a Kubernetes secret. Alternately, edit the example manifests if you don't need the gRPC API enabled.
|
||||
|
||||
The `bootcfg` HTTP server should be exposed on NodePort `tcp:31488` on each node in the cluster. `BOOTCFG_LOG_LEVEL` is set to debug.
|
||||
```sh
|
||||
$ kubectl create secret generic matchbox-rpc --from-file=ca.crt --from-file=server.crt --from-file=server.key
|
||||
```
|
||||
|
||||
kubectl get deployments
|
||||
kubectl get services
|
||||
kubectl get pods
|
||||
kubectl logs POD-NAME
|
||||
Create an Ingress resource to expose the HTTP read-only and gRPC API endpoints. The Ingress example requires the cluster to have a functioning [Nginx Ingress Controller](https://github.com/kubernetes/ingress).
|
||||
|
||||
The example manifests use Kubernetes `emptyDir` volumes to back the `bootcfg` FileStore (`/var/lib/bootcfg`). This doesn't provide long-term persistent storage so you may wish to mount your machine groups, profiles, and Ignition configs with a [gitRepo](http://kubernetes.io/docs/user-guide/volumes/#gitrepo) and host image assets on a file server.
|
||||
```sh
|
||||
$ kubectl create -f contrib/k8s/matchbox-ingress.yaml
|
||||
$ kubectl get ingress
|
||||
NAME HOSTS ADDRESS PORTS AGE
|
||||
matchbox matchbox.example.com 10.128.0.3,10... 80 29m
|
||||
matchbox-rpc matchbox-rpc.example.com 10.128.0.3,10... 80, 443 29m
|
||||
```
|
||||
|
||||
## Binary
|
||||
Add DNS records `matchbox.example.com` and `matchbox-rpc.example.com` to route traffic to the Ingress Controller.
|
||||
|
||||
### User/Group
|
||||
Verify `http://matchbox.example.com` responds with the text "matchbox" and verify gRPC clients can connect to `matchbox-rpc.example.com:443`.
|
||||
|
||||
The `bootcfg` service should be run by a non-root user with access to the `bootcfg` data directory (e.g. `/var/lib/bootcfg`). Create a `bootcfg` user and group.
|
||||
```sh
|
||||
$ curl http://matchbox.example.com
|
||||
$ openssl s_client -connect matchbox-rpc.example.com:443 -CAfile ca.crt -cert client.crt -key client.key
|
||||
```
|
||||
|
||||
sudo useradd -U bootcfg
|
||||
sudo mkdir -p /var/lib/bootcfg/assets
|
||||
sudo chown -R bootcfg:bootcfg /var/lib/bootcfg
|
||||
# HTTPS - The read-only Matchbox API is also available with HTTPS
|
||||
|
||||
Add yourself to the `bootcfg` group if you'd like to edit configs directly rather than through the `bootcmd` client.
|
||||
To start matchbox in this mode you will need the following flags set:
|
||||
|
||||
SELF=$(whoami)
|
||||
sudo gpasswd --add $SELF bootcfg
|
||||
| Name | Type | Description |
|
||||
|----------------|--------|---------------------------------------------------------------|
|
||||
| -web-ssl | bool | true/false |
|
||||
| -web-cert-file | string | Path to the server TLS certificate file |
|
||||
| -web-key-file | string | Path to the server TLS key file |
|
||||
|
||||
### Prebuilt
|
||||
|
||||
Download a prebuilt binary from the Github [releases](https://github.com/coreos/coreos-baremetal/releases).
|
||||
|
||||
wget https://github.com/coreos/coreos-baremetal/releases/download/VERSION/bootcfg-VERSION-linux-amd64.tar.gz
|
||||
wget https://github.com/coreos/coreos-baremetal/releases/download/VERSION/bootcfg-VERSION-linux-amd64.tar.gz.asc
|
||||
|
||||
Verify the signature from the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/).
|
||||
|
||||
gpg --keyserver pgp.mit.edu --recv-key 18AD5014C99EF7E3BA5F6CE950BDD3E0FC8A365E
|
||||
gpg --verify bootcfg-VERSION-linux-amd64.tar.gz.asc bootcfg-VERSION-linux-amd64.tar.gz
|
||||
# gpg: Good signature from "CoreOS Application Signing Key <security@coreos.com>"
|
||||
|
||||
Install the `bootcfg` static binary to `/usr/local/bin`.
|
||||
|
||||
tar xzvf bootcfg-VERSION-linux-amd64.tar.gz
|
||||
sudo cp bootcfg /usr/local/bin
|
||||
|
||||
### Source
|
||||
|
||||
Clone the coreos-baremetal project into your $GOPATH.
|
||||
|
||||
go get github.com/coreos/coreos-baremetal/cmd/bootcfg
|
||||
cd $GOPATH/src/github.com/coreos/coreos-baremetal
|
||||
|
||||
Build `bootcfg` from source.
|
||||
|
||||
make
|
||||
|
||||
Install the `bootcfg` static binary to `/usr/local/bin`.
|
||||
|
||||
### Run
|
||||
|
||||
Run the `bootcfg` server.
|
||||
|
||||
$ bootcfg -version
|
||||
$ bootcfg -address 0.0.0.0:8080
|
||||
main: starting bootcfg HTTP server on 0.0.0.0:8080
|
||||
|
||||
See [flags and variables](config.md).
|
||||
|
||||
### systemd
|
||||
|
||||
First, install the `bootcfg` binary from a pre-built binary or from source. Then add and start bootcfg's example systemd unit.
|
||||
|
||||
sudo cp contrib/systemd/bootcfg.service /etc/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start bootcfg.service
|
||||
|
||||
Check the status and logs.
|
||||
|
||||
systemctl status bootcfg.service
|
||||
journalctl -u bootcfg.service
|
||||
|
||||
Enable the `bootcfg` service if you'd like it to start at boot time.
|
||||
|
||||
sudo systemctl enable bootcfg.service
|
||||
|
||||
### Uninstall
|
||||
|
||||
sudo systemctl stop bootcfg.service
|
||||
sudo make uninstall
|
||||
### Operational notes
|
||||
|
||||
* Secrets: Matchbox **can** be run as a public facing service. However, you **must** follow best practices and avoid writing secret material into machine user-data. Instead, load secret materials from an internal secret store.
|
||||
* Storage: Example manifests use Kubernetes `emptyDir` volumes to store `matchbox` data. Swap those out for a Kubernetes persistent volume if available.
|
||||
|
||||
|
||||
@@ -1,65 +1,82 @@
|
||||
# Development
|
||||
|
||||
# bootcfg Development
|
||||
To develop `matchbox` locally, compile the binary and build the container image.
|
||||
|
||||
Develop `bootcfg` locally.
|
||||
|
||||
## Binary
|
||||
## Static binary
|
||||
|
||||
Build the static binary.
|
||||
|
||||
./build
|
||||
```sh
|
||||
$ make build
|
||||
```
|
||||
|
||||
Test with vendored dependencies.
|
||||
|
||||
./test
|
||||
```sh
|
||||
$ make test
|
||||
```
|
||||
|
||||
## Container Image
|
||||
## Container image
|
||||
|
||||
Build an ACI `bootcfg.aci`.
|
||||
Build an ACI `matchbox.aci`.
|
||||
|
||||
./build-aci
|
||||
```sh
|
||||
$ make aci
|
||||
```
|
||||
|
||||
Alternately, build a Docker image `coreos/bootcfg:latest`.
|
||||
Alternately, build a Docker image `coreos/matchbox:latest`.
|
||||
|
||||
sudo ./build-docker
|
||||
```sh
|
||||
$ make docker-image
|
||||
```
|
||||
|
||||
## Version
|
||||
|
||||
./bin/bootcfg -version
|
||||
sudo rkt --insecure-options=image run bootcfg.aci -- -version
|
||||
sudo docker run coreos/bootcfg:latest -version
|
||||
|
||||
```sh
|
||||
$ ./bin/matchbox -version
|
||||
$ sudo rkt --insecure-options=image run matchbox.aci -- -version
|
||||
$ sudo docker run coreos/matchbox:latest -version
|
||||
```
|
||||
## Run
|
||||
|
||||
Run the binary.
|
||||
|
||||
./bin/bootcfg -address=0.0.0.0:8080 -log-level=debug -data-path examples -assets-path examples/assets
|
||||
```sh
|
||||
$ ./bin/matchbox -address=0.0.0.0:8080 -log-level=debug -data-path examples -assets-path examples/assets
|
||||
```
|
||||
|
||||
Run the ACI with rkt on `metal0`.
|
||||
Run the container image with rkt, on `metal0`.
|
||||
|
||||
sudo rkt --insecure-options=image run --net=metal0:IP=172.15.0.2 --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples --mount volume=config,target=/etc/bootcfg --volume config,kind=host,source=$PWD/examples/etc/bootcfg --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd bootcfg.aci -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```sh
|
||||
$ sudo rkt --insecure-options=image run --net=metal0:IP=172.18.0.2 --mount volume=data,target=/var/lib/matchbox --volume data,kind=host,source=$PWD/examples --mount volume=config,target=/etc/matchbox --volume config,kind=host,source=$PWD/examples/etc/matchbox --mount volume=groups,target=/var/lib/matchbox/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd matchbox.aci -- -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
```
|
||||
|
||||
Alternately, run the Docker image on `docker0`.
|
||||
|
||||
sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/groups/etcd-docker:/var/lib/bootcfg/groups:Z coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd:/var/lib/matchbox/groups:Z coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
|
||||
### bootcmd
|
||||
## bootcmd
|
||||
|
||||
Run `bootcmd` against the gRPC API of the service running via rkt.
|
||||
|
||||
./bin/bootcmd profile list --endpoints 172.15.0.2:8081 --cacert examples/etc/bootcfg/ca.crt
|
||||
```sh
|
||||
$ ./bin/bootcmd profile list --endpoints 172.18.0.2:8081 --cacert examples/etc/matchbox/ca.crt
|
||||
```
|
||||
|
||||
## Dependencies
|
||||
## Vendor
|
||||
|
||||
Project dependencies are committed to the `vendor` directory, so Go 1.6+ users can clone to their `GOPATH` and build or test immediately. Go 1.5 users should set `GO15VENDOREXPERIMENT=1`.
|
||||
Use `glide` and `glide-vc` to manage dependencies committed to the `vendor` directory.
|
||||
|
||||
Project developers should use [glide](https://github.com/Masterminds/glide) to manage committed dependencies under `vendor`. Configure `glide.yaml` as desired. Use `glide update` to download and update dependencies listed in `glide.yaml` into `/vendor` (do **not** use glide `get`).
|
||||
```sh
|
||||
$ make vendor
|
||||
```
|
||||
|
||||
glide update --update-vendored --strip-vendor --strip-vcs
|
||||
## Codegen
|
||||
|
||||
Recursive dependencies are also vendored. A `glide.lock` will be created to represent the exact versions of each dependency.
|
||||
Generate code from *proto* definitions using `protoc` and the `protoc-gen-go` plugin.
|
||||
|
||||
With an empty `vendor` directory, you can install the `glide.lock` dependencies.
|
||||
|
||||
rm -rf vendor/
|
||||
glide install --strip-vendor --strip-vcs
|
||||
```sh
|
||||
$ make codegen
|
||||
```
|
||||
|
||||
@@ -1,47 +1,74 @@
|
||||
|
||||
# bootcfg Release Guide
|
||||
# Release guide
|
||||
|
||||
This guide covers releasing new versions of `bootcfg`.
|
||||
This guide covers releasing new versions of matchbox.
|
||||
|
||||
## Release Notes
|
||||
## Version
|
||||
|
||||
Create a pre-release with the [changelog](../CHANGES.md) contents.
|
||||
Create a release commit which updates old version references.
|
||||
|
||||
```sh
|
||||
$ export VERSION=v0.7.1
|
||||
```
|
||||
|
||||
## Tag
|
||||
|
||||
Tag, sign the release version, and push to Github.
|
||||
Tag, sign the release version, and push it to Github.
|
||||
|
||||
git tag -s vX.Y.Z -m 'vX.Y.Z'
|
||||
```sh
|
||||
$ git tag -s vX.Y.Z -m 'vX.Y.Z'
|
||||
$ git push origin --tags
|
||||
$ git push origin master
|
||||
```
|
||||
|
||||
Travis CI will build the Docker image and push it to Quay.io when the tag is pushed to master.
|
||||
## Images
|
||||
|
||||
## Binaries and Images
|
||||
Travis CI will build the Docker image and push it to Quay.io when the tag is pushed to master. Verify the new image and version.
|
||||
|
||||
Build the binary and ACI. Check that their version is correct/clean.
|
||||
```sh
|
||||
$ sudo docker run quay.io/coreos/matchbox:$VERSION -version
|
||||
$ sudo rkt run --no-store quay.io/coreos/matchbox:$VERSION -- -version
|
||||
```
|
||||
|
||||
./build
|
||||
./build-aci
|
||||
## Github release
|
||||
|
||||
Prepare the binary tarball and ACI.
|
||||
Publish the release on Github with release notes.
|
||||
|
||||
export VERSION=v0.3.0
|
||||
mkdir bootcfg-$VERSION
|
||||
cp bin/bootcfg bootcfg-$VERSION
|
||||
cp bootcfg.aci bootcfg-$VERSION-linux-amd64.aci
|
||||
tar -zcvf bootcfg-$VERSION-linux-amd64.tar.gz bootcfg-$VERSION
|
||||
## Tarballs
|
||||
|
||||
Build the release tarballs.
|
||||
|
||||
```sh
|
||||
$ make release
|
||||
```
|
||||
|
||||
Verify the reported version.
|
||||
|
||||
```
|
||||
./_output/matchbox-v0.7.1-linux-amd64/matchbox -version
|
||||
```
|
||||
|
||||
## Signing
|
||||
|
||||
Sign the binary tarball and ACI.
|
||||
Sign the release tarballs and ACI with a [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/) subkey.
|
||||
|
||||
gpg2 -a --default-key FC8A365E --detach-sign bootcfg-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 -a --default-key FC8A365E --detach-sign bootcfg-$VERSION-linux-amd64.aci
|
||||
```sh
|
||||
cd _output
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-darwin-amd64.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-arm.tar.gz
|
||||
gpg2 --armor --local-user A6F71EE5BEDDBA18! --detach-sign matchbox-$VERSION-linux-arm64.tar.gz
|
||||
```
|
||||
|
||||
Verify the signatures.
|
||||
|
||||
gpg2 --verify bootcfg-$VERSION-linux-amd64.tar.gz.asc bootcfg-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --verify bootcfg-$VERSION-linux-amd64.aci.asc bootcfg-$VERSION-linux-amd64.aci
|
||||
```sh
|
||||
gpg2 --verify matchbox-$VERSION-linux-amd64.tar.gz.asc matchbox-$VERSION-linux-amd64.tar.gz
|
||||
gpg2 --verify matchbox-$VERSION-darwin-amd64.tar.gz.asc matchbox-$VERSION-darwin-amd64.tar.gz
|
||||
gpg2 --verify matchbox-$VERSION-linux-arm.tar.gz.asc matchbox-$VERSION-linux-arm.tar.gz
|
||||
gpg2 --verify matchbox-$VERSION-linux-arm64.tar.gz.asc matchbox-$VERSION-linux-arm64.tar.gz
|
||||
```
|
||||
|
||||
## Publish
|
||||
|
||||
Publish the signed binary tarball(s) and the signed ACI with the Github release. The Docker image is published to Quay.io when the tag is pushed to master.
|
||||
Upload the signed tarball(s) with the Github release. Promote the release from a `pre-release` to an official release.
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Getting started with Docker
|
||||
|
||||
# Getting Started with Docker
|
||||
|
||||
In this tutorial, we'll run `bootcfg` on your Linux machine with Docker to network boot and provision a cluster of CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd clusters, and test network setups.
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with Docker to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
@@ -9,73 +8,117 @@ In this tutorial, we'll run `bootcfg` on your Linux machine with Docker to netwo
|
||||
|
||||
Install the package dependencies and start the Docker daemon.
|
||||
|
||||
# Fedora
|
||||
sudo dnf install docker virt-install virt-manager
|
||||
sudo systemctl start docker
|
||||
```sh
|
||||
$ # Fedora
|
||||
$ sudo dnf install docker virt-install virt-manager
|
||||
$ sudo systemctl start docker
|
||||
|
||||
# Debian/Ubuntu
|
||||
# check Docker's docs to install Docker 1.8+ on Debian/Ubuntu
|
||||
sudo apt-get install virt-manager virtinst qemu-kvm
|
||||
$ # Debian/Ubuntu
|
||||
$ # check Docker's docs to install Docker 1.8+ on Debian/Ubuntu
|
||||
$ sudo apt-get install virt-manager virtinst qemu-kvm
|
||||
```
|
||||
|
||||
Clone the [coreos-baremetal](https://github.com/coreos/coreos-baremetal) source which contains the examples and scripts.
|
||||
Clone the [matchbox](https://github.com/coreos/matchbox) source which contains the examples and scripts.
|
||||
|
||||
git clone https://github.com/coreos/coreos-baremetal.git
|
||||
cd coreos-baremetal
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS image assets referenced by the `etcd-docker` [example](../examples) to `examples/assets`.
|
||||
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
|
||||
|
||||
./scripts/get-coreos alpha 1053.2.0 ./examples/assets
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
|
||||
```
|
||||
|
||||
For development convenience, add `/etc/hosts` entries for nodes so they may be referenced by name.
|
||||
|
||||
```sh
|
||||
# /etc/hosts
|
||||
...
|
||||
172.17.0.21 node1.example.com
|
||||
172.17.0.22 node2.example.com
|
||||
172.17.0.23 node3.example.com
|
||||
```
|
||||
|
||||
## Containers
|
||||
|
||||
Run the latest `bootcfg` Docker image from `quay.io/coreos/bootcfg` with the `etcd-docker` example. The container should receive the IP address 172.17.0.2 on the `docker0` bridge.
|
||||
Run the `matchbox` and `dnsmasq` services on the `docker0` bridge. `dnsmasq` will run DHCP, DNS and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
|
||||
|
||||
sudo docker pull quay.io/coreos/bootcfg:latest
|
||||
sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/bootcfg:Z -v $PWD/examples/groups/etcd-docker:/var/lib/bootcfg/groups:Z quay.io/coreos/bootcfg:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
The `devnet` convenience script can start these services and accepts the name of any example cluster in [examples](../examples).
|
||||
|
||||
Take a look at the [etcd groups](../examples/groups/etcd-docker) to get an idea of how machines are mapped to Profiles. Explore some endpoints port mapped to localhost:8080.
|
||||
```sh
|
||||
$ sudo ./scripts/devnet create etcd3
|
||||
```
|
||||
|
||||
* [node1's ipxe](http://127.0.0.1:8080/ipxe?mac=52:54:00:a1:9c:ae)
|
||||
* [node1's Ignition](http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* [node1's Metadata](http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
Inspect the logs.
|
||||
|
||||
## Network
|
||||
```
|
||||
$ sudo ./scripts/devnet status
|
||||
```
|
||||
|
||||
Since the virtual network has no network boot services, use the `dnsmasq` image to create an iPXE network boot environment which runs DHCP, DNS, and TFTP.
|
||||
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
|
||||
|
||||
sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:#ipxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --address=/bootcfg.foo/172.17.0.2
|
||||
* iPXE [http://127.0.0.1:8080/ipxe?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ipxe?mac=52:54:00:a1:9c:ae)
|
||||
* Ignition [http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* Metadata [http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae](http://127.0.0.1:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
|
||||
In this case, dnsmasq runs a DHCP server allocating IPs to VMs between 172.17.0.43 and 172.17.0.99, resolves `bootcfg.foo` to 172.17.0.2 (the IP where `bootcfg` runs), and points iPXE clients to `http://bootcfg.foo:8080/boot.ipxe`.
|
||||
### Manual
|
||||
|
||||
If you prefer to start the containers yourself, instead of using `devnet`, run each service directly:
|
||||
|
||||
```sh
|
||||
$ sudo docker run -p 8080:8080 --rm -v $PWD/examples:/var/lib/matchbox:Z -v $PWD/examples/groups/etcd3:/var/lib/matchbox/groups:Z quay.io/coreos/matchbox:latest -address=0.0.0.0:8080 -log-level=debug
|
||||
$ sudo docker run --name dnsmasq --cap-add=NET_ADMIN -v $PWD/contrib/dnsmasq/docker0.conf:/etc/dnsmasq.conf:Z quay.io/coreos/dnsmasq -d
|
||||
```
|
||||
|
||||
## Client VMs
|
||||
|
||||
Create VM nodes which have known hardware attributes. The nodes will be attached to the `docker0` bridge where Docker's containers run.
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `docker0` bridge, where Docker containers run.
|
||||
|
||||
sudo ./scripts/libvirt create-docker
|
||||
sudo virt-manager
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create
|
||||
```
|
||||
|
||||
You can use `virt-manager` to watch the console and reboot VM machines with
|
||||
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
|
||||
sudo ./scripts/libvirt poweroff
|
||||
sudo ./scripts/libvirt start
|
||||
```sh
|
||||
$ sudo virsh console node1
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
You can also use `virt-manager` to watch the console.
|
||||
|
||||
```sh
|
||||
$ sudo virt-manager
|
||||
```
|
||||
|
||||
Use the wrapper script to act on all nodes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
The VMs should network boot and provision themselves into a three node etcd cluster, with other nodes behaving as etcd proxies.
|
||||
The VMs should network boot and provision themselves into a three node etcd3 cluster, with other nodes behaving as etcd3 gateways.
|
||||
|
||||
The example profile added autologin so you can verify that etcd works between nodes.
|
||||
The example profile added autologin so you can verify that etcd3 works between nodes.
|
||||
|
||||
systemctl status etcd2
|
||||
etcdctl set /message hello
|
||||
etcdctl get /message
|
||||
fleetctl list-machines
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
## Clean up
|
||||
|
||||
Clean up the VM machines.
|
||||
Clean up the containers and VM machines.
|
||||
|
||||
sudo ./scripts/libvirt poweroff
|
||||
sudo ./scripts/libvirt destroy
|
||||
```sh
|
||||
$ sudo ./scripts/devnet destroy
|
||||
$ sudo ./scripts/libvirt destroy
|
||||
```
|
||||
|
||||
## Going Further
|
||||
|
||||
Learn more about [bootcfg](bootcfg.md) or explore the other [example](../examples) clusters. Try the [k8s-docker example](kubernetes.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl` ([docs](../examples/README.md#kubernetes)).
|
||||
## Going further
|
||||
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`.
|
||||
|
||||
@@ -1,32 +1,39 @@
|
||||
# Getting started with rkt
|
||||
|
||||
# Getting Started with rkt
|
||||
|
||||
In this tutorial, we'll run `bootcfg` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of CoreOS machines locally. You'll be able to create Kubernetes clusters, etcd clusters, and test network setups.
|
||||
In this tutorial, we'll run `matchbox` on your Linux machine with `rkt` and `CNI` to network boot and provision a cluster of QEMU/KVM Container Linux machines locally. You'll be able to create Kubernetes clusters, etcd3 clusters, and test network setups.
|
||||
|
||||
*Note*: To provision physical machines, see [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
## Requirements
|
||||
|
||||
Install [rkt](https://coreos.com/rkt/docs/latest/distributions.html) 1.8 or higher ([example script](https://github.com/dghubble/phoenix/blob/master/scripts/fedora/sources.sh)) and setup rkt [privilege separation](https://coreos.com/rkt/docs/latest/trying-out-rkt.html).
|
||||
Install [rkt](https://coreos.com/rkt/docs/latest/distributions.html) 1.12.0 or higher ([example script](https://github.com/dghubble/phoenix/blob/master/fedora/sources.sh)) and setup rkt [privilege separation](https://coreos.com/rkt/docs/latest/trying-out-rkt.html).
|
||||
|
||||
Next, install the package dependencies.
|
||||
|
||||
# Fedora
|
||||
sudo dnf install virt-install virt-manager
|
||||
```sh
|
||||
# Fedora
|
||||
$ sudo dnf install virt-install virt-manager
|
||||
|
||||
# Debian/Ubuntu
|
||||
sudo apt-get install virt-manager virtinst qemu-kvm systemd-container
|
||||
# Debian/Ubuntu
|
||||
$ sudo apt-get install virt-manager virtinst qemu-kvm systemd-container
|
||||
```
|
||||
|
||||
**Note**: rkt does not yet integrate with SELinux on Fedora. As a workaround, temporarily set enforcement to permissive if you are comfortable (`sudo setenforce Permissive`). Check the rkt [distribution notes](https://github.com/coreos/rkt/blob/master/Documentation/distributions.md) or see the tracking [issue](https://github.com/coreos/rkt/issues/1727).
|
||||
|
||||
Clone the [coreos-baremetal](https://github.com/coreos/coreos-baremetal) source which contains the examples and scripts.
|
||||
Clone the [matchbox](https://github.com/coreos/matchbox) source which contains the examples and scripts.
|
||||
|
||||
git clone https://github.com/coreos/coreos-baremetal.git
|
||||
cd coreos-baremetal
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox
|
||||
```
|
||||
|
||||
Download CoreOS image assets referenced by the `etcd` [example](../examples) to `examples/assets`.
|
||||
Download CoreOS Container Linux image assets referenced by the `etcd3` [example](../examples) to `examples/assets`.
|
||||
|
||||
./scripts/get-coreos alpha 1053.2.0 ./examples/assets
|
||||
```sh
|
||||
$ ./scripts/get-coreos stable 1576.5.0 ./examples/assets
|
||||
```
|
||||
|
||||
## Network
|
||||
|
||||
Define the `metal0` virtual bridge with [CNI](https://github.com/appc/cni).
|
||||
|
||||
@@ -41,7 +48,7 @@ sudo bash -c 'cat > /etc/rkt/net.d/20-metal.conf << EOF
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "172.15.0.0/16",
|
||||
"subnet": "172.18.0.0/24",
|
||||
"routes" : [ { "dst" : "0.0.0.0/0" } ]
|
||||
}
|
||||
}
|
||||
@@ -50,68 +57,128 @@ EOF'
|
||||
|
||||
On Fedora, add the `metal0` interface to the trusted zone in your firewall configuration.
|
||||
|
||||
sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
```sh
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted --permanent
|
||||
```
|
||||
|
||||
For development convenience, you may wish to add `/etc/hosts` entries for nodes to refer to them by name.
|
||||
|
||||
```
|
||||
# /etc/hosts
|
||||
...
|
||||
172.18.0.21 node1.example.com
|
||||
172.18.0.22 node2.example.com
|
||||
172.18.0.23 node3.example.com
|
||||
```
|
||||
|
||||
## Containers
|
||||
|
||||
Run the latest `bootcfg` ACI with rkt and the `etcd` example.
|
||||
Run the `matchbox` and `dnsmasq` services on the `metal0` bridge. `dnsmasq` will run DHCP, DNS, and TFTP services to create a suitable network boot environment. `matchbox` will serve configs to machines as they PXE boot.
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/etcd quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
The `devnet` convenience script can rkt run these services in systemd transient units and accepts the name of any example cluster in [examples](../examples).
|
||||
|
||||
```sh
|
||||
$ export CONTAINER_RUNTIME=rkt
|
||||
$ sudo -E ./scripts/devnet create etcd3
|
||||
```
|
||||
|
||||
Inspect the journal logs.
|
||||
|
||||
```
|
||||
$ sudo -E ./scripts/devnet status
|
||||
$ journalctl -f -u dev-matchbox
|
||||
$ journalctl -f -u dev-dnsmasq
|
||||
```
|
||||
|
||||
Take a look at the [etcd3 groups](../examples/groups/etcd3) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service, say for QEMU/KVM node1.
|
||||
|
||||
* iPXE [http://172.18.0.2:8080/ipxe?mac=52:54:00:a1:9c:ae](http://172.18.0.2:8080/ipxe?mac=52:54:00:a1:9c:ae)
|
||||
* Ignition [http://172.18.0.2:8080/ignition?mac=52:54:00:a1:9c:ae](http://172.18.0.2:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* Metadata [http://172.18.0.2:8080/metadata?mac=52:54:00:a1:9c:ae](http://172.18.0.2:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
|
||||
### Manual
|
||||
|
||||
If you prefer to start the containers yourself, instead of using `devnet`,
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.2 \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--volume data,kind=host,source=$PWD/examples \
|
||||
--mount volume=groups,target=/var/lib/matchbox/groups \
|
||||
--volume groups,kind=host,source=$PWD/examples/groups/etcd3 \
|
||||
quay.io/coreos/matchbox:v0.7.1 -- -address=0.0.0.0:8080 -log-level=debug
|
||||
```
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.3 \
|
||||
--dns=host \
|
||||
--mount volume=config,target=/etc/dnsmasq.conf \
|
||||
--volume config,kind=host,source=$PWD/contrib/dnsmasq/metal0.conf \
|
||||
quay.io/coreos/dnsmasq:v0.4.1 \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW
|
||||
```
|
||||
|
||||
If you get an error about the IP assignment, stop old pods and run garbage collection.
|
||||
|
||||
sudo rkt gc --grace-period=0
|
||||
|
||||
Take a look at the [etcd groups](../examples/groups/etcd) to get an idea of how machines are mapped to Profiles. Explore some endpoints exposed by the service.
|
||||
|
||||
* [node1's ipxe](http://172.15.0.2:8080/ipxe?mac=52:54:00:a1:9c:ae)
|
||||
* [node1's Ignition](http://172.15.0.2:8080/ignition?mac=52:54:00:a1:9c:ae)
|
||||
* [node1's Metadata](http://172.15.0.2:8080/metadata?mac=52:54:00:a1:9c:ae)
|
||||
|
||||
## Network
|
||||
|
||||
Since the virtual network has no network boot services, use the `dnsmasq` ACI to create an iPXE network boot environment which runs DHCP, DNS, and TFTP.
|
||||
|
||||
Trust the [CoreOS App Signing Key](https://coreos.com/security/app-signing-key/).
|
||||
|
||||
sudo rkt trust --prefix coreos.com/dnsmasq
|
||||
# gpg key fingerprint is: 18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E
|
||||
|
||||
Run the `coreos.com/dnsmasq` ACI with rkt.
|
||||
|
||||
sudo rkt run coreos.com/dnsmasq:v0.3.0 --net=metal0:IP=172.15.0.3 -- -d -q --dhcp-range=172.15.0.50,172.15.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:#ipxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --log-queries --log-dhcp --dhcp-option=3,172.15.0.1 --address=/bootcfg.foo/172.15.0.2
|
||||
|
||||
In this case, dnsmasq runs a DHCP server allocating IPs to VMs between 172.15.0.50 and 172.15.0.99, resolves `bootcfg.foo` to 172.15.0.2 (the IP where `bootcfg` runs), and points iPXE clients to `http://bootcfg.foo:8080/boot.ipxe`.
|
||||
```sh
|
||||
$ sudo rkt gc --grace-period=0
|
||||
```
|
||||
|
||||
## Client VMs
|
||||
|
||||
Create VM nodes which have known hardware attributes. The nodes will be attached to the `metal0` bridge where your pods run.
|
||||
Create QEMU/KVM VMs which have known hardware attributes. The nodes will be attached to the `metal0` bridge, where your pods run.
|
||||
|
||||
sudo ./scripts/libvirt create-rkt
|
||||
sudo virt-manager
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-rkt
|
||||
```
|
||||
|
||||
You can use `virt-manager` to watch the console and reboot VM machines with
|
||||
You can connect to the serial console of any node (ctrl+] to exit). If you provisioned nodes with an SSH key, you can SSH after bring-up.
|
||||
|
||||
sudo ./scripts/libvirt poweroff
|
||||
sudo ./scripts/libvirt start
|
||||
```sh
|
||||
$ sudo virsh console node1
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
You can also use `virt-manager` to watch the console.
|
||||
|
||||
```sh
|
||||
$ sudo virt-manager
|
||||
```
|
||||
|
||||
Use the wrapper script to act on all nodes.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt [start|reboot|shutdown|poweroff|destroy]
|
||||
```
|
||||
|
||||
## Verify
|
||||
|
||||
The VMs should network boot and provision themselves into a three node etcd cluster, with other nodes behaving as etcd proxies.
|
||||
The VMs should network boot and provision themselves into a three node etcd3 cluster, with other nodes behaving as etcd3 gateways.
|
||||
|
||||
The example profile added autologin so you can verify that etcd works between nodes.
|
||||
The example profile added autologin so you can verify that etcd3 works between nodes.
|
||||
|
||||
systemctl status etcd2
|
||||
etcdctl set /message hello
|
||||
etcdctl get /message
|
||||
fleetctl list-machines
|
||||
```sh
|
||||
$ systemctl status etcd-member
|
||||
$ etcdctl set /message hello
|
||||
$ etcdctl get /message
|
||||
```
|
||||
|
||||
Press ^] three times to stop a rkt pod. Clean up the VM machines.
|
||||
## Clean up
|
||||
|
||||
sudo ./scripts/libvirt poweroff
|
||||
sudo ./scripts/libvirt destroy
|
||||
Clean up the systemd units running `matchbox` and `dnsmasq`.
|
||||
|
||||
## Going Further
|
||||
```sh
|
||||
$ sudo -E ./scripts/devnet destroy
|
||||
```
|
||||
|
||||
Learn more about [bootcfg](bootcfg.md) or explore the other [example](../examples) clusters. Try the [k8s example](kubernetes.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl` ([docs](../examples/README.md#kubernetes)).
|
||||
Clean up VM machines.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt destroy
|
||||
```
|
||||
|
||||
Press ^] three times to stop any rkt pod.
|
||||
|
||||
## Going further
|
||||
|
||||
Learn more about [matchbox](matchbox.md) or explore the other [example](../examples) clusters. Try the [k8s example](bootkube.md) to produce a TLS-authenticated Kubernetes cluster you can access locally with `kubectl`.
|
||||
|
||||
200
Documentation/getting-started.md
Normal file
200
Documentation/getting-started.md
Normal file
@@ -0,0 +1,200 @@
|
||||
# Getting started
|
||||
|
||||
In this tutorial, we'll show how to use terraform with `matchbox` to provision Container Linux machines.
|
||||
|
||||
You'll install the `matchbox` service, setup a PXE network boot environment, and then use terraform configs to describe your infrastructure and the terraform CLI to create those resources on `matchbox`.
|
||||
|
||||
## matchbox
|
||||
|
||||
Install `matchbox` on a dedicated server or Kubernetes cluster. Generate TLS credentials and enable the gRPC API as directed. Save the `ca.crt`, `client.crt`, and `client.key` on your local machine (e.g. `~/.matchbox`).
|
||||
|
||||
* Installing on [Container Linux / other distros](deployment.md)
|
||||
* Installing on [Kubernetes](deployment.md#kubernetes)
|
||||
* Running with [rkt](deployment.md#rkt) / [docker](deployment.md#docker)
|
||||
|
||||
Verify the matchbox read-only HTTP endpoints are accessible.
|
||||
|
||||
```sh
|
||||
$ curl http://matchbox.example.com:8080
|
||||
matchbox
|
||||
```
|
||||
|
||||
Verify your TLS client certificate and key can be used to access the gRPC API.
|
||||
|
||||
```sh
|
||||
$ openssl s_client -connect matchbox.example.com:8081 \
|
||||
-CAfile ~/.matchbox/ca.crt \
|
||||
-cert ~/.matchbox/client.crt \
|
||||
-key ~/.matchbox/client.key
|
||||
```
|
||||
|
||||
## Terraform
|
||||
|
||||
Install [Terraform][terraform-dl] v0.9+ on your system.
|
||||
|
||||
```sh
|
||||
$ terraform version
|
||||
Terraform v0.9.4
|
||||
```
|
||||
|
||||
Add the `terraform-provider-matchbox` plugin binary on your system.
|
||||
|
||||
```sh
|
||||
$ wget https://github.com/coreos/terraform-provider-matchbox/releases/download/v0.1.0/terraform-provider-matchbox-v0.1.0-linux-amd64.tar.gz
|
||||
$ tar xzf terraform-provider-matchbox-v0.1.0-linux-amd64.tar.gz
|
||||
```
|
||||
|
||||
Add the plugin to your `~/.terraformrc`.
|
||||
|
||||
```hcl
|
||||
providers {
|
||||
matchbox = "/path/to/terraform-provider-matchbox"
|
||||
}
|
||||
```
|
||||
|
||||
## First cluster
|
||||
|
||||
Clone the matchbox source and take a look at the Terraform examples.
|
||||
|
||||
```sh
|
||||
$ git clone https://github.com/coreos/matchbox.git
|
||||
$ cd matchbox/examples/terraform
|
||||
```
|
||||
|
||||
Let's start with the `simple-install` example. With `simple-install`, any machines which PXE boot from matchbox will install Container Linux to `/dev/sda`, reboot, and have your SSH key set. It's not much of a cluster, but we'll get to that later.
|
||||
|
||||
```sh
|
||||
$ cd simple-install
|
||||
```
|
||||
|
||||
Configure the variables in `variables.tf` by creating a `terraform.tfvars` file.
|
||||
|
||||
```hcl
|
||||
matchbox_http_endpoint = "http://matchbox.example.com:8080"
|
||||
matchbox_rpc_endpoint = "matchbox.example.com:8081"
|
||||
ssh_authorized_key = "YOUR_SSH_KEY"
|
||||
```
|
||||
|
||||
Terraform can now interact with the matchbox service and create resources.
|
||||
|
||||
```sh
|
||||
$ terraform plan
|
||||
Plan: 4 to add, 0 to change, 0 to destroy.
|
||||
```
|
||||
|
||||
Let's review the terraform config and learn a bit about matchbox.
|
||||
|
||||
#### Provider
|
||||
|
||||
Matchbox is configured as a provider platform for bare-metal resources.
|
||||
|
||||
```hcl
|
||||
// Configure the matchbox provider
|
||||
provider "matchbox" {
|
||||
endpoint = "${var.matchbox_rpc_endpoint}"
|
||||
client_cert = "${file("~/.matchbox/client.crt")}"
|
||||
client_key = "${file("~/.matchbox/client.key")}"
|
||||
ca = "${file("~/.matchbox/ca.crt")}"
|
||||
}
|
||||
```
|
||||
|
||||
#### Profiles
|
||||
|
||||
Machine profiles specify the kernel, initrd, kernel args, Container Linux Config, Cloud-config, or other configs used to network boot and provision a bare-metal machine. This profile will PXE boot machines using the current stable Container Linux kernel and initrd (see [assets](api.md#assets) to learn about caching for speed) and supply a Container Linux Config specifying that a disk install and reboot should be performed. Learn more about [Container Linux configs](https://coreos.com/os/docs/latest/configuration.html).
|
||||
|
||||
```hcl
|
||||
// Create a CoreOS-install profile
|
||||
resource "matchbox_profile" "coreos-install" {
|
||||
name = "coreos-install"
|
||||
kernel = "https://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe.vmlinuz"
|
||||
initrd = [
|
||||
"https://stable.release.core-os.net/amd64-usr/current/coreos_production_pxe_image.cpio.gz"
|
||||
]
|
||||
args = [
|
||||
"coreos.config.url=${var.matchbox_http_endpoint}/ignition?uuid=$${uuid}&mac=$${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
]
|
||||
container_linux_config = "${file("./cl/coreos-install.yaml.tmpl")}"
|
||||
}
|
||||
```
|
||||
|
||||
#### Groups
|
||||
|
||||
Matcher groups match machines based on labels like MAC, UUID, etc. to different profiles and templates in machine-specific values. This group does not have a `selector` block, so any machines which network boot from matchbox will match this group and be provisioned using the `coreos-install` profile. Machines are matched to the most specific matching group.
|
||||
|
||||
```hcl
|
||||
resource "matchbox_group" "default" {
|
||||
name = "default"
|
||||
profile = "${matchbox_profile.coreos-install.name}"
|
||||
# no selector means all machines can be matched
|
||||
metadata {
|
||||
ignition_endpoint = "${var.matchbox_http_endpoint}/ignition"
|
||||
ssh_authorized_key = "${var.ssh_authorized_key}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Apply
|
||||
|
||||
Apply the terraform configuration.
|
||||
|
||||
```sh
|
||||
$ terraform apply
|
||||
Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
|
||||
```
|
||||
|
||||
Matchbox serves configs to machines and respects query parameters, if you're interested:
|
||||
|
||||
* iPXE default - [/ipxe](http://matchbox.example.com:8080/ipxe)
|
||||
* Ignition default - [/ignition](http://matchbox.example.com:8080/ignition)
|
||||
* Ignition post-install - [/ignition?os=installed](http://matchbox.example.com:8080/ignition?os=installed)
|
||||
* GRUB default - [/grub](http://matchbox.example.com:8080/grub)
|
||||
|
||||
## Network
|
||||
|
||||
Matchbox can integrate with many on-premise network setups. It does not seek to be the DHCP server, TFTP server, or DNS server for the network. Instead, matchbox serves iPXE scripts and GRUB configs as the entrypoint for provisioning network booted machines. PXE clients are supported by chainloading iPXE firmware.
|
||||
|
||||
In the simplest case, an iPXE-enabled network can chain to matchbox,
|
||||
|
||||
```
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://matchbox.foo:8080/boot.ipxe
|
||||
```
|
||||
|
||||
Read [network-setup.md](network-setup.md) for the complete range of options. Network admins have a great amount of flexibility:
|
||||
|
||||
* May keep using existing DHCP, TFTP, and DNS services
|
||||
* May configure subnets, architectures, or specific machines to delegate to matchbox
|
||||
* May place matchbox behind a menu entry (timeout and default to matchbox)
|
||||
|
||||
If you've never setup a PXE-enabled network before or you're trying to setup a home lab, checkout the [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image [copy-paste examples](https://github.com/coreos/matchbox/blob/master/Documentation/network-setup.md#coreosdnsmasq) and see the section about [proxy-DHCP](https://github.com/coreos/matchbox/blob/master/Documentation/network-setup.md#proxy-dhcp).
|
||||
|
||||
## Boot
|
||||
|
||||
It's time to network boot your machines. Use the BMC's remote management capabilities (may be vendor-specific) to set the boot device (on the next boot only) to PXE and power on each machine.
|
||||
|
||||
```sh
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power off
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS chassis bootdev pxe
|
||||
$ ipmitool -H node1.example.com -U USER -P PASS power on
|
||||
```
|
||||
|
||||
Each machine should chainload iPXE, delegate to `matchbox`, receive its iPXE config (or other supported configs) and begin the provisioning process. The `simple-install` example assumes your machines are configured to boot from disk first and PXE only when requested, but you can write profiles for different cases.
|
||||
|
||||
Once the Container Linux install completes and the machine reboots you can SSH,
|
||||
|
||||
```sh
|
||||
$ ssh core@node1.example.com
|
||||
```
|
||||
|
||||
To re-provision the machine for another purpose, run `terraform apply` and PXE boot it again.
|
||||
|
||||
## Going Further
|
||||
|
||||
Matchbox can be used to provision multi-node Container Linux clusters at one or many on-premise sites if deployed in an HA way. Machines can be matched individually by MAC address, UUID, region, or other labels you choose. Installs can be made much faster by caching images in the built-in HTTP [assets](api.md#assets) server.
|
||||
|
||||
[Container Linux configs](https://coreos.com/os/docs/latest/configuration.html) can be used to partition disks and filesystems, write systemd units, write networkd configs or regular files, and create users. Container Linux nodes can be provisioned into a system that meets your needs. Checkout the examples which create a 3 node [etcd](../examples/terraform/etcd3-install) cluster or a 3 node [Kubernetes](../examples/terraform/bootkube-install) cluster.
|
||||
|
||||
[terraform-dl]: https://www.terraform.io/downloads.html
|
||||
@@ -1,5 +1,4 @@
|
||||
|
||||
# GRUB2 Netboot
|
||||
# GRUB2 netboot
|
||||
|
||||
Use GRUB to network boot UEFI hardware.
|
||||
|
||||
@@ -9,34 +8,59 @@ For local development, install the dependencies for libvirt with UEFI.
|
||||
|
||||
* [UEFI with QEMU](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU)
|
||||
|
||||
Ensure that you've gone through the [bootcfg with rkt](getting-started-rkt.md) and [bootcfg](bootcfg.md) guides and understand the basics.
|
||||
Ensure that you've gone through the [matchbox with rkt](getting-started-rkt.md) and [matchbox](matchbox.md) guides and understand the basics.
|
||||
|
||||
## Containers
|
||||
|
||||
Run `bootcfg` with rkt, but mount the [grub](../examples/groups/grub) group example.
|
||||
Run `matchbox` with rkt, but mount the [grub](../examples/groups/grub) group example.
|
||||
|
||||
## Network
|
||||
|
||||
On Fedora, add the `metal0` interface to the trusted zone in your firewall configuration.
|
||||
|
||||
sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
```sh
|
||||
$ sudo firewall-cmd --add-interface=metal0 --zone=trusted
|
||||
```
|
||||
|
||||
Run the `coreos.com/dnsmasq` ACI with rkt.
|
||||
Run the `quay.io/coreos/dnsmasq` container image with rkt or docker.
|
||||
|
||||
sudo rkt run coreos.com/dnsmasq:v0.3.0 --net=metal0:IP=172.15.0.3 -- -d -q --dhcp-range=172.15.0.50,172.15.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;bootcfg.foo:8080)/grub","172.15.0.2" --log-queries --log-dhcp --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --address=/bootcfg.foo/172.15.0.2
|
||||
```sh
|
||||
sudo rkt run --net=metal0:IP=172.18.0.3 quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=172.18.0.50,172.18.0.99 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:efi-bc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efi-bc,grub.efi \
|
||||
--dhcp-userclass=set:grub,GRUB2 \
|
||||
--dhcp-boot=tag:grub,"(http;matchbox.example.com:8080)/grub","172.18.0.2" \
|
||||
--log-queries \
|
||||
--log-dhcp \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:pxe,undionly.kpxe \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.foo/172.18.0.2
|
||||
```
|
||||
|
||||
## Client VM
|
||||
|
||||
Create UEFI VM nodes which have known hardware attributes.
|
||||
|
||||
sudo ./scripts/libvirt create-uefi
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
If you use Docker, run `bootcfg` according to [bootcfg with Docker](getting-started-docker.md), but mount the [grub](../examples/groups/grub) group example. Then start the `coreos/dnsmasq` Docker image, which bundles a `grub.efi`.
|
||||
If you use Docker, run `matchbox` according to [matchbox with Docker](getting-started-docker.md), but mount the [grub](../examples/groups/grub) group example. Then start the `coreos/dnsmasq` Docker image, which bundles a `grub.efi`.
|
||||
|
||||
sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;bootcfg.foo:8080)/grub","172.17.0.2" --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --address=/bootcfg.foo/172.17.0.2
|
||||
```sh
|
||||
$ sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-match=set:efi-bc,option:client-arch,7 --dhcp-boot=tag:efi-bc,grub.efi --dhcp-userclass=set:grub,GRUB2 --dhcp-boot=tag:grub,"(http;matchbox.foo:8080)/grub","172.17.0.2" --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:pxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://matchbox.foo:8080/boot.ipxe --address=/matchbox.foo/172.17.0.2
|
||||
```
|
||||
|
||||
Create a VM to verify the machine network boots.
|
||||
|
||||
sudo virt-install --name uefi-test --pxe --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```sh
|
||||
$ sudo virt-install --name uefi-test --boot=uefi,network --disk pool=default,size=4 --network=bridge=docker0,model=e1000 --memory=1024 --vcpus=1 --os-type=linux --noautoconsole
|
||||
```
|
||||
|
||||
@@ -1,161 +0,0 @@
|
||||
|
||||
# Ignition
|
||||
|
||||
Ignition is a system for declaratively provisioning disks during the initramfs, before systemd starts. It runs only on the first boot and handles partitioning disks, formatting partitions, writing files (regular files, systemd units, networkd units, etc.), and configuring users. See the Ignition [docs](https://coreos.com/ignition/docs/latest/) for details.
|
||||
|
||||
## Fuze Configs
|
||||
|
||||
Ignition 2.0.0+ configs are versioned, *machine-friendly* JSON documents (which contain encoded file contents). Operators should write and maintain configs in a *human-friendly* format, such as CoreOS [fuze](https://github.com/coreos/fuze) configs. As of `bootcfg` v0.4.0, Fuze configs are the primary way to use CoreOS Ignition.
|
||||
|
||||
The [Fuze schema](https://github.com/coreos/fuze/blob/master/doc/configuration.md) formalizes and improves upon the YAML to Ignition JSON transform. Fuze provides better support for Ignition 2.0.0+, handles file content encoding, patches Ignition bugs, performs better validations, and lets services (like `bootcfg`) negotiate the Ignition version required by a CoreOS client.
|
||||
|
||||
## Adding Fuze Configs
|
||||
|
||||
Fuze template files can be added in the `/var/lib/bootcfg/ignition` directory or in an `ignition` subdirectory of a custom `-data-path`. Template files may contain [Go template](https://golang.org/pkg/text/template/) elements which will be evaluated with group metadata, selectors, and query params.
|
||||
|
||||
/var/lib/bootcfg
|
||||
├── cloud
|
||||
├── ignition
|
||||
│ └── k8s-master.yaml
|
||||
│ └── etcd.yaml
|
||||
│ └── k8s-worker.yaml
|
||||
│ └── raw.ign
|
||||
└── profiles
|
||||
|
||||
### Reference
|
||||
|
||||
Reference a Fuze config in a [Profile](bootcfg.md#profiles) with `ignition_id`. When PXE booting, use the kernel option `coreos.first_boot=1` and `coreos.config.url` to point to the `bootcfg` [Ignition endpoint](api.md#ignition-config).
|
||||
|
||||
### Migration from v0.3.0
|
||||
|
||||
In v0.4.0, `bootcfg` switched to using the CoreOS [fuze](https://github.com/coreos/fuze) library, which formalizes and improves upon the YAML to Ignition JSON transform. Fuze provides better support for Ignition 2.0.0+, handles file content encoding, patches Ignition bugs, and performs better validations.
|
||||
|
||||
Upgrade your Ignition YAML templates to match the [Fuze config schema](https://github.com/coreos/fuze/blob/master/doc/configuration.md). Typically, you'll need to do the following:
|
||||
|
||||
* Remove `ignition_version: 1`, Fuze configs are version-less
|
||||
* Update `filesystems` section and set the `name`
|
||||
* Update `files` section to use `inline` as shown below
|
||||
* Replace `uid` and `gid` with `user` and `group` objects as shown above
|
||||
|
||||
Maintain readable inline file contents in Fuze:
|
||||
|
||||
```
|
||||
...
|
||||
files:
|
||||
- path: /etc/foo.conf
|
||||
filesystem: rootfs
|
||||
contents:
|
||||
inline: |
|
||||
foo bar
|
||||
```
|
||||
|
||||
Support for the older Ignition v1 format has been dropped, so CoreOS machines must be **1010.1.0 or newer**. Read the upstream Ignition v1 to 2.0.0 [migration guide](https://coreos.com/ignition/docs/latest/migrating-configs.html) to understand the reasons behind schema changes.
|
||||
|
||||
## Examples
|
||||
|
||||
Here is an example Fuze template. This template will be rendered into a Fuze config (YAML), using group metadata, selectors, and query params as template variables. Finally, the Fuze config is served to client machines as Ignition JSON.
|
||||
|
||||
ignition/format-disk.yaml.tmpl:
|
||||
|
||||
---
|
||||
storage:
|
||||
disks:
|
||||
- device: /dev/sda
|
||||
wipe_table: true
|
||||
partitions:
|
||||
- label: ROOT
|
||||
filesystems:
|
||||
- name: rootfs
|
||||
mount:
|
||||
device: "/dev/sda1"
|
||||
format: "ext4"
|
||||
create:
|
||||
force: true
|
||||
options:
|
||||
- "-LROOT"
|
||||
files:
|
||||
- filesystem: rootfs
|
||||
path: /home/core/foo
|
||||
mode: 0644
|
||||
user:
|
||||
id: 500
|
||||
group:
|
||||
id: 500
|
||||
contents:
|
||||
inline: |
|
||||
{{.example_contents}}
|
||||
{{ if index . "ssh_authorized_keys" }}
|
||||
passwd:
|
||||
users:
|
||||
- name: core
|
||||
ssh_authorized_keys:
|
||||
{{ range $element := .ssh_authorized_keys }}
|
||||
- {{$element}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
The Ignition config response (formatted) to a query `/ignition?label=value` for a CoreOS instance supporting Ignition 2.0.0 would be:
|
||||
|
||||
{
|
||||
"ignition": {
|
||||
"version": "2.0.0",
|
||||
"config": {}
|
||||
},
|
||||
"storage": {
|
||||
"disks": [
|
||||
{
|
||||
"device": "/dev/sda",
|
||||
"wipeTable": true,
|
||||
"partitions": [
|
||||
{
|
||||
"label": "ROOT",
|
||||
"number": 0,
|
||||
"size": 0,
|
||||
"start": 0
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"filesystems": [
|
||||
{
|
||||
"name": "rootfs",
|
||||
"mount": {
|
||||
"device": "/dev/sda1",
|
||||
"format": "ext4",
|
||||
"create": {
|
||||
"force": true,
|
||||
"options": [
|
||||
"-LROOT"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"files": [
|
||||
{
|
||||
"filesystem": "rootfs",
|
||||
"path": "/home/core/foo",
|
||||
"contents": {
|
||||
"source": "data:,Example%20file%20contents%0A",
|
||||
"verification": {}
|
||||
},
|
||||
"mode": 420,
|
||||
"user": {
|
||||
"id": 500
|
||||
},
|
||||
"group": {
|
||||
"id": 500
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"systemd": {},
|
||||
"networkd": {},
|
||||
"passwd": {}
|
||||
}
|
||||
|
||||
See [examples/ignition](../examples/ignition) for numerous Fuze template examples.
|
||||
|
||||
### Raw Ignition
|
||||
|
||||
If you prefer to design your own templating solution, raw Ignition files (suffixed with `.ign` or `.ignition`) are served directly.
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 118 KiB After Width: | Height: | Size: 130 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 172 KiB After Width: | Height: | Size: 116 KiB |
BIN
Documentation/img/tectonic-installer.png
Normal file
BIN
Documentation/img/tectonic-installer.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 107 KiB |
@@ -1,88 +0,0 @@
|
||||
|
||||
# Kubernetes
|
||||
|
||||
The Kubernetes example provisions a 3 node Kubernetes v1.3.0 cluster with one controller, two workers, and TLS authentication. An etcd cluster backs Kubernetes and coordinates CoreOS auto-updates (enabled for disk installs).
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [bootcfg with rkt](getting-started-rkt.md) or [bootcfg with docker](getting-started-docker.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use rkt or Docker to start `bootcfg`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](../examples) statically assign IP addresses to libvirt client VMs created by `scripts/libvirt`. VMs are setup on the `metal0` CNI bridge for rkt or the `docker0` bridge for Docker. The examples can be used for physical machines if you update the MAC/IP addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [k8s](../examples/groups/k8s) - iPXE boot a Kubernetes cluster (use rkt)
|
||||
* [k8s-docker](../examples/groups/k8s-docker) - iPXE boot a Kubernetes cluster on `docker0` (use docker)
|
||||
* [k8s-install](../examples/groups/k8s-install) - Install a Kubernetes cluster to disk (use rkt)
|
||||
* [Lab examples](https://github.com/dghubble/metal) - Lab hardware examples
|
||||
|
||||
### Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
./scripts/get-coreos alpha 1053.2.0 ./examples/assets
|
||||
|
||||
Add your SSH public key to each machine group definition [as shown](../examples/README.md#ssh-keys).
|
||||
|
||||
Generate a root CA and Kubernetes TLS assets for components (`admin`, `apiserver`, `worker`).
|
||||
|
||||
rm -rf examples/assets/tls
|
||||
# for Kubernetes on CNI metal0, i.e. rkt
|
||||
./scripts/tls/k8s-certgen -d examples/assets/tls -s 172.15.0.21 -m IP.1=10.3.0.1,IP.2=172.15.0.21 -w IP.1=172.15.0.22,IP.2=172.15.0.23
|
||||
# for Kubernetes on docker0
|
||||
./scripts/tls/k8s-certgen -d examples/assets/tls -s 172.17.0.21 -m IP.1=10.3.0.1,IP.2=172.17.0.21 -w IP.1=172.17.0.22,IP.2=172.17.0.23
|
||||
|
||||
**Note**: TLS assets are served to any machines which request them, which requires a trusted network. Alternately, provisioning may be tweaked to require TLS assets be securely copied to each host. Read about our longer term security plans at [Distributed Trusted Computing](https://coreos.com/blog/coreos-trusted-computing.html).
|
||||
|
||||
## Containers
|
||||
|
||||
Use rkt or docker to start `bootcfg` and mount the desired example resources. Create a network boot environment and power-on your machines. Revisit [bootcfg with rkt](getting-started-rkt.md) or [bootcfg with Docker](getting-started-docker.md) for help.
|
||||
|
||||
Client machines should boot and provision themselves. Local client VMs should network boot CoreOS in about a 1 minute and the Kubernetes API should be available after 2-3 minutes. If you chose `k8s-install`, notice that machines install CoreOS and then reboot (in libvirt, you must hit "power" again). Time to network boot and provision Kubernetes clusters on physical hardware depends on a number of factors (POST duration, boot device iteration, network speed, etc.).
|
||||
|
||||
## Verify
|
||||
|
||||
[Install kubectl](https://coreos.com/kubernetes/docs/latest/configure-kubectl.html) on your laptop. Use the generated kubeconfig to access the Kubernetes cluster created on rkt `metal0` or `docker0`.
|
||||
|
||||
$ cd /path/to/coreos-baremetal
|
||||
$ kubectl --kubeconfig=examples/assets/tls/kubeconfig get nodes
|
||||
NAME STATUS AGE
|
||||
172.15.0.21 Ready 6m
|
||||
172.15.0.22 Ready 5m
|
||||
172.15.0.23 Ready 6m
|
||||
|
||||
Get all pods.
|
||||
|
||||
$ kubectl --kubeconfig=examples/assets/tls/kubeconfig get pods --all-namespaces
|
||||
NAMESPACE NAME READY STATUS RESTARTS AGE
|
||||
kube-system heapster-v1.1.0-3647315203-tes6g 2/2 Running 0 14m
|
||||
kube-system kube-apiserver-172.15.0.21 1/1 Running 0 14m
|
||||
kube-system kube-controller-manager-172.15.0.21 1/1 Running 0 14m
|
||||
kube-system kube-dns-v15-nfbz4 3/3 Running 0 14m
|
||||
kube-system kube-proxy-172.15.0.21 1/1 Running 0 14m
|
||||
kube-system kube-proxy-172.15.0.22 1/1 Running 0 14m
|
||||
kube-system kube-proxy-172.15.0.23 1/1 Running 0 14m
|
||||
kube-system kube-scheduler-172.15.0.21 1/1 Running 0 13m
|
||||
kube-system kubernetes-dashboard-v1.1.0-m1gyy 1/1 Running 0 14m
|
||||
|
||||
## Kubernetes Dashboard
|
||||
|
||||
Access the Kubernetes Dashboard with `kubeconfig` credentials by port forwarding to the dashboard pod.
|
||||
|
||||
$ kubectl --kubeconfig=examples/assets/tls/kubeconfig port-forward kubernetes-dashboard-v1.1.0-SOME-ID 9090 --namespace=kube-system
|
||||
Forwarding from 127.0.0.1:9090 -> 9090
|
||||
|
||||
Then visit [http://127.0.0.1:9090](http://127.0.0.1:9090/).
|
||||
|
||||
<img src='img/kubernetes-dashboard.png' class="img-center" alt="Kubernetes Dashboard"/>
|
||||
|
||||
## Tectonic
|
||||
|
||||
Sign up for [Tectonic Starter](https://tectonic.com/starter/) for free and deploy the [Tectonic Console](https://tectonic.com/enterprise/docs/latest/deployer/tectonic_console.html) with a few `kubectl` commands!
|
||||
|
||||
<img src='img/tectonic-console.png' class="img-center" alt="Tectonic Console"/>
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
# Lifecycle of a physical machine
|
||||
|
||||
# Lifecycle of a Physical Machine
|
||||
## About boot environment
|
||||
|
||||
Physical machines [network boot](network-booting.md) in a network boot environment with DHCP/TFTP/DNS services or with [coreos/dnsmasq](../contrib/dnsmasq).
|
||||
|
||||
`bootcfg` serves iPXE, GRUB, or Pixiecore boot configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS. The "first boot" Ignition config is fetched and CoreOS is installed.
|
||||
`matchbox` serves iPXE or GRUB configs via HTTP to machines based on Group selectors (e.g. UUID, MAC, region, etc.) and machine Profiles. Kernel and initrd images are fetched and booted with Ignition to install CoreOS Container Linux. The "first boot" Ignition config is fetched and Container Linux is installed.
|
||||
|
||||
CoreOS boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
|
||||
Container Linux boots ("first boot" from disk) and runs Ignition to provision its disk with systemd units, files, keys, and more to become a cluster node. Systemd units may fetch metadata from a remote source if needed.
|
||||
|
||||
Coordinated auto-updates are enabled. Systems like [fleet](https://coreos.com/docs/#fleet) or [Kubernetes](http://kubernetes.io/docs/) coordinate container services. IPMI, vendor utilities, or first-boot are used to re-provision machines into new roles.
|
||||
|
||||
## Machine lifecycle
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
186
Documentation/matchbox.md
Normal file
186
Documentation/matchbox.md
Normal file
@@ -0,0 +1,186 @@
|
||||
# matchbox
|
||||
|
||||
`matchbox` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS Container Linux clusters. `matchbox` maintains **Group** definitions which match machines to *profiles* based on labels (e.g. MAC address, UUID, stage, region). A **Profile** is a named set of config templates (e.g. iPXE, GRUB, Ignition config, Cloud-Config, generic configs). The aim is to use Container Linux's early-boot capabilities to provision Container Linux machines.
|
||||
|
||||
Network boot endpoints provide PXE, iPXE, GRUB support. `matchbox` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with rkt, or as a Docker container.
|
||||
|
||||

|
||||
|
||||
## Getting started
|
||||
|
||||
Get started running `matchbox` on your Linux machine, with rkt or Docker.
|
||||
|
||||
* [matchbox with rkt](getting-started-rkt.md)
|
||||
* [matchbox with Docker](getting-started-docker.md)
|
||||
|
||||
## Flags
|
||||
|
||||
See [configuration](config.md) flags and variables.
|
||||
|
||||
## API
|
||||
|
||||
* [HTTP API](api.md)
|
||||
* [gRPC API](https://godoc.org/github.com/coreos/matchbox/matchbox/client)
|
||||
|
||||
## Data
|
||||
|
||||
A `Store` stores machine Groups, Profiles, and associated Ignition configs, cloud-configs, and generic configs. By default, `matchbox` uses a `FileStore` to search a `-data-path` for these resources.
|
||||
|
||||
Prepare `/var/lib/matchbox` with `groups`, `profiles`, `ignition`, `cloud`, and `generic` subdirectories. You may wish to keep these files under version control.
|
||||
|
||||
```
|
||||
/var/lib/matchbox
|
||||
├── cloud
|
||||
│ ├── cloud.yaml.tmpl
|
||||
│ └── worker.sh.tmpl
|
||||
├── ignition
|
||||
│ └── raw.ign
|
||||
│ └── etcd.yaml.tmpl
|
||||
│ └── simple.yaml.tmpl
|
||||
├── generic
|
||||
│ └── config.yaml
|
||||
│ └── setup.cfg
|
||||
│ └── datacenter-1.tmpl
|
||||
├── groups
|
||||
│ └── default.json
|
||||
│ └── node1.json
|
||||
│ └── us-central1-a.json
|
||||
└── profiles
|
||||
└── etcd.json
|
||||
└── worker.json
|
||||
```
|
||||
|
||||
The [examples](../examples) directory is a valid data directory with some pre-defined configs. Note that `examples/groups` contains many possible groups in nested directories for demo purposes (tutorials pick one to mount). Your machine groups should be kept directly inside the `groups` directory as shown above.
|
||||
|
||||
### Profiles
|
||||
|
||||
Profiles reference an Ignition config, Cloud-Config, and/or generic config by name and define network boot settings.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "etcd",
|
||||
"name": "Container Linux with etcd2",
|
||||
"cloud_id": "",
|
||||
"ignition_id": "etcd.yaml",
|
||||
"generic_id": "some-service.cfg",
|
||||
"boot": {
|
||||
"kernel": "/assets/coreos/1576.5.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["/assets/coreos/1576.5.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"coreos.autologin"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `"boot"` settings will be used to render configs to network boot programs such as iPXE or GRUB. You may reference remote kernel and initrd assets or [local assets](#assets).
|
||||
|
||||
To use Ignition, set the `coreos.config.url` kernel option to reference the `matchbox` [Ignition endpoint](api.md#ignition-config), which will render the `ignition_id` file. Be sure to add the `coreos.first_boot` option as well.
|
||||
|
||||
To use cloud-config, set the `cloud-config-url` kernel option to reference the `matchbox` [Cloud-Config endpoint](api.md#cloud-config), which will render the `cloud_id` file.
|
||||
|
||||
### Groups
|
||||
|
||||
Groups define selectors which match zero or more machines. Machine(s) matching a group will boot and provision according to the group's `Profile`.
|
||||
|
||||
Create a group definition with a `Profile` to be applied, selectors for matching machines, and any `metadata` needed to render templated configs. For example `/var/lib/matchbox/groups/node1.json` matches a single machine with MAC address `52:54:00:89:d8:10`.
|
||||
|
||||
```json
|
||||
# /var/lib/matchbox/groups/node1.json
|
||||
{
|
||||
"name": "node1",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:89:d8:10"
|
||||
},
|
||||
"metadata": {
|
||||
"fleet_metadata": "role=etcd,name=node1",
|
||||
"etcd_name": "node1",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Meanwhile, `/var/lib/matchbox/groups/proxy.json` acts as the default machine group since it has no selectors.
|
||||
|
||||
```
|
||||
{
|
||||
"name": "etcd-proxy",
|
||||
"profile": "etcd-proxy",
|
||||
"metadata": {
|
||||
"fleet_metadata": "role=etcd-proxy",
|
||||
"etcd_initial_cluster": "node1=http://node1.example.com:2380,node2=http://node2.example.com:2380,node3=http://node3.example.com:2380"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For example, a request to `/ignition?mac=52:54:00:89:d8:10` would render the Ignition template in the "etcd" `Profile`, with the machine group's metadata. A request to `/ignition` would match the default group (which has no selectors) and render the Ignition in the "etcd-proxy" Profile. Avoid defining multiple default groups as resolution will not be deterministic.
|
||||
|
||||
#### Reserved selectors
|
||||
|
||||
Group selectors can use any key/value pairs you find useful. However, several labels have a defined purpose and will be normalized or parsed specially.
|
||||
|
||||
* `uuid` - machine UUID
|
||||
* `mac` - network interface physical address (normalized MAC address)
|
||||
* `hostname` - hostname reported by a network boot program
|
||||
* `serial` - serial reported by a network boot program
|
||||
|
||||
### Config templates
|
||||
|
||||
Profiles can reference various templated configs. Ignition JSON configs can be generated from [Container Linux Config](https://github.com/coreos/container-linux-config-transpiler/blob/master/doc/configuration.md) template files. Cloud-Config templates files can be used to render a script or Cloud-Config. Generic template files can be used to render arbitrary untyped configs (experimental). Each template may contain [Go template](https://golang.org/pkg/text/template/) elements which will be rendered with machine group metadata, selectors, and query params.
|
||||
|
||||
For details and examples:
|
||||
|
||||
* [Container Linux Config](container-linux-config.md)
|
||||
* [Cloud-Config](cloud-config.md)
|
||||
|
||||
#### Variables
|
||||
|
||||
Within Container Linux Config templates, Cloud-Config templates, or generic templates, you can use group metadata, selectors, or request-scoped query params. For example, a request `/generic?mac=52-54-00-89-d8-10&foo=some-param&bar=b` would match the `node1.json` machine group shown above. If the group's profile ("etcd") referenced a generic template, the following variables could be used.
|
||||
|
||||
<!-- {% raw %} -->
|
||||
```
|
||||
# Untyped generic config file
|
||||
# Selector
|
||||
{{.mac}} # 52:54:00:89:d8:10 (normalized)
|
||||
# Metadata
|
||||
{{.etcd_name}} # node1
|
||||
{{.fleet_metadata}} # role=etcd,name=node1
|
||||
# Query
|
||||
{{.request.query.mac}} # 52:54:00:89:d8:10 (normalized)
|
||||
{{.request.query.foo}} # some-param
|
||||
{{.request.query.bar}} # b
|
||||
# Special Addition
|
||||
{{.request.raw_query}} # mac=52:54:00:89:d8:10&foo=some-param&bar=b
|
||||
```
|
||||
<!-- {% endraw %} -->
|
||||
|
||||
Note that `.request` is reserved for these purposes so group metadata with data nested under a top level "request" key will be overwritten.
|
||||
|
||||
## Assets
|
||||
|
||||
`matchbox` can serve `-assets-path` static assets at `/assets`. This is helpful for reducing bandwidth usage when serving the kernel and initrd to network booted machines. The default assets-path is `/var/lib/matchbox/assets` or you can pass `-assets-path=""` to disable asset serving.
|
||||
|
||||
```
|
||||
matchbox.foo/assets/
|
||||
└── coreos
|
||||
└── VERSION
|
||||
├── coreos_production_pxe.vmlinuz
|
||||
└── coreos_production_pxe_image.cpio.gz
|
||||
```
|
||||
|
||||
For example, a `Profile` might refer to a local asset `/assets/coreos/VERSION/coreos_production_pxe.vmlinuz` instead of `http://stable.release.core-os.net/amd64-usr/VERSION/coreos_production_pxe.vmlinuz`.
|
||||
|
||||
See the [get-coreos](../scripts/README.md#get-coreos) script to quickly download, verify, and place Container Linux assets.
|
||||
|
||||
## Network
|
||||
|
||||
`matchbox` does not implement or exec a DHCP/TFTP server. Read [network setup](network-setup.md) or use the [coreos/dnsmasq](../contrib/dnsmasq) image if you need a quick DHCP, proxyDHCP, TFTP, or DNS setup.
|
||||
|
||||
## Going further
|
||||
|
||||
* [gRPC API Usage](config.md#grpc-api)
|
||||
* [Metadata](api.md#metadata)
|
||||
* OpenPGP [Signing](api.md#openpgp-signatures)
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
# Network Boot Environments
|
||||
# Network boot environments
|
||||
|
||||
This guide reviews network boot protocols and the different ways client machines can be PXE booted.
|
||||
|
||||
@@ -7,24 +7,26 @@ This guide reviews network boot protocols and the different ways client machines
|
||||
|
||||
The Preboot eXecution Environment (PXE) defines requirements for consistent, hardware-independent network-based machine booting and configuration. Formally, PXE specifies pre-boot protocol services that client NIC firmware must provide (DHCP, TFTP, UDP/IP), specifies boot firmware requirements, and defines a client-server protocol for obtaining a network boot program (NBP) which automates OS installation and configuration.
|
||||
|
||||
<img src='img/pxelinux.png' class="img-center" alt="Basic PXE client server protocol flow"/>
|
||||

|
||||
|
||||
At power-on, if a client machine's BIOS or UEFI boot firmware is set to perform network booting, the network interface card's PXE firmware broadcasts a DHCPDISCOVER packet identifying itself as a PXEClient to the network environment.
|
||||
|
||||
The network environment can be set up in a number of ways, which we'll discuss. In the simplest, a PXE-enabled DHCP Server responds with a DHCPOFFER with Options, which include a TFTP server IP ("next server") and the name of an NBP ("boot filename") to download (e.g. pxelinux.0). PXE firmware then downloads the NBP over TFTP and starts it. Finally, the NBP loads configs, scripts, and/or images it requires to run an OS.
|
||||
|
||||
### Network Boot Programs
|
||||
### Network boot programs
|
||||
|
||||
Machines can be booted and configured with CoreOS using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
|
||||
Machines can be booted and configured with CoreOS Container Linux using several network boot programs and approaches. Let's review them. If you're new to network booting or unsure which to choose, iPXE is a reasonable and flexible choice.
|
||||
|
||||
#### PXELINUX
|
||||
|
||||
[PXELINUX](http://www.syslinux.org/wiki/index.php/PXELINUX) is a common network boot program which loads a config file from `mybootdir/pxelinux.cfg/` over TFTP. The file is chosen based on the client's UUID, MAC address, IP address, or a default.
|
||||
|
||||
mybootdir/pxelinux.cfg/b8945908-d6a6-41a9-611d-74a6ab80b83d
|
||||
mybootdir/pxelinux.cfg/default
|
||||
```sh
|
||||
$ mybootdir/pxelinux.cfg/b8945908-d6a6-41a9-611d-74a6ab80b83d
|
||||
$ mybootdir/pxelinux.cfg/default
|
||||
```
|
||||
|
||||
Here is an example PXE config file which boots a CoreOS image hosted on the TFTP server.
|
||||
Here is an example PXE config file which boots a Container Linux image hosted on the TFTP server.
|
||||
|
||||
```
|
||||
default coreos
|
||||
@@ -47,11 +49,11 @@ This approach has a number of drawbacks. TFTP can be slow, managing config files
|
||||
|
||||
[iPXE](http://ipxe.org/) is an enhanced implementation of the PXE client firmware and a network boot program which uses iPXE scripts rather than config files and can download scripts and images with HTTP.
|
||||
|
||||
<img src='img/ipxe.png' class="img-center" alt="iPXE client server protocol flow"/>
|
||||

|
||||
|
||||
A DHCPOFFER to iPXE client firmware specifies an HTTP boot script such as `http://bootcfg.foo/boot.ipxe`.
|
||||
A DHCPOFFER to iPXE client firmware specifies an HTTP boot script such as `http://matchbox.foo/boot.ipxe`.
|
||||
|
||||
Here is an example iPXE script for booting the remote CoreOS stable image.
|
||||
Here is an example iPXE script for booting the remote Container Linux stable image.
|
||||
|
||||
```
|
||||
#!ipxe
|
||||
@@ -64,11 +66,7 @@ boot
|
||||
|
||||
A TFTP server is used only to provide the `undionly.kpxe` boot program to older PXE firmware in order to bootstrap into iPXE.
|
||||
|
||||
CoreOS `bootcfg` can render signed iPXE scripts to machines based on their hardware attributes. Setup involves configuring your DHCP server to point iPXE clients to the `bootcfg` [iPXE endpoint](api.md#ipxe).
|
||||
|
||||
#### Pixiecore
|
||||
|
||||
[Pixiecore](https://github.com/danderson/pixiecore) is a newer service which implements a proxyDHCP server, TFTP server, and HTTP server all-in-one and calls through to an HTTP API. CoreOS `bootcfg` can serve Pixiecore JSON (optionally signed) based on the supplied MAC address, to implement the Pixiecore HTTP API.
|
||||
CoreOS `matchbox` can render signed iPXE scripts to machines based on their hardware attributes. Setup involves configuring your DHCP server to point iPXE clients to the `matchbox` [iPXE endpoint](api.md#ipxe).
|
||||
|
||||
## DHCP
|
||||
|
||||
@@ -76,4 +74,4 @@ Many networks have DHCP services which are impractical to modify or disable. Com
|
||||
|
||||
To address this, PXE client firmware listens for DHCPOFFERs from a non-PXE DHCP server *and* a PXE-enabled **proxyDHCP server** configured to respond with the next server and boot filename only. Client firmware combines the two responses as if they had come from a single PXE-enabled DHCP server.
|
||||
|
||||
<img src='img/proxydhcp.png' class="img-center" alt="DHCP and proxyDHCP responses are merged to get PXE Options"/>
|
||||

|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
# Network setup
|
||||
|
||||
# Network Setup
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to boot and provision BIOS/PXE, iPXE, or UEFI client machines.
|
||||
|
||||
This guide shows how to create a DHCP/TFTP/DNS network boot environment to work with `bootcfg` to boot and provision PXE, iPXE, or GRUB2 client machines.
|
||||
|
||||
`bootcfg` serves iPXE scripts or GRUB configs over HTTP to serve as the entrypoint for CoreOS cluster bring-up. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, you can configure your own network services to point to `bootcfg` or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in libvirt demos).
|
||||
Matchbox serves iPXE scripts over HTTP to serve as the entrypoint for provisioning clusters. It does not implement or exec a DHCP, TFTP, or DNS server. Instead, configure your network environment to point to Matchbox or use the convenient [coreos/dnsmasq](../contrib/dnsmasq) container image (used in local QEMU/KVM setup).
|
||||
|
||||
*Note*: These are just suggestions. Your network administrator or system administrator should choose the right network setup for your company.
|
||||
|
||||
@@ -13,148 +12,243 @@ Client hardware must have a network interface which supports PXE or iPXE.
|
||||
|
||||
## Goals
|
||||
|
||||
* Add a DNS name which resolves to a `bootcfg` deploy.
|
||||
* Chainload PXE firmware to iPXE or GRUB2
|
||||
* Point iPXE clients to `http://bootcfg.foo:port/boot.ipxe`
|
||||
* Point GRUB clients to `http://bootcfg.foo:port/grub`
|
||||
* Add a DNS name which resolves to a `matchbox` deploy.
|
||||
* Chainload BIOS clients (legacy PXE) to iPXE (undionly.kpxe)
|
||||
* Chainload UEFI clients to iPXE (ipxe.efi)
|
||||
* Point iPXE clients to `http://matchbox.example.com:port/boot.ipxe`
|
||||
* Point GRUB clients to `http://matchbox.example.com:port/grub`
|
||||
|
||||
## Setup
|
||||
|
||||
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://bootcfg.foo:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE or GRUB to be able to fetch subsequent configs via HTTP.
|
||||
Many companies already have DHCP/TFTP configured to "PXE-boot" PXE/iPXE clients. In this case, machines (or a subset of machines) can be made to chainload from `chain http://matchbox.example.com:port/boot.ipxe`. Older PXE clients can be made to chainload into iPXE to be able to fetch subsequent configs via HTTP.
|
||||
|
||||
On simpler networks, such as what a developer might have at home, a relatively inflexible DHCP server may be in place, with no TFTP server. In this case, a proxy DHCP server can be run alongside a non-PXE capable DHCP server.
|
||||
|
||||
This diagram can point you to the **right section(s)** of this document.
|
||||
|
||||
<img src='img/network-setup-flow.png' class="img-center" alt="Network Setup Flow"/>
|
||||

|
||||
|
||||
The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wish to use rkt or Docker to quickly run DHCP, proxyDHCP, TFTP, or DNS services, use [coreos/dnsmasq](#coreos/dnsmasq).
|
||||
The setup of DHCP, TFTP, and DNS services on a network varies greatly. If you wish to use rkt or Docker to quickly run DHCP, proxyDHCP, TFTP, or DNS services, use [coreos/dnsmasq](#coreosdnsmasq).
|
||||
|
||||
## DNS
|
||||
|
||||
Add a DNS entry (e.g. `bootcfg.foo`, `provisioner.mycompany-internal`) that resolves to a deployment of the CoreOS `bootcfg` service from machines you intend to boot and provision.
|
||||
Add a DNS entry (e.g. `matchbox.example.com`, `provisioner.mycompany-internal`) that resolves to a deployment of the CoreOS `matchbox` service from machines you intend to boot and provision.
|
||||
|
||||
dig bootcfg.foo
|
||||
```sh
|
||||
$ dig matchbox.example.com
|
||||
```
|
||||
|
||||
If you deployed `bootcfg` to a known IP address (e.g. dedicated host, load balanced endpoint, Kubernetes NodePort) and use `dnsmasq`, a domain name to IPv4/IPv6 address mapping could be added to the `/etc/dnsmasq.conf`.
|
||||
If you deployed `matchbox` to a known IP address (e.g. dedicated host, load balanced endpoint, Kubernetes NodePort) and use `dnsmasq`, a domain name to IPv4/IPv6 address mapping could be added to the `/etc/dnsmasq.conf`.
|
||||
|
||||
# dnsmasq.conf
|
||||
address=/bootcfg.foo/172.15.0.2
|
||||
```
|
||||
# dnsmasq.conf
|
||||
address=/matchbox.example.com/172.18.0.2
|
||||
```
|
||||
|
||||
## iPXE
|
||||
|
||||
Servers with DHCP/TFTP services which already network boot iPXE clients can use the `chain` command to make clients download and execute the iPXE boot script from `bootcfg`.
|
||||
Networks which already run DHCP and TFTP services to network boot PXE/iPXE clients can add an iPXE config to delegate or `chain` to the matchbox service's iPXE entrypoint.
|
||||
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://bootcfg.foo:8080/boot.ipxe
|
||||
```
|
||||
# /var/www/html/ipxe/default.ipxe
|
||||
chain http://matchbox.example.com:8080/boot.ipxe
|
||||
```
|
||||
|
||||
You can chainload from a menu entry or use other [iPXE commands](http://ipxe.org/cmd) if you have needs beyond just delegating to the iPXE script served by `bootcfg`.
|
||||
You can chainload from a menu entry or use other [iPXE commands](http://ipxe.org/cmd) if you need to do more than simple delegation.
|
||||
|
||||
## GRUB
|
||||
### PXE-enabled DHCP
|
||||
|
||||
Needs docs.
|
||||
|
||||
### Configuring DHCP
|
||||
|
||||
Configure your DHCP server to supply options to older PXE client firmware to specify the location of an iPXE or GRUB network boot program on your TFTP server. Send clients to the `bootcfg` iPXE script or GRUB config endpoints.
|
||||
Configure your DHCP server to supply options to older PXE client firmware to specify the location of an iPXE or GRUB network boot program on your TFTP server. Send clients to the `matchbox` iPXE script or GRUB config endpoints.
|
||||
|
||||
Here is an example `/etc/dnsmasq.conf`:
|
||||
|
||||
dhcp-range=192.168.1.1,192.168.1.254,30m
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
# if request comes from older PXE ROM, chainload to iPXE (via TFTP)
|
||||
dhcp-boot=tag:!ipxe,undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the bootcfg iPXE boot script (via HTTP)
|
||||
dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe
|
||||
# verbose
|
||||
log-queries
|
||||
log-dhcp
|
||||
# (optional) disable DNS
|
||||
port=0
|
||||
# (optional) static DNS assignments
|
||||
# address=/bootcfg.foo/192.168.1.100
|
||||
```ini
|
||||
dhcp-range=192.168.1.1,192.168.1.254,30m
|
||||
|
||||
Add [undionly.kpxe](http://boot.ipxe.org/undionly.kpxe) (and undionly.kpxe.0 if using dnsmasq) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
sudo systemctl start dnsmasq
|
||||
sudo firewall-cmd --add-service=dhcp --add-service=tftp [--add-service=dns]
|
||||
sudo firewall-cmd --list-services
|
||||
# Legacy PXE
|
||||
dhcp-match=set:bios,option:client-arch,0
|
||||
dhcp-boot=tag:bios,undionly.kpxe
|
||||
|
||||
#### proxy DHCP
|
||||
# UEFI
|
||||
dhcp-match=set:efi32,option:client-arch,6
|
||||
dhcp-boot=tag:efi32,ipxe.efi
|
||||
dhcp-match=set:efibc,option:client-arch,7
|
||||
dhcp-boot=tag:efibc,ipxe.efi
|
||||
dhcp-match=set:efi64,option:client-arch,9
|
||||
dhcp-boot=tag:efi64,ipxe.efi
|
||||
|
||||
Alternately, a DHCP proxy server can be run alongside an existing non-PXE DHCP server. The proxy DHCP server provides only the next server and boot filename Options, leaving IP allocation to the DHCP server. Clients listen for both DHCP offers and merge the responses as though they had come from one PXE-enabled DHCP server.
|
||||
# iPXE - chainload to matchbox ipxe boot script
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
# verbose
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
# static DNS assignments
|
||||
address=/matchbox.example.com/192.168.1.100
|
||||
|
||||
# (optional) disable DNS and specify alternate
|
||||
# port=0
|
||||
# dhcp-option=6,192.168.1.100
|
||||
```
|
||||
|
||||
Add [ipxe.efi](http://boot.ipxe.org/ipxe.efi) and [undionly.kpxe](http://boot.ipxe.org/undionly.kpxe) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
|
||||
```sh
|
||||
$ sudo systemctl start dnsmasq
|
||||
$ sudo firewall-cmd --add-service=dhcp --add-service=tftp [--add-service=dns]
|
||||
$ sudo firewall-cmd --list-services
|
||||
```
|
||||
|
||||
See [dnsmasq](#coreosdnsmasq) below to run dnsmasq with a container.
|
||||
|
||||
#### Proxy-DHCP
|
||||
|
||||
Alternately, a proxy-DHCP server can be run alongside an existing non-PXE DHCP server. The proxy DHCP server provides only the next server and boot filename Options, leaving IP allocation to the DHCP server. Clients listen for both DHCP offers and merge the responses as though they had come from one PXE-enabled DHCP server.
|
||||
|
||||
Example `/etc/dnsmasq.conf`:
|
||||
|
||||
dhcp-range=192.168.1.1,proxy,255.255.255.0
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
# if request comes from older PXE ROM, chainload to iPXE (via TFTP)
|
||||
pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the bootcfg iPXE boot script (via HTTP)
|
||||
pxe-service=tag:ipxe,x86PC,"iPXE",http://bootcfg.foo:8080/boot.ipxe
|
||||
# verbose
|
||||
log-queries
|
||||
log-dhcp
|
||||
```ini
|
||||
dhcp-range=192.168.1.1,proxy,255.255.255.0
|
||||
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
# if request comes from older PXE ROM, chainload to iPXE (via TFTP)
|
||||
pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe
|
||||
# if request comes from iPXE user class, set tag "ipxe"
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
# point ipxe tagged requests to the matchbox iPXE boot script (via HTTP)
|
||||
pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
# verbose
|
||||
log-queries
|
||||
log-dhcp
|
||||
```
|
||||
|
||||
Add [undionly.kpxe](http://boot.ipxe.org/undionly.kpxe) (and undionly.kpxe.0 if using dnsmasq) to your tftp-root (e.g. `/var/lib/tftpboot`).
|
||||
|
||||
sudo systemctl start dnsmasq
|
||||
sudo firewall-cmd --add-service=dhcp --add-service=tftp [--add-service=dns]
|
||||
sudo firewall-cmd --list-services
|
||||
```sh
|
||||
$ sudo systemctl start dnsmasq
|
||||
$ sudo firewall-cmd --add-service=dhcp --add-service=tftp [--add-service=dns]
|
||||
$ sudo firewall-cmd --list-services
|
||||
```
|
||||
|
||||
With rkt:
|
||||
|
||||
sudo rkt run coreos.com/dnsmasq:v0.3.0 --net=host -- -d -q --dhcp-range=192.168.1.1,proxy,255.255.255.0 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe --pxe-service=tag:ipxe,x86PC,"iPXE",http://bootcfg.foo:8080/boot.ipxe --log-queries --log-dhcp
|
||||
|
||||
With Docker:
|
||||
|
||||
sudo docker run --net=host --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=192.168.1.1,proxy,255.255.255.0 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe --pxe-service=tag:ipxe,x86PC,"iPXE",http://bootcfg.foo:8080/boot.ipxe --log-queries --log-dhcp
|
||||
See [dnsmasq](#coreosdnsmasq) below to run dnsmasq with a container.
|
||||
|
||||
### Configurable TFTP
|
||||
|
||||
If your DHCP server is configured to PXE boot clients, but you don't have control over this configuration, you can modify the pxelinux.cfg's served to PXE clients.
|
||||
If your DHCP server is configured to network boot PXE clients (but not iPXE clients), add a pxelinux.cfg to serve an iPXE kernel image and append commands.
|
||||
|
||||
Example `/var/lib/tftpboot/pxelinux.cfg/default`:
|
||||
|
||||
timeout 10
|
||||
default iPXE
|
||||
LABEL iPXE
|
||||
KERNEL ipxe.lkrn
|
||||
APPEND dhcp && chain http://bootcfg.foo:8080/boot.ipxe
|
||||
```
|
||||
timeout 10
|
||||
default iPXE
|
||||
LABEL iPXE
|
||||
KERNEL ipxe.lkrn
|
||||
APPEND dhcp && chain http://matchbox.example.com:8080/boot.ipxe
|
||||
```
|
||||
|
||||
Add ipxe.lkrn to `/var/lib/tftpboot` (see [iPXE docs](http://ipxe.org/embed)).
|
||||
|
||||
## coreos/dnsmasq
|
||||
|
||||
On networks without network services, the `coreos.com/dnsmasq:v0.3.0` rkt ACI or `coreos/dnsmasq:latest` Docker image can setup an appropriate environment quickly. The images bundle `undionly.kpxe` and `grub.efi` for convenience. Here are some examples which run a DHCP/TFTP/DNS server on your host's network:
|
||||
The [quay.io/coreos/dnsmasq](https://quay.io/repository/coreos/dnsmasq) container image can run DHCP, TFTP, and DNS services via rkt or docker. The image bundles `ipxe.efi`, `undionly.kpxe`, and `grub.efi` for convenience. See [contrib/dnsmasq](../contrib/dnsmasq) for details.
|
||||
|
||||
With rkt:
|
||||
Run DHCP, TFTP, and DNS on the host's network:
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
sudo rkt trust --prefix coreos.com/dnsmasq
|
||||
# gpg key fingerprint is: 18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
Run a proxy-DHCP and TFTP service on the host's network:
|
||||
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.1,proxy,255.255.255.0 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe \
|
||||
--pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
sudo rkt run coreos.com/dnsmasq:v0.3.0 --net=host -- -d -q --dhcp-range=192.168.1.3,192.168.1.254 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:#ipxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --address=/bootcfg.foo/192.168.1.2 --log-queries --log-dhcp
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.1,proxy,255.255.255.0 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--pxe-service=tag:#ipxe,x86PC,"PXE chainload to iPXE",undionly.kpxe \
|
||||
--pxe-service=tag:ipxe,x86PC,"iPXE",http://matchbox.example.com:8080/boot.ipxe \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
With Docker:
|
||||
Be sure to allow enabled services in your firewall configuration.
|
||||
|
||||
```
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq -d -q --dhcp-range=192.168.1.3,192.168.1.254 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:#ipxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --address=/bootcfg.foo/192.168.1.2 --log-queries --log-dhcp
|
||||
```sh
|
||||
$ sudo firewall-cmd --add-service=dhcp --add-service=tftp --add-service=dns
|
||||
```
|
||||
|
||||
Ensure that `bootcfg.foo` resolves to a `bootcfg` deployment and that you've allowed the services to run in your firewall configuration.
|
||||
## UEFI
|
||||
|
||||
sudo firewall-cmd --add-service=dhcp --add-service=tftp --add-service=dns
|
||||
### Development
|
||||
|
||||
Install the dependencies for [QEMU with UEFI](https://fedoraproject.org/wiki/Using_UEFI_with_QEMU). Walk through the [getting-started-docker](getting-started-docker.md) tutorial. Launch client VMs using `create-uefi`.
|
||||
|
||||
Create UEFI QEMU/KVM VMs attached to the `docker0` bridge.
|
||||
|
||||
```sh
|
||||
$ sudo ./scripts/libvirt create-uefi
|
||||
```
|
||||
|
||||
UEFI clients should chainload `ipxe.efi`, load iPXE and Ignition configs from Matchbox, and Container Linux should boot as usual.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
See [troubleshooting](troubleshooting.md).
|
||||
See [troubleshooting](troubleshooting.md).
|
||||
|
||||
@@ -1,22 +1,21 @@
|
||||
|
||||
# OpenPGP Signing
|
||||
# OpenPGP signing
|
||||
|
||||
The `bootcfg` OpenPGP signature endpoints serve detached binary and ASCII armored signatures of rendered configs, if enabled. Each config endpoint has corresponding signature endpoints, typically suffixed with `.sig` or `.asc`.
|
||||
The `matchbox` OpenPGP signature endpoints serve detached binary and ASCII armored signatures of rendered configs, if enabled. Each config endpoint has corresponding signature endpoints, typically suffixed with `.sig` or `.asc`.
|
||||
|
||||
To enable OpenPGP signing, provide the path to a secret keyring containing a single signing key with `-key-ring-path` or by setting `BOOTCFG_KEY_RING_PATH`. If a passphrase is required, set it via the `BOOTCFG_PASSPHRASE` environment variable.
|
||||
To enable OpenPGP signing, provide the path to a secret keyring containing a single signing key with `-key-ring-path` or by setting `MATCHBOX_KEY_RING_PATH`. If a passphrase is required, set it via the `MATCHBOX_PASSPHRASE` environment variable.
|
||||
|
||||
Here are example signature endpoints without their query parameters.
|
||||
|
||||
| Endpoint | Signature Endpoint | ASCII Signature Endpoint |
|
||||
|------------|--------------------|-------------------------|
|
||||
| iPXE | `http://bootcfg.foo/ipxe.sig` | `http://bootcfg.foo/ipxe.asc` |
|
||||
| Pixiecore | `http://bootcfg/pixiecore/v1/boot.sig/:MAC` | `http://bootcfg/pixiecore/v1/boot.asc/:MAC` |
|
||||
| GRUB2 | `http://bootcf.foo/grub.sig` | `http://bootcfg.foo/grub.asc` |
|
||||
| Ignition | `http://bootcfg.foo/ignition.sig` | `http://bootcfg.foo/ignition.asc` |
|
||||
| Cloud-Config | `http://bootcfg.foo/cloud.sig` | `http://bootcfg.foo/cloud.asc` |
|
||||
| Metadata | `http://bootcfg.foo/metadata.sig` | `http://bootcfg.foo/metadata.asc` |
|
||||
| iPXE | `http://matchbox.foo/ipxe.sig` | `http://matchbox.foo/ipxe.asc` |
|
||||
| GRUB2 | `http://bootcf.foo/grub.sig` | `http://matchbox.foo/grub.asc` |
|
||||
| Ignition | `http://matchbox.foo/ignition.sig` | `http://matchbox.foo/ignition.asc` |
|
||||
| Cloud-Config | `http://matchbox.foo/cloud.sig` | `http://matchbox.foo/cloud.asc` |
|
||||
| Metadata | `http://matchbox.foo/metadata.sig` | `http://matchbox.foo/metadata.asc` |
|
||||
|
||||
In production, mount your signing keyring and source the passphrase from a [Kubernetes secret](http://kubernetes.io/v1.1/docs/user-guide/secrets.html). Use a signing subkey exported to a keyring by itself, which can be revoked by a primary key, if needed.
|
||||
In production, mount your signing keyring and source the passphrase from a [Kubernetes secret](https://kubernetes.io/docs/user-guide/secrets/). Use a signing subkey exported to a keyring by itself, which can be revoked by a primary key, if needed.
|
||||
|
||||
To try it locally, you may use the test fixture keyring. **Warning: The test fixture keyring is for examples only.**
|
||||
|
||||
@@ -26,28 +25,33 @@ Verify a signature response and config response from the command line using the
|
||||
|
||||
**Warning: The test fixture keyring is for examples only.**
|
||||
|
||||
$ gpg --homedir sign/fixtures --verify sig_file response_file
|
||||
gpg: Signature made Mon 08 Feb 2016 11:37:03 PM PST using RSA key ID 9896356A
|
||||
gpg: sign/fixtures/trustdb.gpg: trustdb created
|
||||
gpg: Good signature from "Fake Bare Metal Key (Do not use) <do-not-use@example.com>"
|
||||
gpg: WARNING: This key is not certified with a trusted signature!
|
||||
gpg: There is no indication that the signature belongs to the owner.
|
||||
Primary key fingerprint: BE2F 12BC 3642 2594 570A CCBB 8DC4 2020 9896 356A
|
||||
```sh
|
||||
$ gpg --homedir sign/fixtures --verify sig_file response_file
|
||||
gpg: Signature made Mon 08 Feb 2016 11:37:03 PM PST using RSA key ID 9896356A
|
||||
gpg: sign/fixtures/trustdb.gpg: trustdb created
|
||||
gpg: Good signature from "Fake Bare Metal Key (Do not use) <do-not-use@example.com>"
|
||||
gpg: WARNING: This key is not certified with a trusted signature!
|
||||
gpg: There is no indication that the signature belongs to the owner.
|
||||
Primary key fingerprint: BE2F 12BC 3642 2594 570A CCBB 8DC4 2020 9896 356A
|
||||
```
|
||||
|
||||
## Signing Key Generation
|
||||
## Signing key generation
|
||||
|
||||
Create a signing key or subkey according to your requirements and security policies. Here are some basic [guides](https://coreos.com/rkt/docs/latest/signing-and-verification-guide.html).
|
||||
|
||||
### gpg
|
||||
|
||||
mkdir -m 700 path/in/vault
|
||||
gpg --homedir path/in/vault --expert --gen-key
|
||||
...
|
||||
```sh
|
||||
$ mkdir -m 700 path/in/vault
|
||||
$ gpg --homedir path/in/vault --expert --gen-key
|
||||
...
|
||||
```
|
||||
|
||||
### gpg2
|
||||
|
||||
mkdir -m 700 path/in/vault
|
||||
gpg2 --homedir path/in/vault --expert --gen-key
|
||||
...
|
||||
gpg2 --homedir path/in/vault --export-secret-key KEYID > path/in/vault/secring.gpg
|
||||
|
||||
```sh
|
||||
$ mkdir -m 700 path/in/vault
|
||||
$ gpg2 --homedir path/in/vault --expert --gen-key
|
||||
...
|
||||
$ gpg2 --homedir path/in/vault --export-secret-key KEYID > path/in/vault/secring.gpg
|
||||
```
|
||||
|
||||
@@ -1,115 +0,0 @@
|
||||
|
||||
# Torus Storage
|
||||
|
||||
The Torus example provisions a 3 node CoreOS cluster, with `etcd3` and Torus, to demonstrate a stand-alone storage cluster. Each of the 3 nodes runs a Torus instance which makes 1GiB of space available (configured per node by "torus_storage_size" in machine group metadata).
|
||||
|
||||
## Requirements
|
||||
|
||||
Ensure that you've gone through the [bootcfg with rkt](getting-started-rkt.md) guide and understand the basics. In particular, you should be able to:
|
||||
|
||||
* Use rkt to start `bootcfg`
|
||||
* Create a network boot environment with `coreos/dnsmasq`
|
||||
* Create the example libvirt client VMs
|
||||
* Install the Torus [binaries](https://github.com/coreos/torus/releases)
|
||||
|
||||
## Examples
|
||||
|
||||
The [examples](..examples) statically assign IP addresses (172.15.0.21, 172.15.0.22, 172.15.0.23) to libvirt client VMs created by `scripts/libvirt`. The examples can be used for physical machines if you update the MAC/IP addresses. See [network setup](network-setup.md) and [deployment](deployment.md).
|
||||
|
||||
* [torus](../examples/groups/torus) - iPXE boot a Torus cluster (use rkt)
|
||||
|
||||
## Assets
|
||||
|
||||
Download the CoreOS image assets referenced in the target [profile](../examples/profiles).
|
||||
|
||||
./scripts/get-coreos alpha 1053.2.0 ./examples/assets
|
||||
|
||||
## Containers
|
||||
|
||||
Run the latest `bootcfg` ACI with rkt and the `torus` example.
|
||||
|
||||
sudo rkt run --net=metal0:IP=172.15.0.2 --mount volume=data,target=/var/lib/bootcfg --volume data,kind=host,source=$PWD/examples --mount volume=groups,target=/var/lib/bootcfg/groups --volume groups,kind=host,source=$PWD/examples/groups/torus quay.io/coreos/bootcfg:latest -- -address=0.0.0.0:8080 -log-level=debug
|
||||
|
||||
Create a network boot environment and power-on your machines. Revisit [bootcfg with rkt](getting-started-rkt.md) for help. Client machines should network boot and provision themselves.
|
||||
|
||||
## Verify
|
||||
|
||||
Install the Torus [binaries](https://github.com/coreos/torus/releases) on your laptop. Torus uses etcd3 for coordination and metadata storage, so any etcd node in the cluster can be queried with `torusctl`.
|
||||
|
||||
./torusctl --etcd 172.15.0.21:2379 list-peers
|
||||
|
||||
Run `list-peers` to report the status of data nodes in the Torus cluster.
|
||||
|
||||
```
|
||||
+--------------------------+--------------------------------------+---------+------+--------+---------------+--------------+
|
||||
| ADDRESS | UUID | SIZE | USED | MEMBER | UPDATED | REB/REP DATA |
|
||||
+--------------------------+--------------------------------------+---------+------+--------+---------------+--------------+
|
||||
| http://172.15.0.21:40000 | 016fad6a-2e23-11e6-8ced-525400a19cae | 1.0 GiB | 0 B | OK | 1 second ago | 0 B/sec |
|
||||
| http://172.15.0.23:40000 | 0408cbba-2e23-11e6-9871-525400c36177 | 1.0 GiB | 0 B | OK | 2 seconds ago | 0 B/sec |
|
||||
| http://172.15.0.22:40000 | 0c67d31c-2e23-11e6-91f5-525400b22f86 | 1.0 GiB | 0 B | OK | 3 seconds ago | 0 B/sec |
|
||||
+--------------------------+--------------------------------------+---------+------+--------+---------------+--------------+
|
||||
```
|
||||
|
||||
Torus has already initialized its metadata within etcd3 to format the cluster and added all peers to the pool. Each node provides 1 GiB of storage and has `MEMBER` status `OK`.
|
||||
|
||||
### Volume Creation
|
||||
|
||||
Create a new replicated, virtual block device or `volume` on Torus.
|
||||
|
||||
./torusblk --etcd=172.15.0.21:2379 volume create hello 500MiB
|
||||
|
||||
List the current volumes,
|
||||
|
||||
./torusctl --etcd=172.15.0.21:2379 volume list
|
||||
|
||||
and verify that `hello` was created.
|
||||
|
||||
```
|
||||
+-------------+---------+
|
||||
| VOLUME NAME | SIZE |
|
||||
+-------------+---------+
|
||||
| hello | 500 MiB |
|
||||
+-------------+---------+
|
||||
```
|
||||
|
||||
### Filesystems and Mounting
|
||||
|
||||
Let's attach the Torus volume, create a filesystem, and add some files. Add the `nbd` kernel module.
|
||||
|
||||
sudo modprobe nbd
|
||||
sudo ./torusblk --etcd=172.15.0.21:2379 nbd hello
|
||||
|
||||
In a new shell, create a new filesystem on the volume and mount it on your system.
|
||||
|
||||
sudo mkfs.ext4 /dev/nbd0
|
||||
sudo mkdir -p /mnt/hello
|
||||
sudo mount /dev/nbd0 -o discard,noatime /mnt/hello
|
||||
|
||||
Check that the mounted filesystem is present.
|
||||
|
||||
$ mount | grep nbd
|
||||
/dev/nbd0 on /mnt/hello type ext4 (rw,noatime,seclabel,discard,data=ordered)
|
||||
|
||||
By default, Torus uses a replication factor of 2. You may write some data and poweroff one of the three nodes if you wish.
|
||||
|
||||
sudo sh -c "echo 'hello world' > /mnt/hello/world"
|
||||
sudo virsh destroy node3 # actually equivalent to poweroff
|
||||
|
||||
Check the Torus data nodes.
|
||||
|
||||
$ ./torusctl --etcd 172.15.0.21:2379 list-peers
|
||||
|
||||
```
|
||||
+--------------------------+--------------------------------------+---------+--------+--------+---------------+--------------+
|
||||
| ADDRESS | UUID | SIZE | USED | MEMBER | UPDATED | REB/REP DATA |
|
||||
+--------------------------+--------------------------------------+---------+--------+--------+---------------+--------------+
|
||||
| http://172.15.0.21:40000 | 016fad6a-2e23-11e6-8ced-525400a19cae | 1.0 GiB | 22 MiB | OK | 3 seconds ago | 0 B/sec |
|
||||
| http://172.15.0.22:40000 | 0c67d31c-2e23-11e6-91f5-525400b22f86 | 1.0 GiB | 22 MiB | OK | 3 seconds ago | 0 B/sec |
|
||||
| | 0408cbba-2e23-11e6-9871-525400c36177 | ??? | ??? | DOWN | Missing | |
|
||||
+--------------------------+--------------------------------------+---------+--------+--------+---------------+--------------+
|
||||
Balanced: true Usage: 2.15%
|
||||
```
|
||||
|
||||
## Going Further
|
||||
|
||||
See the [Torus](https://github.com/coreos/torus) project to learn more about Torus and contribute.
|
||||
@@ -1,18 +1,19 @@
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
## Firewall
|
||||
|
||||
Running DHCP or proxyDHCP with `coreos/dnsmasq` on a host requires that the Firewall allow DHCP and TFTP (for chainloading) services to run.
|
||||
|
||||
## Port Collision
|
||||
## Port collision
|
||||
|
||||
Running DHCP or proxyDHCP can cause port already in use collisions depending on what's running. Fedora runs bootp listening on udp/67 for example. Find the service using the port.
|
||||
|
||||
sudo lsof -i :67
|
||||
```sh
|
||||
$ sudo lsof -i :67
|
||||
```
|
||||
|
||||
Evaluate whether you can configure the existing service or whether you'd like to stop it and test with `coreos/dnsmasq`.
|
||||
|
||||
## No boot filename received
|
||||
|
||||
PXE client firmware did not receive a DHCP Offer with PXE-Options after several attempts. If you're using the `coreos/dnsmasq` image with `-d`, each request should log to stdout. Using the wrong `-i` interface is the most common reason DHCP requests are not received. Otherwise, wireshark can be useful for investigating.
|
||||
PXE client firmware did not receive a DHCP Offer with PXE-Options after several attempts. If you're using the `coreos/dnsmasq` image with `-d`, each request should log to stdout. Using the wrong `-i` interface is the most common reason DHCP requests are not received. Otherwise, wireshark can be useful for investigating.
|
||||
|
||||
63
Jenkinsfile
vendored
Normal file
63
Jenkinsfile
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
pipeline {
|
||||
agent none
|
||||
|
||||
options {
|
||||
timeout(time:45, unit:'MINUTES')
|
||||
buildDiscarder(logRotator(numToKeepStr:'20'))
|
||||
}
|
||||
|
||||
stages {
|
||||
stage('Cluster Tests') {
|
||||
steps {
|
||||
parallel (
|
||||
etcd3: {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:5, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/etcd3
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
bootkube: {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:60, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
chmod 600 ./tests/smoke/fake_rsa
|
||||
export ASSETS_DIR=~/assets; ./tests/smoke/bootkube
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
"etcd3-terraform": {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:10, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/etcd3-terraform
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
"bootkube-terraform": {
|
||||
node('fedora && bare-metal') {
|
||||
timeout(time:60, unit:'MINUTES') {
|
||||
checkout scm
|
||||
sh '''#!/bin/bash -e
|
||||
chmod 600 ./tests/smoke/fake_rsa
|
||||
export ASSETS_DIR=~/assets; export CONFIG_DIR=~/matchbox/examples/etc/matchbox; ./tests/smoke/bootkube-terraform
|
||||
'''
|
||||
deleteDir()
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
99
Makefile
99
Makefile
@@ -1,45 +1,86 @@
|
||||
|
||||
export CGO_ENABLED:=0
|
||||
LD_FLAGS="-w -X github.com/coreos/coreos-baremetal/bootcfg/version.Version=$(shell ./git-version)"
|
||||
LOCAL_BIN=/usr/local/bin
|
||||
|
||||
VERSION=$(shell ./scripts/dev/git-version)
|
||||
LD_FLAGS="-w -X github.com/coreos/matchbox/matchbox/version.Version=$(VERSION)"
|
||||
|
||||
REPO=github.com/coreos/matchbox
|
||||
IMAGE_REPO=coreos/matchbox
|
||||
QUAY_REPO=quay.io/coreos/matchbox
|
||||
|
||||
all: build
|
||||
build: clean bin/bootcfg bin/bootcmd
|
||||
|
||||
bin/bootcfg:
|
||||
go build -o bin/bootcfg -ldflags $(LD_FLAGS) -a github.com/coreos/coreos-baremetal/cmd/bootcfg
|
||||
build: clean bin/matchbox
|
||||
|
||||
bin/bootcmd:
|
||||
go build -o bin/bootcmd -ldflags $(LD_FLAGS) -a github.com/coreos/coreos-baremetal/cmd/bootcmd
|
||||
bin/%:
|
||||
@go build -o bin/$* -v -ldflags $(LD_FLAGS) $(REPO)/cmd/$*
|
||||
|
||||
test:
|
||||
./test
|
||||
@./scripts/dev/test
|
||||
|
||||
install:
|
||||
cp bin/bootcfg $(LOCAL_BIN)
|
||||
cp bin/bootcmd $(LOCAL_BIN)
|
||||
.PHONY: aci
|
||||
aci: clean build
|
||||
@sudo ./scripts/dev/build-aci
|
||||
|
||||
release: clean _output/coreos-baremetal-linux-amd64.tar.gz _output/coreos-baremetal-darwin-amd64.tar.gz
|
||||
.PHONY: docker-image
|
||||
docker-image:
|
||||
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
|
||||
bin/%/bootcfg:
|
||||
GOOS=$* go build -o bin/$*/bootcfg -ldflags $(LD_FLAGS) -a github.com/coreos/coreos-baremetal/cmd/bootcfg
|
||||
.PHONY: docker-push
|
||||
docker-push: docker-image
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):latest
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):$(VERSION)
|
||||
@sudo docker push $(QUAY_REPO):latest
|
||||
@sudo docker push $(QUAY_REPO):$(VERSION)
|
||||
|
||||
bin/%/bootcmd:
|
||||
GOOS=$* go build -o bin/$*/bootcmd -ldflags $(LD_FLAGS) -a github.com/coreos/coreos-baremetal/cmd/bootcmd
|
||||
.PHONY: vendor
|
||||
vendor:
|
||||
@glide update --strip-vendor
|
||||
@glide-vc --use-lock-file --no-tests --only-code
|
||||
|
||||
_output/coreos-baremetal-%-amd64.tar.gz: NAME=coreos-baremetal-$(VERSION)-$*-amd64
|
||||
_output/coreos-baremetal-%-amd64.tar.gz: DEST=_output/$(NAME)
|
||||
_output/coreos-baremetal-%-amd64.tar.gz: bin/%/bootcfg bin/%/bootcmd
|
||||
mkdir -p $(DEST)
|
||||
cp bin/$*/bootcfg $(DEST)
|
||||
cp bin/$*/bootcmd $(DEST)
|
||||
./scripts/release-files $(DEST)
|
||||
tar zcvf $(DEST).tar.gz -C _output $(NAME)
|
||||
.PHONY: codegen
|
||||
codegen: tools
|
||||
@./scripts/dev/codegen
|
||||
|
||||
.PHONY: tools
|
||||
tools: bin/protoc bin/protoc-gen-go
|
||||
|
||||
bin/protoc:
|
||||
@./scripts/dev/get-protoc
|
||||
|
||||
bin/protoc-gen-go:
|
||||
@go build -o bin/protoc-gen-go $(REPO)/vendor/github.com/golang/protobuf/protoc-gen-go
|
||||
|
||||
clean:
|
||||
rm -rf bin
|
||||
rm -rf _output
|
||||
@rm -rf bin
|
||||
|
||||
.PHONY: all build test install release clean
|
||||
.SECONDARY: _output/coreos-baremetal-linux-amd64 _output/coreos-baremetal-darwin-amd64
|
||||
clean-release:
|
||||
@rm -rf _output
|
||||
|
||||
release: \
|
||||
clean \
|
||||
clean-release \
|
||||
_output/matchbox-linux-amd64.tar.gz \
|
||||
_output/matchbox-linux-arm.tar.gz \
|
||||
_output/matchbox-linux-arm64.tar.gz \
|
||||
_output/matchbox-darwin-amd64.tar.gz
|
||||
|
||||
bin/linux-amd64/matchbox: GOARGS = GOOS=linux GOARCH=amd64
|
||||
bin/linux-arm/matchbox: GOARGS = GOOS=linux GOARCH=arm GOARM=6
|
||||
bin/linux-arm64/matchbox: GOARGS = GOOS=linux GOARCH=arm64
|
||||
bin/darwin-amd64/matchbox: GOARGS = GOOS=darwin GOARCH=amd64
|
||||
|
||||
bin/%/matchbox:
|
||||
$(GOARGS) go build -o $@ -ldflags $(LD_FLAGS) -a $(REPO)/cmd/matchbox
|
||||
|
||||
_output/matchbox-%.tar.gz: NAME=matchbox-$(VERSION)-$*
|
||||
_output/matchbox-%.tar.gz: DEST=_output/$(NAME)
|
||||
_output/matchbox-%.tar.gz: bin/%/matchbox
|
||||
mkdir -p $(DEST)
|
||||
cp bin/$*/matchbox $(DEST)
|
||||
./scripts/dev/release-files $(DEST)
|
||||
tar zcvf $(DEST).tar.gz -C _output $(NAME)
|
||||
|
||||
.PHONY: all build clean test release
|
||||
.SECONDARY: _output/matchbox-linux-amd64 _output/matchbox-darwin-amd64
|
||||
|
||||
|
||||
90
README.md
90
README.md
@@ -1,56 +1,52 @@
|
||||
# matchbox [](https://travis-ci.org/coreos/matchbox) [](https://godoc.org/github.com/coreos/matchbox) [](https://quay.io/repository/coreos/matchbox) [](https://botbot.me/freenode/coreos)
|
||||
|
||||
# CoreOS on Baremetal
|
||||
`matchbox` is a service that matches bare-metal machines (based on labels like MAC, UUID, etc.) to profiles that PXE boot and provision Container Linux clusters. Profiles specify the kernel/initrd, kernel arguments, iPXE config, GRUB config, [Container Linux Config][cl-config], or other configs a machine should use. Matchbox can be [installed](Documentation/deployment.md) as a binary, RPM, container image, or deployed on a Kubernetes cluster and it provides an authenticated gRPC API for clients like [Terraform][terraform].
|
||||
|
||||
[](https://travis-ci.org/coreos/coreos-baremetal) [](https://godoc.org/github.com/coreos/coreos-baremetal) [](https://quay.io/repository/coreos/bootcfg) [](https://botbot.me/freenode/coreos)
|
||||
|
||||
Guides and a service for network booting and provisioning CoreOS clusters on virtual or physical hardware.
|
||||
|
||||
## Guides
|
||||
|
||||
* [Network Setup](Documentation/network-setup.md)
|
||||
* [Machine Lifecycle](Documentation/machine-lifecycle.md)
|
||||
* [Documentation][docs]
|
||||
* [matchbox Service](Documentation/matchbox.md)
|
||||
* [Profiles](Documentation/matchbox.md#profiles)
|
||||
* [Groups](Documentation/matchbox.md#groups)
|
||||
* Config Templates
|
||||
* [Container Linux Config][cl-config]
|
||||
* [Cloud-Config][cloud-config]
|
||||
* [Configuration](Documentation/config.md)
|
||||
* [HTTP API](Documentation/api.md) / [gRPC API](https://godoc.org/github.com/coreos/matchbox/matchbox/client)
|
||||
* [Background: Machine Lifecycle](Documentation/machine-lifecycle.md)
|
||||
* [Background: PXE Booting](Documentation/network-booting.md)
|
||||
|
||||
## bootcfg
|
||||
### Installation
|
||||
|
||||
`bootcfg` is an HTTP and gRPC service that renders signed [Ignition configs](https://coreos.com/ignition/docs/latest/what-is-ignition.html), [cloud-configs](https://coreos.com/os/docs/latest/cloud-config.html), network boot configs, and metadata to machines to create CoreOS clusters. Groups match machines based on labels (e.g. MAC, UUID, stage, region) and use named Profiles for provisioning. Network boot endpoints provide PXE, iPXE, GRUB, and Pixiecore support. `bootcfg` can be deployed as a binary, as an [appc](https://github.com/appc/spec) container with [rkt](https://coreos.com/rkt/docs/latest/), or as a Docker container.
|
||||
* Installation
|
||||
* Installing on [Container Linux / other distros](Documentation/deployment.md)
|
||||
* Installing on [Kubernetes](Documentation/deployment.md#kubernetes)
|
||||
* Running with [rkt](Documentation/deployment.md#rkt) / [docker](Documentation/deployment.md#docker)
|
||||
* [Network Setup](Documentation/network-setup.md)
|
||||
|
||||
* [bootcfg Service](Documentation/bootcfg.md)
|
||||
* [Profiles](Documentation/bootcfg.md#profiles)
|
||||
* [Groups](Documentation/bootcfg.md#groups-and-metadata)
|
||||
* Config Templates
|
||||
* [Ignition](Documentation/ignition.md)
|
||||
* [Cloud-Config](Documentation/cloud-config.md)
|
||||
* Tutorials (libvirt)
|
||||
* [bootcfg with rkt](Documentation/getting-started-rkt.md)
|
||||
* [bootcfg with Docker](Documentation/getting-started-docker.md)
|
||||
* [Configuration](Documentation/config.md)
|
||||
* [HTTP API](Documentation/api.md)
|
||||
* [gRPC API](https://godoc.org/github.com/coreos/coreos-baremetal/bootcfg/client)
|
||||
* Backends
|
||||
* [FileStore](Documentation/bootcfg.md#data)
|
||||
* Deployment via
|
||||
* [rkt](Documentation/deployment.md#rkt)
|
||||
* [docker](Documentation/deployment.md#docker)
|
||||
* [Kubernetes](Documentation/deployment.md#kubernetes)
|
||||
* [binary](Documentation/deployment.md#binary) / [systemd](Documentation/deployment.md#systemd)
|
||||
* [Troubleshooting](Documentation/troubleshooting.md)
|
||||
* Going Further
|
||||
* [gRPC API Usage](config.md#grpc-api)
|
||||
* [Metadata](api.md#metadata)
|
||||
* OpenPGP [Signing](api.md#openpgp-signatures)
|
||||
### Tutorials
|
||||
|
||||
### Examples
|
||||
* [Getting Started](Documentation/getting-started.md) - provision physical machines with Container Linux
|
||||
* Local QEMU/KVM
|
||||
* [matchbox with Docker](Documentation/getting-started-docker.md)
|
||||
* [matchbox with rkt](Documentation/getting-started-rkt.md)
|
||||
* Clusters
|
||||
* [etcd3](Documentation/getting-started-rkt.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](Documentation/bootkube.md) - Install a 3-node Kubernetes v1.8.5 cluster
|
||||
* Clusters (Terraform-based)
|
||||
* [etcd3](examples/terraform/etcd3-install/README.md) - Install a 3-node etcd3 cluster
|
||||
* [Kubernetes](examples/terraform/bootkube-install/README.md) - Install a 3-node Kubernetes v1.10.3 cluster
|
||||
|
||||
The [examples](examples) network boot and provision CoreOS clusters. Network boot [libvirt](scripts/README.md#libvirt) VMs to try the examples on your Linux laptop.
|
||||
### Projects
|
||||
|
||||
* Multi-node [Kubernetes cluster](Documentation/kubernetes.md) with TLS
|
||||
* Multi-node [self-hosted Kubernetes cluster](Documentation/bootkube.md)
|
||||
* Multi-node etcd cluster
|
||||
* Multi-node [Torus](Documentation/torus.md) distributed storage cluster
|
||||
* Network boot or Install to Disk
|
||||
* Multi-stage CoreOS installs
|
||||
* [GRUB Netboot](Documentation/grub.md) CoreOS
|
||||
* iPXE Boot CoreOS with a root fs
|
||||
* iPXE Boot CoreOS
|
||||
* Lab [examples](https://github.com/dghubble/metal)
|
||||
* [Tectonic](https://coreos.com/tectonic/docs/latest/index.html) - enterprise-ready Kubernetes
|
||||
* [Typhoon](https://typhoon.psdn.io/) - minimal and free Kubernetes
|
||||
|
||||
## Contrib
|
||||
|
||||
* [dnsmasq](contrib/dnsmasq/README.md) - Run DHCP, TFTP, and DNS services with docker or rkt
|
||||
* [squid](contrib/squid/README.md) - Run a transparent cache proxy
|
||||
* [terraform-provider-matchbox](https://github.com/coreos/terraform-provider-matchbox) - Terraform provider plugin for Matchbox
|
||||
|
||||
[docs]: https://coreos.com/matchbox/docs/latest
|
||||
[terraform]: https://github.com/coreos/terraform-provider-matchbox
|
||||
[cl-config]: Documentation/container-linux-config.md
|
||||
[cloud-config]: Documentation/cloud-config.md
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNew_MissingEndpoints(t *testing.T) {
|
||||
cfg := &Config{
|
||||
Endpoints: []string{},
|
||||
}
|
||||
client, err := New(cfg)
|
||||
assert.Nil(t, client)
|
||||
assert.Equal(t, errNoEndpoints, err)
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// ContextHandler defines a handler which receives a passed context.Context
|
||||
// with the standard ResponseWriter and Request.
|
||||
type ContextHandler interface {
|
||||
ServeHTTP(context.Context, http.ResponseWriter, *http.Request)
|
||||
}
|
||||
|
||||
// ContextHandlerFunc type is an adapter to allow the use of an ordinary
|
||||
// function as a ContextHandler. If f is a function with the correct
|
||||
// signature, ContextHandlerFunc(f) is a ContextHandler that calls f.
|
||||
type ContextHandlerFunc func(context.Context, http.ResponseWriter, *http.Request)
|
||||
|
||||
// ServeHTTP calls the function f(ctx, w, req).
|
||||
func (f ContextHandlerFunc) ServeHTTP(ctx context.Context, w http.ResponseWriter, req *http.Request) {
|
||||
f(ctx, w, req)
|
||||
}
|
||||
|
||||
// handler wraps a ContextHandler to implement the http.Handler interface for
|
||||
// compatability with ServeMux and middlewares.
|
||||
//
|
||||
// Middleswares which do not pass a ctx break the chain so place them before
|
||||
// or after chains of ContextHandlers.
|
||||
type handler struct {
|
||||
ctx context.Context
|
||||
handler ContextHandler
|
||||
}
|
||||
|
||||
// NewHandler returns an http.Handler which wraps the given ContextHandler
|
||||
// and creates a background context.Context.
|
||||
func NewHandler(h ContextHandler) http.Handler {
|
||||
return &handler{
|
||||
ctx: context.Background(),
|
||||
handler: h,
|
||||
}
|
||||
}
|
||||
|
||||
// ServeHTTP lets handler implement the http.Handler interface.
|
||||
func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
h.handler.ServeHTTP(h.ctx, w, req)
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func TestNewHandler(t *testing.T) {
|
||||
fn := func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
|
||||
fmt.Fprintf(w, "ContextHandler called")
|
||||
}
|
||||
h := NewHandler(ContextHandlerFunc(fn))
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/", nil)
|
||||
h.ServeHTTP(w, req)
|
||||
assert.Equal(t, "ContextHandler called", w.Body.String())
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package http provides the bootcfg HTTP server
|
||||
package http
|
||||
@@ -1,57 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/server"
|
||||
pb "github.com/coreos/coreos-baremetal/bootcfg/server/serverpb"
|
||||
)
|
||||
|
||||
// pixiecoreHandler returns a handler that renders the boot config JSON for
|
||||
// the requester, to implement the Pixiecore API specification.
|
||||
// https://github.com/danderson/pixiecore/blob/master/README.api.md
|
||||
func (s *Server) pixiecoreHandler(core server.Server) ContextHandler {
|
||||
fn := func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
|
||||
// pixiecore only provides a MAC address label
|
||||
macAddr, err := parseMAC(filepath.Base(req.URL.Path))
|
||||
if err != nil {
|
||||
s.logger.Errorf("unparseable MAC address: %v", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
attrs := map[string]string{"mac": macAddr.String()}
|
||||
|
||||
group, err := core.SelectGroup(ctx, &pb.SelectGroupRequest{Labels: attrs})
|
||||
if err != nil {
|
||||
s.logger.WithFields(logrus.Fields{
|
||||
"label": macAddr,
|
||||
}).Infof("No matching group")
|
||||
http.NotFound(w, req)
|
||||
return
|
||||
}
|
||||
|
||||
profile, err := core.ProfileGet(ctx, &pb.ProfileGetRequest{Id: group.Profile})
|
||||
if err != nil {
|
||||
s.logger.WithFields(logrus.Fields{
|
||||
"label": macAddr,
|
||||
"group": group.Id,
|
||||
}).Infof("No profile named: %s", group.Profile)
|
||||
http.NotFound(w, req)
|
||||
return
|
||||
}
|
||||
|
||||
// match was successful
|
||||
s.logger.WithFields(logrus.Fields{
|
||||
"label": macAddr,
|
||||
"group": group.Id,
|
||||
"profile": profile.Id,
|
||||
}).Debug("Matched a Pixiecore config")
|
||||
|
||||
s.renderJSON(w, profile.Boot)
|
||||
}
|
||||
return ContextHandlerFunc(fn)
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
logtest "github.com/Sirupsen/logrus/hooks/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/server"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/storage/storagepb"
|
||||
fake "github.com/coreos/coreos-baremetal/bootcfg/storage/testfakes"
|
||||
)
|
||||
|
||||
func TestPixiecoreHandler(t *testing.T) {
|
||||
store := &fake.FixedStore{
|
||||
Groups: map[string]*storagepb.Group{testGroupWithMAC.Id: testGroupWithMAC},
|
||||
Profiles: map[string]*storagepb.Profile{testGroupWithMAC.Profile: fake.Profile},
|
||||
}
|
||||
logger, _ := logtest.NewNullLogger()
|
||||
srv := NewServer(&Config{Logger: logger})
|
||||
c := server.NewServer(&server.Config{Store: store})
|
||||
h := srv.pixiecoreHandler(c)
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/"+validMACStr, nil)
|
||||
h.ServeHTTP(context.Background(), w, req)
|
||||
// assert that:
|
||||
// - MAC address parameter is used for Group matching
|
||||
// - the Profile's NetBoot config is rendered as Pixiecore JSON
|
||||
expectedJSON := `{"kernel":"/image/kernel","initrd":["/image/initrd_a","/image/initrd_b"],"cmdline":{"a":"b","c":""}}`
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
assert.Equal(t, jsonContentType, w.HeaderMap.Get(contentType))
|
||||
assert.Equal(t, expectedJSON, w.Body.String())
|
||||
}
|
||||
|
||||
func TestPixiecoreHandler_InvalidMACAddress(t *testing.T) {
|
||||
logger, _ := logtest.NewNullLogger()
|
||||
srv := NewServer(&Config{Logger: logger})
|
||||
c := server.NewServer(&server.Config{Store: &fake.EmptyStore{}})
|
||||
h := srv.pixiecoreHandler(c)
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/", nil)
|
||||
h.ServeHTTP(context.Background(), w, req)
|
||||
assert.Equal(t, http.StatusBadRequest, w.Code)
|
||||
assert.Equal(t, "invalid MAC address /\n", w.Body.String())
|
||||
}
|
||||
|
||||
func TestPixiecoreHandler_NoMatchingGroup(t *testing.T) {
|
||||
logger, _ := logtest.NewNullLogger()
|
||||
srv := NewServer(&Config{Logger: logger})
|
||||
c := server.NewServer(&server.Config{Store: &fake.EmptyStore{}})
|
||||
h := srv.pixiecoreHandler(c)
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/"+validMACStr, nil)
|
||||
h.ServeHTTP(context.Background(), w, req)
|
||||
assert.Equal(t, http.StatusNotFound, w.Code)
|
||||
}
|
||||
|
||||
func TestPixiecoreHandler_NoMatchingProfile(t *testing.T) {
|
||||
store := &fake.FixedStore{
|
||||
Groups: map[string]*storagepb.Group{fake.Group.Id: fake.Group},
|
||||
}
|
||||
logger, _ := logtest.NewNullLogger()
|
||||
srv := NewServer(&Config{Logger: logger})
|
||||
c := server.NewServer(&server.Config{Store: store})
|
||||
h := srv.pixiecoreHandler(c)
|
||||
w := httptest.NewRecorder()
|
||||
req, _ := http.NewRequest("GET", "/"+validMACStr, nil)
|
||||
h.ServeHTTP(context.Background(), w, req)
|
||||
assert.Equal(t, http.StatusNotFound, w.Code)
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package rpc provides the bootcfg gRPC server
|
||||
package rpc
|
||||
@@ -1,25 +0,0 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/rpc/rpcpb"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/server"
|
||||
pb "github.com/coreos/coreos-baremetal/bootcfg/server/serverpb"
|
||||
)
|
||||
|
||||
// ignitionServer takes a bootcfg Server and implements a gRPC IgnitionServer.
|
||||
type ignitionServer struct {
|
||||
srv server.Server
|
||||
}
|
||||
|
||||
func newIgnitionServer(s server.Server) rpcpb.IgnitionServer {
|
||||
return &ignitionServer{
|
||||
srv: s,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ignitionServer) IgnitionPut(ctx context.Context, req *pb.IgnitionPutRequest) (*pb.IgnitionPutResponse, error) {
|
||||
_, err := s.srv.IgnitionPut(ctx, req)
|
||||
return &pb.IgnitionPutResponse{}, grpcError(err)
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package server is a bootcfg library package for implementing servers.
|
||||
package server
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package serverpb provides bootcfg message types.
|
||||
package serverpb
|
||||
@@ -1,326 +0,0 @@
|
||||
// Code generated by protoc-gen-go.
|
||||
// source: messages.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package serverpb is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
messages.proto
|
||||
|
||||
It has these top-level messages:
|
||||
SelectGroupRequest
|
||||
SelectGroupResponse
|
||||
SelectProfileRequest
|
||||
SelectProfileResponse
|
||||
GroupPutRequest
|
||||
GroupPutResponse
|
||||
GroupGetRequest
|
||||
GroupListRequest
|
||||
GroupGetResponse
|
||||
GroupListResponse
|
||||
ProfilePutRequest
|
||||
ProfilePutResponse
|
||||
ProfileGetRequest
|
||||
ProfileGetResponse
|
||||
ProfileListRequest
|
||||
ProfileListResponse
|
||||
IgnitionPutRequest
|
||||
IgnitionPutResponse
|
||||
*/
|
||||
package serverpb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import storagepb "github.com/coreos/coreos-baremetal/bootcfg/storage/storagepb"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.ProtoPackageIsVersion1
|
||||
|
||||
type SelectGroupRequest struct {
|
||||
Labels map[string]string `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
func (m *SelectGroupRequest) Reset() { *m = SelectGroupRequest{} }
|
||||
func (m *SelectGroupRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SelectGroupRequest) ProtoMessage() {}
|
||||
func (*SelectGroupRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
func (m *SelectGroupRequest) GetLabels() map[string]string {
|
||||
if m != nil {
|
||||
return m.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SelectGroupResponse struct {
|
||||
Group *storagepb.Group `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SelectGroupResponse) Reset() { *m = SelectGroupResponse{} }
|
||||
func (m *SelectGroupResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SelectGroupResponse) ProtoMessage() {}
|
||||
func (*SelectGroupResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
|
||||
func (m *SelectGroupResponse) GetGroup() *storagepb.Group {
|
||||
if m != nil {
|
||||
return m.Group
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SelectProfileRequest struct {
|
||||
Labels map[string]string `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
func (m *SelectProfileRequest) Reset() { *m = SelectProfileRequest{} }
|
||||
func (m *SelectProfileRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SelectProfileRequest) ProtoMessage() {}
|
||||
func (*SelectProfileRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
|
||||
func (m *SelectProfileRequest) GetLabels() map[string]string {
|
||||
if m != nil {
|
||||
return m.Labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SelectProfileResponse struct {
|
||||
Profile *storagepb.Profile `protobuf:"bytes,1,opt,name=profile" json:"profile,omitempty"`
|
||||
}
|
||||
|
||||
func (m *SelectProfileResponse) Reset() { *m = SelectProfileResponse{} }
|
||||
func (m *SelectProfileResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SelectProfileResponse) ProtoMessage() {}
|
||||
func (*SelectProfileResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
|
||||
|
||||
func (m *SelectProfileResponse) GetProfile() *storagepb.Profile {
|
||||
if m != nil {
|
||||
return m.Profile
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GroupPutRequest struct {
|
||||
Group *storagepb.Group `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GroupPutRequest) Reset() { *m = GroupPutRequest{} }
|
||||
func (m *GroupPutRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupPutRequest) ProtoMessage() {}
|
||||
func (*GroupPutRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
|
||||
|
||||
func (m *GroupPutRequest) GetGroup() *storagepb.Group {
|
||||
if m != nil {
|
||||
return m.Group
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GroupPutResponse struct {
|
||||
}
|
||||
|
||||
func (m *GroupPutResponse) Reset() { *m = GroupPutResponse{} }
|
||||
func (m *GroupPutResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupPutResponse) ProtoMessage() {}
|
||||
func (*GroupPutResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
|
||||
|
||||
type GroupGetRequest struct {
|
||||
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GroupGetRequest) Reset() { *m = GroupGetRequest{} }
|
||||
func (m *GroupGetRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupGetRequest) ProtoMessage() {}
|
||||
func (*GroupGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
|
||||
|
||||
type GroupListRequest struct {
|
||||
}
|
||||
|
||||
func (m *GroupListRequest) Reset() { *m = GroupListRequest{} }
|
||||
func (m *GroupListRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupListRequest) ProtoMessage() {}
|
||||
func (*GroupListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
|
||||
|
||||
type GroupGetResponse struct {
|
||||
Group *storagepb.Group `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GroupGetResponse) Reset() { *m = GroupGetResponse{} }
|
||||
func (m *GroupGetResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupGetResponse) ProtoMessage() {}
|
||||
func (*GroupGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
|
||||
|
||||
func (m *GroupGetResponse) GetGroup() *storagepb.Group {
|
||||
if m != nil {
|
||||
return m.Group
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GroupListResponse struct {
|
||||
Groups []*storagepb.Group `protobuf:"bytes,1,rep,name=groups" json:"groups,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GroupListResponse) Reset() { *m = GroupListResponse{} }
|
||||
func (m *GroupListResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GroupListResponse) ProtoMessage() {}
|
||||
func (*GroupListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
|
||||
|
||||
func (m *GroupListResponse) GetGroups() []*storagepb.Group {
|
||||
if m != nil {
|
||||
return m.Groups
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ProfilePutRequest struct {
|
||||
Profile *storagepb.Profile `protobuf:"bytes,1,opt,name=profile" json:"profile,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProfilePutRequest) Reset() { *m = ProfilePutRequest{} }
|
||||
func (m *ProfilePutRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProfilePutRequest) ProtoMessage() {}
|
||||
func (*ProfilePutRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
|
||||
|
||||
func (m *ProfilePutRequest) GetProfile() *storagepb.Profile {
|
||||
if m != nil {
|
||||
return m.Profile
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ProfilePutResponse struct {
|
||||
}
|
||||
|
||||
func (m *ProfilePutResponse) Reset() { *m = ProfilePutResponse{} }
|
||||
func (m *ProfilePutResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProfilePutResponse) ProtoMessage() {}
|
||||
func (*ProfilePutResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
|
||||
|
||||
type ProfileGetRequest struct {
|
||||
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProfileGetRequest) Reset() { *m = ProfileGetRequest{} }
|
||||
func (m *ProfileGetRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProfileGetRequest) ProtoMessage() {}
|
||||
func (*ProfileGetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
|
||||
|
||||
type ProfileGetResponse struct {
|
||||
Profile *storagepb.Profile `protobuf:"bytes,1,opt,name=profile" json:"profile,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProfileGetResponse) Reset() { *m = ProfileGetResponse{} }
|
||||
func (m *ProfileGetResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProfileGetResponse) ProtoMessage() {}
|
||||
func (*ProfileGetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
|
||||
|
||||
func (m *ProfileGetResponse) GetProfile() *storagepb.Profile {
|
||||
if m != nil {
|
||||
return m.Profile
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ProfileListRequest struct {
|
||||
}
|
||||
|
||||
func (m *ProfileListRequest) Reset() { *m = ProfileListRequest{} }
|
||||
func (m *ProfileListRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProfileListRequest) ProtoMessage() {}
|
||||
func (*ProfileListRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
|
||||
|
||||
type ProfileListResponse struct {
|
||||
Profiles []*storagepb.Profile `protobuf:"bytes,1,rep,name=profiles" json:"profiles,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ProfileListResponse) Reset() { *m = ProfileListResponse{} }
|
||||
func (m *ProfileListResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ProfileListResponse) ProtoMessage() {}
|
||||
func (*ProfileListResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
|
||||
|
||||
func (m *ProfileListResponse) GetProfiles() []*storagepb.Profile {
|
||||
if m != nil {
|
||||
return m.Profiles
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type IgnitionPutRequest struct {
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Config []byte `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
|
||||
}
|
||||
|
||||
func (m *IgnitionPutRequest) Reset() { *m = IgnitionPutRequest{} }
|
||||
func (m *IgnitionPutRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*IgnitionPutRequest) ProtoMessage() {}
|
||||
func (*IgnitionPutRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
|
||||
|
||||
type IgnitionPutResponse struct {
|
||||
}
|
||||
|
||||
func (m *IgnitionPutResponse) Reset() { *m = IgnitionPutResponse{} }
|
||||
func (m *IgnitionPutResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*IgnitionPutResponse) ProtoMessage() {}
|
||||
func (*IgnitionPutResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SelectGroupRequest)(nil), "serverpb.SelectGroupRequest")
|
||||
proto.RegisterType((*SelectGroupResponse)(nil), "serverpb.SelectGroupResponse")
|
||||
proto.RegisterType((*SelectProfileRequest)(nil), "serverpb.SelectProfileRequest")
|
||||
proto.RegisterType((*SelectProfileResponse)(nil), "serverpb.SelectProfileResponse")
|
||||
proto.RegisterType((*GroupPutRequest)(nil), "serverpb.GroupPutRequest")
|
||||
proto.RegisterType((*GroupPutResponse)(nil), "serverpb.GroupPutResponse")
|
||||
proto.RegisterType((*GroupGetRequest)(nil), "serverpb.GroupGetRequest")
|
||||
proto.RegisterType((*GroupListRequest)(nil), "serverpb.GroupListRequest")
|
||||
proto.RegisterType((*GroupGetResponse)(nil), "serverpb.GroupGetResponse")
|
||||
proto.RegisterType((*GroupListResponse)(nil), "serverpb.GroupListResponse")
|
||||
proto.RegisterType((*ProfilePutRequest)(nil), "serverpb.ProfilePutRequest")
|
||||
proto.RegisterType((*ProfilePutResponse)(nil), "serverpb.ProfilePutResponse")
|
||||
proto.RegisterType((*ProfileGetRequest)(nil), "serverpb.ProfileGetRequest")
|
||||
proto.RegisterType((*ProfileGetResponse)(nil), "serverpb.ProfileGetResponse")
|
||||
proto.RegisterType((*ProfileListRequest)(nil), "serverpb.ProfileListRequest")
|
||||
proto.RegisterType((*ProfileListResponse)(nil), "serverpb.ProfileListResponse")
|
||||
proto.RegisterType((*IgnitionPutRequest)(nil), "serverpb.IgnitionPutRequest")
|
||||
proto.RegisterType((*IgnitionPutResponse)(nil), "serverpb.IgnitionPutResponse")
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 441 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x54, 0x5d, 0x8b, 0xd3, 0x40,
|
||||
0x14, 0x25, 0x5d, 0x37, 0xae, 0xb7, 0xb2, 0x76, 0xa7, 0x5d, 0x59, 0xf6, 0x49, 0x47, 0x90, 0x20,
|
||||
0x3a, 0x85, 0xf5, 0xc5, 0x5d, 0x58, 0x58, 0x17, 0xca, 0xa2, 0xec, 0x43, 0x89, 0xbf, 0x20, 0x89,
|
||||
0xb7, 0x31, 0x98, 0x64, 0xe2, 0xcc, 0xa4, 0xd0, 0x9f, 0xe1, 0x83, 0xff, 0xd7, 0x76, 0x3e, 0xe2,
|
||||
0xa4, 0x15, 0xb1, 0xe2, 0xd3, 0xdc, 0xb9, 0xf7, 0x9c, 0x73, 0x7b, 0xce, 0x94, 0xc0, 0x71, 0x85,
|
||||
0x52, 0x26, 0x39, 0x4a, 0xd6, 0x08, 0xae, 0x38, 0x39, 0x92, 0x28, 0x96, 0x28, 0x9a, 0xf4, 0xfc,
|
||||
0x63, 0x5e, 0xa8, 0x2f, 0x6d, 0xca, 0x32, 0x5e, 0x4d, 0x33, 0x2e, 0x90, 0x4b, 0x7b, 0xbc, 0x49,
|
||||
0x13, 0x81, 0x15, 0xaa, 0xa4, 0x9c, 0xa6, 0x9c, 0xab, 0x6c, 0x91, 0x4f, 0xa5, 0xe2, 0x62, 0x2d,
|
||||
0xe2, 0xce, 0x26, 0x75, 0x95, 0x51, 0xa5, 0xdf, 0x03, 0x20, 0x9f, 0xb0, 0xc4, 0x4c, 0xdd, 0x09,
|
||||
0xde, 0x36, 0x31, 0x7e, 0x6b, 0x51, 0x2a, 0x72, 0x03, 0x61, 0x99, 0xa4, 0x58, 0xca, 0xb3, 0xe0,
|
||||
0xd9, 0x41, 0x34, 0xbc, 0x88, 0x98, 0xdb, 0xce, 0x76, 0xd1, 0xec, 0x5e, 0x43, 0x67, 0xb5, 0x12,
|
||||
0xab, 0xd8, 0xf2, 0xce, 0x2f, 0x61, 0xe8, 0xb5, 0xc9, 0x08, 0x0e, 0xbe, 0xe2, 0x6a, 0xad, 0x16,
|
||||
0x44, 0x8f, 0xe2, 0x4d, 0x49, 0x26, 0x70, 0xb8, 0x4c, 0xca, 0x16, 0xcf, 0x06, 0xba, 0x67, 0x2e,
|
||||
0x57, 0x83, 0x77, 0x01, 0xbd, 0x86, 0x71, 0x6f, 0x89, 0x6c, 0x78, 0x2d, 0x91, 0xbc, 0x84, 0xc3,
|
||||
0x7c, 0xd3, 0xd0, 0x22, 0xc3, 0x8b, 0x11, 0xeb, 0x3c, 0x31, 0x03, 0x34, 0x63, 0xfa, 0x23, 0x80,
|
||||
0x89, 0xe1, 0xcf, 0x05, 0x5f, 0x14, 0x25, 0x3a, 0x53, 0xb7, 0x5b, 0xa6, 0x5e, 0x6d, 0x9b, 0xea,
|
||||
0xe3, 0xff, 0xb7, 0xad, 0x19, 0x9c, 0x6e, 0xad, 0xb1, 0xc6, 0x5e, 0xc3, 0xc3, 0xc6, 0xb4, 0xac,
|
||||
0x35, 0xe2, 0x59, 0x73, 0x60, 0x07, 0xa1, 0x97, 0xf0, 0x44, 0xdb, 0x9d, 0xb7, 0xca, 0x19, 0xfb,
|
||||
0xdb, 0x64, 0x08, 0x8c, 0x7e, 0x51, 0xcd, 0x72, 0xfa, 0xdc, 0xca, 0xdd, 0x61, 0x27, 0x77, 0x0c,
|
||||
0x83, 0xe2, 0xb3, 0xf5, 0xb4, 0xae, 0x3a, 0xda, 0x7d, 0x21, 0x1d, 0x86, 0x5e, 0xd9, 0x9e, 0xa6,
|
||||
0xed, 0xf9, 0x40, 0xd7, 0x70, 0xe2, 0xe9, 0x59, 0x72, 0x04, 0xa1, 0x9e, 0xba, 0xc7, 0xd9, 0x65,
|
||||
0xdb, 0x39, 0x7d, 0x0f, 0x27, 0x36, 0x14, 0x2f, 0x82, 0xfd, 0x32, 0x9c, 0x00, 0xf1, 0x25, 0x6c,
|
||||
0x14, 0x2f, 0x3a, 0xe1, 0x3f, 0x84, 0x71, 0xdb, 0x51, 0x7d, 0xeb, 0xff, 0xba, 0xde, 0x8f, 0x74,
|
||||
0x06, 0xe3, 0x5e, 0xd7, 0x4a, 0x33, 0x38, 0xb2, 0x3c, 0x17, 0xcd, 0xef, 0xb4, 0x3b, 0x0c, 0xbd,
|
||||
0x01, 0xf2, 0x21, 0xaf, 0x0b, 0x55, 0xf0, 0xda, 0xcb, 0x87, 0xc0, 0x83, 0x3a, 0xa9, 0xd0, 0x1a,
|
||||
0xd1, 0x35, 0x79, 0x0a, 0x61, 0xc6, 0xeb, 0x45, 0x91, 0xeb, 0xff, 0xea, 0xe3, 0xd8, 0xde, 0xe8,
|
||||
0x29, 0x8c, 0x7b, 0x0a, 0xe6, 0x87, 0xa4, 0xa1, 0xfe, 0x62, 0xbc, 0xfd, 0x19, 0x00, 0x00, 0xff,
|
||||
0xff, 0x19, 0xef, 0xe3, 0x5b, 0x99, 0x04, 0x00, 0x00,
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package sign adds signatures to bootcfg responses.
|
||||
package sign
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package storage defines bootcfg's storage and object types.
|
||||
package storage
|
||||
@@ -1,72 +0,0 @@
|
||||
package storagepb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
testProfile = &Profile{
|
||||
Id: "id",
|
||||
CloudId: "cloud.yaml",
|
||||
IgnitionId: "ignition.json",
|
||||
}
|
||||
)
|
||||
|
||||
func TestProfileParse(t *testing.T) {
|
||||
cases := []struct {
|
||||
json string
|
||||
profile *Profile
|
||||
}{
|
||||
{`{"id": "id", "cloud_id": "cloud.yaml", "ignition_id": "ignition.json"}`, testProfile},
|
||||
}
|
||||
for _, c := range cases {
|
||||
profile, _ := ParseProfile([]byte(c.json))
|
||||
assert.Equal(t, c.profile, profile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProfileValidate(t *testing.T) {
|
||||
cases := []struct {
|
||||
profile *Profile
|
||||
valid bool
|
||||
}{
|
||||
{testProfile, true},
|
||||
{&Profile{Id: "a1b2c3d4"}, true},
|
||||
{&Profile{}, false},
|
||||
}
|
||||
for _, c := range cases {
|
||||
valid := c.profile.AssertValid() == nil
|
||||
assert.Equal(t, c.valid, valid)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProfileCopy(t *testing.T) {
|
||||
profile := &Profile{
|
||||
Id: "id",
|
||||
CloudId: "cloudy.tmpl",
|
||||
IgnitionId: "ignition.tmpl",
|
||||
Boot: &NetBoot{
|
||||
Kernel: "/image/kernel",
|
||||
Initrd: []string{"/image/initrd_a"},
|
||||
Cmdline: map[string]string{"a": "b"},
|
||||
},
|
||||
}
|
||||
copy := profile.Copy()
|
||||
// assert that:
|
||||
// - Profile fields are copied
|
||||
// - mutation of the copy does not affect the original
|
||||
assert.Equal(t, profile.Id, copy.Id)
|
||||
assert.Equal(t, profile.Name, copy.Name)
|
||||
assert.Equal(t, profile.IgnitionId, copy.IgnitionId)
|
||||
assert.Equal(t, profile.CloudId, copy.CloudId)
|
||||
assert.Equal(t, profile.Boot, copy.Boot)
|
||||
|
||||
copy.Id = "a-copy"
|
||||
copy.Boot.Initrd = []string{"/image/initrd_b"}
|
||||
copy.Boot.Cmdline["c"] = "d"
|
||||
assert.NotEqual(t, profile.Id, copy.Id)
|
||||
assert.NotEqual(t, profile.Boot.Initrd, copy.Boot.Initrd)
|
||||
assert.NotEqual(t, profile.Boot.Cmdline, copy.Boot.Cmdline)
|
||||
}
|
||||
7
build
7
build
@@ -1,7 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
LD_FLAGS="-w -X github.com/coreos/coreos-baremetal/bootcfg/version.Version=$(./git-version)"
|
||||
CGO_ENABLED=0 go build -o bin/bootcfg -ldflags "$LD_FLAGS" -a github.com/coreos/coreos-baremetal/cmd/bootcfg
|
||||
|
||||
# bootcmd CLI binary
|
||||
CGO_ENABLED=0 go build -o bin/bootcmd -ldflags "$LD_FLAGS" -a github.com/coreos/coreos-baremetal/cmd/bootcmd
|
||||
31
build-aci
31
build-aci
@@ -1,31 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
GIT_SHA=$(./git-version)
|
||||
|
||||
# Start with an empty ACI
|
||||
acbuild --debug begin
|
||||
|
||||
# In the event of the script exiting, end the build
|
||||
trap "{ export EXT=$?; acbuild --debug end && exit $EXT; }" EXIT
|
||||
|
||||
# Name the ACI
|
||||
acbuild --debug set-name coreos.com/bootcfg
|
||||
|
||||
# Add a version label
|
||||
acbuild --debug label add version $GIT_SHA
|
||||
|
||||
# Add alpine base dependency
|
||||
acbuild --debug dep add quay.io/coreos/alpine-sh
|
||||
|
||||
# Copy the static binary
|
||||
acbuild --debug copy bin/bootcfg /bootcfg
|
||||
|
||||
# Add a port for HTTP traffic
|
||||
acbuild --debug port add www tcp 8080
|
||||
|
||||
# Set the exec command
|
||||
acbuild --debug set-exec -- /bootcfg
|
||||
|
||||
# Save and overwrite any older bootcfg ACI
|
||||
acbuild --debug write --overwrite bootcfg.aci
|
||||
@@ -1,7 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
REPO=coreos/bootcfg
|
||||
GIT_SHA=$(./git-version)
|
||||
|
||||
docker build -q --rm=true -t $REPO:$GIT_SHA .
|
||||
docker tag $REPO:$GIT_SHA $REPO:latest
|
||||
@@ -1,171 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/coreos/pkg/flagutil"
|
||||
|
||||
web "github.com/coreos/coreos-baremetal/bootcfg/http"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/rpc"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/server"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/sign"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/storage"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/tlsutil"
|
||||
"github.com/coreos/coreos-baremetal/bootcfg/version"
|
||||
)
|
||||
|
||||
var (
|
||||
// Defaults to info logging
|
||||
log = logrus.New()
|
||||
)
|
||||
|
||||
func main() {
|
||||
flags := struct {
|
||||
address string
|
||||
rpcAddress string
|
||||
dataPath string
|
||||
assetsPath string
|
||||
logLevel string
|
||||
certFile string
|
||||
keyFile string
|
||||
caFile string
|
||||
keyRingPath string
|
||||
version bool
|
||||
help bool
|
||||
}{}
|
||||
flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
|
||||
flag.StringVar(&flags.rpcAddress, "rpc-address", "", "RPC listen address")
|
||||
flag.StringVar(&flags.dataPath, "data-path", "/var/lib/bootcfg", "Path to data directory")
|
||||
flag.StringVar(&flags.assetsPath, "assets-path", "/var/lib/bootcfg/assets", "Path to static assets")
|
||||
|
||||
// Log levels https://github.com/Sirupsen/logrus/blob/master/logrus.go#L36
|
||||
flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
|
||||
|
||||
// gRPC Server TLS
|
||||
flag.StringVar(&flags.certFile, "cert-file", "/etc/bootcfg/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.keyFile, "key-file", "/etc/bootcfg/server.key", "Path to the server TLS key file")
|
||||
// TLS Client Authentication
|
||||
flag.StringVar(&flags.caFile, "ca-file", "/etc/bootcfg/ca.crt", "Path to the CA verify and authenticate client certificates")
|
||||
|
||||
// Signing
|
||||
flag.StringVar(&flags.keyRingPath, "key-ring-path", "", "Path to a private keyring file")
|
||||
|
||||
// subcommands
|
||||
flag.BoolVar(&flags.version, "version", false, "print version and exit")
|
||||
flag.BoolVar(&flags.help, "help", false, "print usage and exit")
|
||||
|
||||
// parse command-line and environment variable arguments
|
||||
flag.Parse()
|
||||
if err := flagutil.SetFlagsFromEnv(flag.CommandLine, "BOOTCFG"); err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
// restrict OpenPGP passphrase to pass via environment variable only
|
||||
passphrase := os.Getenv("BOOTCFG_PASSPHRASE")
|
||||
|
||||
if flags.version {
|
||||
fmt.Println(version.Version)
|
||||
return
|
||||
}
|
||||
|
||||
if flags.help {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
|
||||
// validate arguments
|
||||
if url, err := url.Parse(flags.address); err != nil || url.String() == "" {
|
||||
log.Fatal("A valid HTTP listen address is required")
|
||||
}
|
||||
if finfo, err := os.Stat(flags.dataPath); err != nil || !finfo.IsDir() {
|
||||
log.Fatal("A valid -data-path is required")
|
||||
}
|
||||
if flags.assetsPath != "" {
|
||||
if finfo, err := os.Stat(flags.assetsPath); err != nil || !finfo.IsDir() {
|
||||
log.Fatalf("Provide a valid -assets-path or '' to disable asset serving: %s", flags.assetsPath)
|
||||
}
|
||||
}
|
||||
if flags.rpcAddress != "" {
|
||||
if _, err := os.Stat(flags.certFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server certificate with -cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.keyFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server key with -key-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.caFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS certificate authority for authorizing client certificates: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// logging setup
|
||||
lvl, err := logrus.ParseLevel(flags.logLevel)
|
||||
if err != nil {
|
||||
log.Fatalf("invalid log-level: %v", err)
|
||||
}
|
||||
log.Level = lvl
|
||||
|
||||
// (optional) signing
|
||||
var signer, armoredSigner sign.Signer
|
||||
if flags.keyRingPath != "" {
|
||||
entity, err := sign.LoadGPGEntity(flags.keyRingPath, passphrase)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
signer = sign.NewGPGSigner(entity)
|
||||
armoredSigner = sign.NewArmoredGPGSigner(entity)
|
||||
}
|
||||
|
||||
// storage
|
||||
store := storage.NewFileStore(&storage.Config{
|
||||
Root: flags.dataPath,
|
||||
})
|
||||
|
||||
// core logic
|
||||
server := server.NewServer(&server.Config{
|
||||
Store: store,
|
||||
})
|
||||
|
||||
// gRPC Server (feature disabled by default)
|
||||
if flags.rpcAddress != "" {
|
||||
log.Infof("Starting bootcfg gRPC server on %s", flags.rpcAddress)
|
||||
log.Infof("Using TLS server certificate: %s", flags.certFile)
|
||||
log.Infof("Using TLS server key: %s", flags.keyFile)
|
||||
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.caFile)
|
||||
lis, err := net.Listen("tcp", flags.rpcAddress)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
tlsinfo := tlsutil.TLSInfo{
|
||||
CertFile: flags.certFile,
|
||||
KeyFile: flags.keyFile,
|
||||
CAFile: flags.caFile,
|
||||
}
|
||||
tlscfg, err := tlsinfo.ServerConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid TLS credentials: %v", err)
|
||||
}
|
||||
grpcServer := rpc.NewServer(server, tlscfg)
|
||||
go grpcServer.Serve(lis)
|
||||
defer grpcServer.Stop()
|
||||
}
|
||||
|
||||
// HTTP Server
|
||||
config := &web.Config{
|
||||
Core: server,
|
||||
Logger: log,
|
||||
AssetsPath: flags.assetsPath,
|
||||
Signer: signer,
|
||||
ArmoredSigner: armoredSigner,
|
||||
}
|
||||
httpServer := web.NewServer(config)
|
||||
log.Infof("Starting bootcfg HTTP server on %s", flags.address)
|
||||
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
package main
|
||||
|
||||
import "github.com/coreos/coreos-baremetal/bootcfg/cli"
|
||||
import "github.com/coreos/matchbox/matchbox/cli"
|
||||
|
||||
func main() {
|
||||
cli.Execute()
|
||||
|
||||
197
cmd/matchbox/main.go
Normal file
197
cmd/matchbox/main.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
web "github.com/coreos/matchbox/matchbox/http"
|
||||
"github.com/coreos/matchbox/matchbox/rpc"
|
||||
"github.com/coreos/matchbox/matchbox/server"
|
||||
"github.com/coreos/matchbox/matchbox/sign"
|
||||
"github.com/coreos/matchbox/matchbox/storage"
|
||||
"github.com/coreos/matchbox/matchbox/tlsutil"
|
||||
"github.com/coreos/matchbox/matchbox/version"
|
||||
"github.com/coreos/pkg/flagutil"
|
||||
)
|
||||
|
||||
var (
|
||||
// Defaults to info logging
|
||||
log = logrus.New()
|
||||
)
|
||||
|
||||
func main() {
|
||||
flags := struct {
|
||||
address string
|
||||
rpcAddress string
|
||||
dataPath string
|
||||
assetsPath string
|
||||
logLevel string
|
||||
grpcCAFile string
|
||||
grpcCertFile string
|
||||
grpcKeyFile string
|
||||
tlsCertFile string
|
||||
tlsKeyFile string
|
||||
tlsEnabled bool
|
||||
keyRingPath string
|
||||
version bool
|
||||
help bool
|
||||
}{}
|
||||
flag.StringVar(&flags.address, "address", "127.0.0.1:8080", "HTTP listen address")
|
||||
flag.StringVar(&flags.rpcAddress, "rpc-address", "", "RPC listen address")
|
||||
flag.StringVar(&flags.dataPath, "data-path", "/var/lib/matchbox", "Path to data directory")
|
||||
flag.StringVar(&flags.assetsPath, "assets-path", "/var/lib/matchbox/assets", "Path to static assets")
|
||||
|
||||
// Log levels https://github.com/Sirupsen/logrus/blob/master/logrus.go#L36
|
||||
flag.StringVar(&flags.logLevel, "log-level", "info", "Set the logging level")
|
||||
|
||||
// gRPC Server TLS
|
||||
flag.StringVar(&flags.grpcCertFile, "cert-file", "/etc/matchbox/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.grpcKeyFile, "key-file", "/etc/matchbox/server.key", "Path to the server TLS key file")
|
||||
|
||||
// gRPC TLS Client Authentication
|
||||
flag.StringVar(&flags.grpcCAFile, "ca-file", "/etc/matchbox/ca.crt", "Path to the CA verify and authenticate client certificates")
|
||||
|
||||
// Signing
|
||||
flag.StringVar(&flags.keyRingPath, "key-ring-path", "", "Path to a private keyring file")
|
||||
|
||||
// SSL flags
|
||||
flag.StringVar(&flags.tlsCertFile, "web-cert-file", "/etc/matchbox/ssl/server.crt", "Path to the server TLS certificate file")
|
||||
flag.StringVar(&flags.tlsKeyFile, "web-key-file", "/etc/matchbox/ssl/server.key", "Path to the server TLS key file")
|
||||
flag.BoolVar(&flags.tlsEnabled, "web-ssl", false, "True to enable HTTPS")
|
||||
|
||||
// subcommands
|
||||
flag.BoolVar(&flags.version, "version", false, "print version and exit")
|
||||
flag.BoolVar(&flags.help, "help", false, "print usage and exit")
|
||||
|
||||
// parse command-line and environment variable arguments
|
||||
flag.Parse()
|
||||
if err := flagutil.SetFlagsFromEnv(flag.CommandLine, "MATCHBOX"); err != nil {
|
||||
log.Fatal(err.Error())
|
||||
}
|
||||
// restrict OpenPGP passphrase to pass via environment variable only
|
||||
passphrase := os.Getenv("MATCHBOX_PASSPHRASE")
|
||||
|
||||
if flags.version {
|
||||
fmt.Println(version.Version)
|
||||
return
|
||||
}
|
||||
|
||||
if flags.help {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
|
||||
// validate arguments
|
||||
if finfo, err := os.Stat(flags.dataPath); err != nil || !finfo.IsDir() {
|
||||
log.Fatal("A valid -data-path is required")
|
||||
}
|
||||
if flags.assetsPath != "" {
|
||||
if finfo, err := os.Stat(flags.assetsPath); err != nil || !finfo.IsDir() {
|
||||
log.Fatalf("Provide a valid -assets-path or '' to disable asset serving: %s", flags.assetsPath)
|
||||
}
|
||||
}
|
||||
if flags.rpcAddress != "" {
|
||||
if _, err := os.Stat(flags.grpcCertFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server certificate with -cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.grpcKeyFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS server key with -key-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.grpcCAFile); err != nil {
|
||||
log.Fatalf("Provide a valid TLS certificate authority for authorizing client certificates: %v", err)
|
||||
}
|
||||
}
|
||||
if flags.tlsEnabled {
|
||||
if _, err := os.Stat(flags.tlsCertFile); err != nil {
|
||||
log.Fatalf("Provide a valid SSL server certificate with -web-cert-file: %v", err)
|
||||
}
|
||||
if _, err := os.Stat(flags.tlsKeyFile); err != nil {
|
||||
log.Fatalf("Provide a valid SSL server key with -web-key-file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// logging setup
|
||||
lvl, err := logrus.ParseLevel(flags.logLevel)
|
||||
if err != nil {
|
||||
log.Fatalf("invalid log-level: %v", err)
|
||||
}
|
||||
log.Level = lvl
|
||||
|
||||
// (optional) signing
|
||||
var signer, armoredSigner sign.Signer
|
||||
if flags.keyRingPath != "" {
|
||||
entity, err := sign.LoadGPGEntity(flags.keyRingPath, passphrase)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
signer = sign.NewGPGSigner(entity)
|
||||
armoredSigner = sign.NewArmoredGPGSigner(entity)
|
||||
}
|
||||
|
||||
// storage
|
||||
store := storage.NewFileStore(&storage.Config{
|
||||
Root: flags.dataPath,
|
||||
Logger: log,
|
||||
})
|
||||
|
||||
// core logic
|
||||
server := server.NewServer(&server.Config{
|
||||
Store: store,
|
||||
})
|
||||
|
||||
// gRPC Server (feature disabled by default)
|
||||
if flags.rpcAddress != "" {
|
||||
log.Infof("Starting matchbox gRPC server on %s", flags.rpcAddress)
|
||||
log.Infof("Using TLS server certificate: %s", flags.grpcCertFile)
|
||||
log.Infof("Using TLS server key: %s", flags.grpcKeyFile)
|
||||
log.Infof("Using CA certificate: %s to authenticate client certificates", flags.grpcCAFile)
|
||||
lis, err := net.Listen("tcp", flags.rpcAddress)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
tlsinfo := tlsutil.TLSInfo{
|
||||
CertFile: flags.grpcCertFile,
|
||||
KeyFile: flags.grpcKeyFile,
|
||||
CAFile: flags.grpcCAFile,
|
||||
}
|
||||
tlscfg, err := tlsinfo.ServerConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("Invalid TLS credentials: %v", err)
|
||||
}
|
||||
grpcServer := rpc.NewServer(server, tlscfg)
|
||||
go grpcServer.Serve(lis)
|
||||
defer grpcServer.Stop()
|
||||
}
|
||||
|
||||
config := &web.Config{
|
||||
Core: server,
|
||||
Logger: log,
|
||||
AssetsPath: flags.assetsPath,
|
||||
Signer: signer,
|
||||
ArmoredSigner: armoredSigner,
|
||||
}
|
||||
httpServer := web.NewServer(config)
|
||||
|
||||
if flags.tlsEnabled {
|
||||
// HTTPS Server
|
||||
log.Infof("Starting matchbox HTTPS server on %s", flags.address)
|
||||
log.Infof("Using SSL server certificate: %s", flags.tlsCertFile)
|
||||
log.Infof("Using SSL server key: %s", flags.tlsKeyFile)
|
||||
err = http.ListenAndServeTLS(flags.address, flags.tlsCertFile, flags.tlsKeyFile, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
} else {
|
||||
// HTTP Server
|
||||
log.Infof("Starting matchbox HTTP server on %s", flags.address)
|
||||
err = http.ListenAndServe(flags.address, httpServer.HTTPHandler())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to start listening: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
61
code-of-conduct.md
Normal file
61
code-of-conduct.md
Normal file
@@ -0,0 +1,61 @@
|
||||
## CoreOS Community Code of Conduct
|
||||
|
||||
### Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of
|
||||
fostering an open and welcoming community, we pledge to respect all people who
|
||||
contribute through reporting issues, posting feature requests, updating
|
||||
documentation, submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free
|
||||
experience for everyone, regardless of level of experience, gender, gender
|
||||
identity and expression, sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as physical or electronic addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct. By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently applying these
|
||||
principles to every aspect of managing this project. Project maintainers who do
|
||||
not follow or enforce the Code of Conduct may be permanently removed from the
|
||||
project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting a project maintainer, Brandon Philips
|
||||
<brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
|
||||
This Code of Conduct is adapted from the Contributor Covenant
|
||||
(http://contributor-covenant.org), version 1.2.0, available at
|
||||
http://contributor-covenant.org/version/1/2/0/
|
||||
|
||||
### CoreOS Events Code of Conduct
|
||||
|
||||
CoreOS events are working conferences intended for professional networking and
|
||||
collaboration in the CoreOS community. Attendees are expected to behave
|
||||
according to professional standards and in accordance with their employer’s
|
||||
policies on appropriate workplace behavior.
|
||||
|
||||
While at CoreOS events or related social networking opportunities, attendees
|
||||
should not engage in discriminatory or offensive speech or actions including
|
||||
but not limited to gender, sexuality, race, age, disability, or religion.
|
||||
Speakers should be especially aware of these concerns.
|
||||
|
||||
CoreOS does not condone any statements by speakers contrary to these standards.
|
||||
CoreOS reserves the right to deny entrance and/or eject from an event (without
|
||||
refund) any individual found to be engaging in discriminatory or offensive
|
||||
speech or actions.
|
||||
|
||||
Please bring any concerns to the immediate attention of designated on-site
|
||||
staff, Brandon Philips <brandon.philips@coreos.com>, and/or Rithu John <rithu.john@coreos.com>.
|
||||
18
contrib/dnsmasq/CHANGES.md
Normal file
18
contrib/dnsmasq/CHANGES.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# dnsmasq
|
||||
|
||||
Notable changes image releases. The dnsmasq project [upstream](http://www.thekelleys.org.uk/dnsmasq/doc.html) has its own [changelog](http://www.thekelleys.org.uk/dnsmasq/CHANGELOG).
|
||||
|
||||
## v0.4.1
|
||||
|
||||
* Rebuild with alpine:3.6 base image
|
||||
* Add EXPOSE ports 67 and 69 to Dockerfile
|
||||
|
||||
## v0.4.0
|
||||
|
||||
* `dnsmasq` package version 2.76
|
||||
* Rebuild with alpine:3.5 base image to receive patches
|
||||
* Update CoreOS `grub.efi` to be recent (stable, 1298.7.0)
|
||||
|
||||
## v0.3.0
|
||||
|
||||
* `dnsmasq` package version 2.75
|
||||
@@ -1,6 +1,6 @@
|
||||
FROM alpine:latest
|
||||
FROM alpine:3.6
|
||||
MAINTAINER Dalton Hubble <dalton.hubble@coreos.com>
|
||||
RUN apk -U add dnsmasq curl
|
||||
COPY tftpboot /var/lib/tftpboot
|
||||
EXPOSE 53
|
||||
ENTRYPOINT ["/usr/sbin/dnsmasq"]
|
||||
EXPOSE 53 67 69
|
||||
ENTRYPOINT ["/usr/sbin/dnsmasq"]
|
||||
|
||||
23
contrib/dnsmasq/Makefile
Normal file
23
contrib/dnsmasq/Makefile
Normal file
@@ -0,0 +1,23 @@
|
||||
VERSION=v0.5.0
|
||||
|
||||
IMAGE_REPO=coreos/dnsmasq
|
||||
QUAY_REPO=quay.io/coreos/dnsmasq
|
||||
|
||||
.PHONY: all
|
||||
all: docker-image
|
||||
|
||||
.PHONY: tftp
|
||||
tftp:
|
||||
@./get-tftp-files
|
||||
|
||||
.PHONY: docker-image
|
||||
docker-image: tftp
|
||||
@sudo docker build --rm=true -t $(IMAGE_REPO):$(VERSION) .
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(IMAGE_REPO):latest
|
||||
|
||||
.PHONY: docker-push
|
||||
docker-push:
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):latest
|
||||
@sudo docker tag $(IMAGE_REPO):$(VERSION) $(QUAY_REPO):$(VERSION)
|
||||
@sudo docker push $(QUAY_REPO):latest
|
||||
@sudo docker push $(QUAY_REPO):$(VERSION)
|
||||
@@ -1,62 +1,79 @@
|
||||
# dnsmasq [](https://quay.io/repository/coreos/dnsmasq)
|
||||
|
||||
# dnsmasq
|
||||
`dnsmasq` provides a container image for running DHCP, proxy DHCP, DNS, and/or TFTP with [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html). Use it to test different network setups with clusters of network bootable machines.
|
||||
|
||||
[](https://quay.io/repository/coreos/dnsmasq)
|
||||
|
||||
`dnsmasq` provides an App Container Image (ACI) or Docker image for running DHCP, proxy DHCP, DNS, and/or TFTP with [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) in a container/pod. Use it to test different network setups with clusters of network bootable machines.
|
||||
|
||||
The image bundles `undionly.kpxe` which chainloads PXE clients to iPXE and `grub.efi` (experimental) which chainloads UEFI architectures to GRUB2.
|
||||
The image bundles `undionly.kpxe`, `ipxe.efi`, and `grub.efi` (experimental) for chainloading BIOS and UEFI clients to iPXE.
|
||||
|
||||
## Usage
|
||||
|
||||
Run the `coreos.com/dnsmasq` ACI with rkt.
|
||||
Run the container image as a DHCP, DNS, and TFTP service.
|
||||
|
||||
sudo rkt trust --prefix coreos.com/dnsmasq
|
||||
# gpg key fingerprint is: 18AD 5014 C99E F7E3 BA5F 6CE9 50BD D3E0 FC8A 365E
|
||||
sudo rkt run coreos.com/dnsmasq:v0.3.0
|
||||
```sh
|
||||
sudo rkt run --net=host quay.io/coreos/dnsmasq \
|
||||
--caps-retain=CAP_NET_ADMIN,CAP_NET_BIND_SERVICE,CAP_SETGID,CAP_SETUID,CAP_NET_RAW \
|
||||
-- -d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp \
|
||||
--tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example.com/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
Press ^] three times to kill the container.
|
||||
```sh
|
||||
sudo docker run --rm --cap-add=NET_ADMIN --net=host quay.io/coreos/dnsmasq \
|
||||
-d -q \
|
||||
--dhcp-range=192.168.1.3,192.168.1.254 \
|
||||
--enable-tftp --tftp-root=/var/lib/tftpboot \
|
||||
--dhcp-match=set:bios,option:client-arch,0 \
|
||||
--dhcp-boot=tag:bios,undionly.kpxe \
|
||||
--dhcp-match=set:efi32,option:client-arch,6 \
|
||||
--dhcp-boot=tag:efi32,ipxe.efi \
|
||||
--dhcp-match=set:efibc,option:client-arch,7 \
|
||||
--dhcp-boot=tag:efibc,ipxe.efi \
|
||||
--dhcp-match=set:efi64,option:client-arch,9 \
|
||||
--dhcp-boot=tag:efi64,ipxe.efi \
|
||||
--dhcp-userclass=set:ipxe,iPXE \
|
||||
--dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe \
|
||||
--address=/matchbox.example/192.168.1.2 \
|
||||
--log-queries \
|
||||
--log-dhcp
|
||||
```
|
||||
|
||||
Alternately, Docker can be used.
|
||||
|
||||
docker pull quay.io/coreos/dnsmasq
|
||||
docker run quay.io/coreos/dnsmasq --cap-add=NET_ADMIN
|
||||
Press ^] three times to stop the rkt pod. Press ctrl-C to stop the Docker container.
|
||||
|
||||
## Configuration Flags
|
||||
|
||||
Configuration arguments can be provided at the command line. Check the dnsmasq [man pages](http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html) for a complete list, but here are some important flags.
|
||||
Configuration arguments can be provided as flags. Check the dnsmasq [man pages](http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html) for a complete list.
|
||||
|
||||
| flag | description | example |
|
||||
|----------|-------------|---------|
|
||||
| -dhcp-range | Enable DHCP, lease given range | `172.15,0.50,172.15.0.99`, `192.168.1.1,proxy,255.255.255.0` |
|
||||
| --dhcp-boot | DHCP next server option | `http://bootcfg.foo:8080/boot.ipxe` |
|
||||
| --dhcp-range | Enable DHCP, lease given range | `172.18.0.50,172.18.0.99`, `192.168.1.1,proxy,255.255.255.0` |
|
||||
| --dhcp-boot | DHCP next server option | `http://matchbox.foo:8080/boot.ipxe` |
|
||||
| --enable-tftp | Enable serving from tftp-root over TFTP | NA |
|
||||
| --address | IP address for a domain name | /bootcfg.foo/172.15.0.2 |
|
||||
| --address | IP address for a domain name | /matchbox.foo/172.18.0.2 |
|
||||
|
||||
## ACI
|
||||
## Development
|
||||
|
||||
Build a `dnsmasq` ACI with the build script which uses [acbuild](https://github.com/appc/acbuild).
|
||||
Build a container image locally.
|
||||
|
||||
cd contrib/dnsmasq
|
||||
./get-tftp-files
|
||||
sudo ./build-aci
|
||||
```
|
||||
make docker-image
|
||||
```
|
||||
|
||||
Run `dnsmasq.aci` with rkt to run DHCP/proxyDHCP/TFTP/DNS services.
|
||||
Run the image with Docker on the `docker0` bridge (default).
|
||||
|
||||
DHCP+TFTP+DNS on the `metal0` bridge:
|
||||
```
|
||||
sudo docker run --rm --cap-add=NET_ADMIN coreos/dnsmasq -d -q
|
||||
```
|
||||
|
||||
sudo rkt --insecure-options=image run dnsmasq.aci --net=metal0 -- -d -q --dhcp-range=172.15.0.50,172.15.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:#ipxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --log-queries --log-dhcp --dhcp-option=3,172.15.0.1 --address=/bootcfg.foo/172.15.0.2
|
||||
|
||||
## Docker
|
||||
|
||||
Build a Docker image locally using the tag `latest`.
|
||||
|
||||
cd contrib/dnsmasq
|
||||
./get-tftp-files
|
||||
sudo ./build-docker
|
||||
|
||||
Run the Docker image to run DHCP/proxyDHCP/TFTP/DNS services.
|
||||
|
||||
DHCP+TFTP+DNS on the `docker0` bridge:
|
||||
|
||||
sudo docker run --rm --cap-add=NET_ADMIN quay.io/coreos/dnsmasq -d -q --dhcp-range=172.17.0.43,172.17.0.99 --enable-tftp --tftp-root=/var/lib/tftpboot --dhcp-userclass=set:ipxe,iPXE --dhcp-boot=tag:#ipxe,undionly.kpxe --dhcp-boot=tag:ipxe,http://bootcfg.foo:8080/boot.ipxe --log-queries --log-dhcp --dhcp-option=3,172.17.0.1 --address=/bootcfg.foo/172.17.0.2
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
echo "Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Start with an empty ACI
|
||||
acbuild --debug begin
|
||||
|
||||
# In the event of the script exiting, end the build
|
||||
trap "{ export EXT=$?; acbuild --debug end && exit $EXT; }" EXIT
|
||||
|
||||
# Name the ACI
|
||||
acbuild --debug set-name coreos.com/dnsmasq
|
||||
|
||||
# Add a version label
|
||||
acbuild --debug label add version v0.3.0
|
||||
|
||||
# Add alpine base dependency
|
||||
acbuild --debug dep add quay.io/coreos/alpine-sh
|
||||
|
||||
# Install dnsmasq and curl
|
||||
acbuild --debug run apk update
|
||||
acbuild --debug run apk add dnsmasq curl
|
||||
|
||||
# Copy the PXE->iPXE chainloader
|
||||
acbuild --debug copy tftpboot /var/lib/tftpboot
|
||||
|
||||
# Add DHCP and DNS ports for dnsmasq
|
||||
acbuild --debug port add dhcp udp 67
|
||||
acbuild --debug port add dns udp 53
|
||||
|
||||
# Elevate network admin capabilities
|
||||
echo "{\"set\": [\"CAP_NET_ADMIN\", \"CAP_NET_BIND_SERVICE\", \"CAP_SETGID\", \"CAP_SETUID\", \"CAP_NET_RAW\"]}" | acbuild --debug isolator add os/linux/capabilities-retain-set -
|
||||
|
||||
# Set the exec command
|
||||
acbuild --debug set-exec -- /usr/sbin/dnsmasq -d
|
||||
|
||||
# Save and override any older ACI
|
||||
acbuild --debug write --overwrite dnsmasq.aci
|
||||
@@ -1,5 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
|
||||
REPO=coreos/dnsmasq
|
||||
|
||||
docker build -q --rm=true -t $REPO:latest .
|
||||
40
contrib/dnsmasq/docker0.conf
Normal file
40
contrib/dnsmasq/docker0.conf
Normal file
@@ -0,0 +1,40 @@
|
||||
# dnsmasq.conf
|
||||
|
||||
no-daemon
|
||||
dhcp-range=172.17.0.50,172.17.0.99
|
||||
dhcp-option=3,172.17.0.1
|
||||
dhcp-host=52:54:00:a1:9c:ae,172.17.0.21,1h
|
||||
dhcp-host=52:54:00:b2:2f:86,172.17.0.22,1h
|
||||
dhcp-host=52:54:00:c3:61:77,172.17.0.23,1h
|
||||
dhcp-host=52:54:00:d7:99:c7,172.17.0.24,1h
|
||||
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
# Legacy PXE
|
||||
dhcp-match=set:bios,option:client-arch,0
|
||||
dhcp-boot=tag:bios,undionly.kpxe
|
||||
|
||||
# UEFI
|
||||
dhcp-match=set:efi32,option:client-arch,6
|
||||
dhcp-boot=tag:efi32,ipxe.efi
|
||||
|
||||
dhcp-match=set:efibc,option:client-arch,7
|
||||
dhcp-boot=tag:efibc,ipxe.efi
|
||||
|
||||
dhcp-match=set:efi64,option:client-arch,9
|
||||
dhcp-boot=tag:efi64,ipxe.efi
|
||||
|
||||
# iPXE
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
address=/matchbox.example.com/172.17.0.2
|
||||
address=/node1.example.com/172.17.0.21
|
||||
address=/node2.example.com/172.17.0.22
|
||||
address=/node3.example.com/172.17.0.23
|
||||
address=/node4.example.com/172.17.0.24
|
||||
address=/cluster.example.com/172.17.0.21
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/bin/bash -e
|
||||
#!/usr/bin/env bash
|
||||
set -eu
|
||||
|
||||
DEST=tftpboot
|
||||
DEST=${1:-"tftpboot"}
|
||||
|
||||
if [ ! -d $DEST ]; then
|
||||
echo "Creating directory $DEST"
|
||||
@@ -9,3 +10,7 @@ fi
|
||||
|
||||
curl -s -o $DEST/undionly.kpxe http://boot.ipxe.org/undionly.kpxe
|
||||
cp $DEST/undionly.kpxe $DEST/undionly.kpxe.0
|
||||
curl -s -o $DEST/ipxe.efi http://boot.ipxe.org/ipxe.efi
|
||||
|
||||
# Any vaguely recent CoreOS grub.efi is fine
|
||||
curl -s -o $DEST/grub.efi https://stable.release.core-os.net/amd64-usr/1353.7.0/coreos_production_pxe_grub.efi
|
||||
|
||||
30
contrib/dnsmasq/metal0.conf
Normal file
30
contrib/dnsmasq/metal0.conf
Normal file
@@ -0,0 +1,30 @@
|
||||
# dnsmasq.conf
|
||||
|
||||
no-daemon
|
||||
dhcp-range=172.18.0.50,172.18.0.99
|
||||
dhcp-option=3,172.18.0.1
|
||||
dhcp-host=52:54:00:a1:9c:ae,172.18.0.21,1h
|
||||
dhcp-host=52:54:00:b2:2f:86,172.18.0.22,1h
|
||||
dhcp-host=52:54:00:c3:61:77,172.18.0.23,1h
|
||||
dhcp-host=52:54:00:d7:99:c7,172.18.0.24,1h
|
||||
|
||||
enable-tftp
|
||||
tftp-root=/var/lib/tftpboot
|
||||
|
||||
dhcp-userclass=set:ipxe,iPXE
|
||||
dhcp-boot=tag:#ipxe,undionly.kpxe
|
||||
dhcp-boot=tag:ipxe,http://matchbox.example.com:8080/boot.ipxe
|
||||
|
||||
log-queries
|
||||
log-dhcp
|
||||
|
||||
address=/matchbox.example.com/172.18.0.2
|
||||
address=/node1.example.com/172.18.0.21
|
||||
address=/node2.example.com/172.18.0.22
|
||||
address=/node3.example.com/172.18.0.23
|
||||
address=/node4.example.com/172.18.0.24
|
||||
address=/cluster.example.com/172.18.0.21
|
||||
|
||||
# for a Tectonic test, ignore
|
||||
address=/tectonic.example.com/172.18.0.22
|
||||
address=/tectonic.example.com/172.18.0.23
|
||||
@@ -1,55 +0,0 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: bootcfg
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: bootcfg
|
||||
phase: prod
|
||||
spec:
|
||||
containers:
|
||||
- name: bootcfg
|
||||
image: quay.io/coreos/bootcfg:latest
|
||||
env:
|
||||
- {name: BOOTCFG_ADDRESS, value: "0.0.0.0:8080"}
|
||||
- {name: BOOTCFG_LOG_LEVEL, value: "debug"}
|
||||
ports:
|
||||
# port exposed on pod IP
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "50Mi"
|
||||
volumeMounts:
|
||||
- name: groups
|
||||
mountPath: /var/lib/bootcfg/groups
|
||||
- name: profiles
|
||||
mountPath: /var/lib/bootcfg/profiles
|
||||
- name: ignition
|
||||
mountPath: /var/lib/bootcfg/ignition
|
||||
- name: cloud
|
||||
mountPath: /var/lib/bootcfg/cloud
|
||||
- name: assets
|
||||
mountPath: /var/lib/bootcfg/assets
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: groups
|
||||
emptyDir: {}
|
||||
- name: profiles
|
||||
emptyDir: {}
|
||||
- name: ignition
|
||||
emptyDir: {}
|
||||
- name: cloud
|
||||
emptyDir: {}
|
||||
- name: assets
|
||||
emptyDir: {}
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: bootcfg
|
||||
spec:
|
||||
type: NodePort
|
||||
selector:
|
||||
name: bootcfg
|
||||
phase: prod
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
# port exposed on each node
|
||||
nodePort: 31488
|
||||
# name or port exposed on targeted pod(s)
|
||||
targetPort: 8080
|
||||
52
contrib/k8s/matchbox-deployment.yaml
Normal file
52
contrib/k8s/matchbox-deployment.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: matchbox
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: matchbox
|
||||
phase: prod
|
||||
spec:
|
||||
containers:
|
||||
- name: matchbox
|
||||
image: quay.io/coreos/matchbox:v0.7.1
|
||||
env:
|
||||
- name: MATCHBOX_ADDRESS
|
||||
value: "0.0.0.0:8080"
|
||||
- name: MATCHBOX_RPC_ADDRESS
|
||||
value: "0.0.0.0:8081"
|
||||
- name: MATCHBOX_LOG_LEVEL
|
||||
value: "debug"
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8080
|
||||
- name: https
|
||||
containerPort: 8081
|
||||
resources:
|
||||
requests:
|
||||
cpu: "50m"
|
||||
memory: "50Mi"
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /etc/matchbox
|
||||
- name: data
|
||||
mountPath: /var/lib/matchbox
|
||||
- name: assets
|
||||
mountPath: /var/lib/matchbox/assets
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: config
|
||||
secret:
|
||||
secretName: matchbox-rpc
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
- name: assets
|
||||
emptyDir: {}
|
||||
32
contrib/k8s/matchbox-ingress.yaml
Normal file
32
contrib/k8s/matchbox-ingress.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
spec:
|
||||
rules:
|
||||
- host: matchbox.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: matchbox
|
||||
servicePort: 8080
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: matchbox
|
||||
annotations:
|
||||
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- matchbox-rpc.example.com
|
||||
rules:
|
||||
- host: matchbox-rpc.example.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: matchbox
|
||||
servicePort: 8081
|
||||
18
contrib/k8s/matchbox-service.yaml
Normal file
18
contrib/k8s/matchbox-service.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: matchbox
|
||||
spec:
|
||||
type: ClusterIP
|
||||
selector:
|
||||
name: matchbox
|
||||
phase: prod
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
- name: https
|
||||
protocol: TCP
|
||||
port: 8081
|
||||
targetPort: 8081
|
||||
86
contrib/rpm/matchbox.spec
Normal file
86
contrib/rpm/matchbox.spec
Normal file
@@ -0,0 +1,86 @@
|
||||
%global import_path github.com/coreos/matchbox
|
||||
%global repo matchbox
|
||||
%global debug_package %{nil}
|
||||
|
||||
Name: matchbox
|
||||
Version: 0.6.0
|
||||
Release: 2%{?dist}
|
||||
Summary: Network boot and provision CoreOS machines
|
||||
License: ASL 2.0
|
||||
URL: https://%{import_path}
|
||||
Source0: https://%{import_path}/archive/v%{version}/%{name}-%{version}.tar.gz
|
||||
|
||||
|
||||
BuildRequires: golang
|
||||
BuildRequires: systemd
|
||||
%{?systemd_requires}
|
||||
|
||||
Requires(pre): shadow-utils
|
||||
|
||||
%description
|
||||
matchbox is a service that matches machines to profiles to PXE boot and provision
|
||||
clusters. Profiles specify the kernel/initrd, kernel args, iPXE config, GRUB
|
||||
config, Container Linux config, Cloud-config, or other configs. matchbox provides
|
||||
a read-only HTTP API for machines and an authenticated gRPC API for clients.
|
||||
|
||||
# Limit to architectures supported by golang or gcc-go compilers
|
||||
ExclusiveArch: %{go_arches}
|
||||
# Use golang or gcc-go compiler depending on architecture
|
||||
BuildRequires: compiler(golang)
|
||||
|
||||
%prep
|
||||
%setup -q -n %{repo}-%{version}
|
||||
|
||||
%build
|
||||
# create a Go workspace with a symlink to builddir source
|
||||
mkdir -p src/github.com/coreos
|
||||
ln -s ../../../ src/github.com/coreos/matchbox
|
||||
export GOPATH=$(pwd):%{gopath}
|
||||
export GO15VENDOREXPERIMENT=1
|
||||
function gobuild { go build -a -ldflags "-w -X github.com/coreos/matchbox/matchbox/version.Version=v%{version}" "$@"; }
|
||||
gobuild -o bin/matchbox %{import_path}/cmd/matchbox
|
||||
|
||||
%install
|
||||
install -d %{buildroot}/%{_bindir}
|
||||
install -d %{buildroot}%{_sharedstatedir}/%{name}
|
||||
install -p -m 0755 bin/matchbox %{buildroot}/%{_bindir}
|
||||
# systemd service unit
|
||||
mkdir -p %{buildroot}%{_unitdir}
|
||||
cp contrib/systemd/%{name}.service %{buildroot}%{_unitdir}/
|
||||
|
||||
%files
|
||||
%doc README.md CHANGES.md CONTRIBUTING.md LICENSE NOTICE DCO
|
||||
%{_bindir}/matchbox
|
||||
%{_sharedstatedir}/%{name}
|
||||
%{_unitdir}/%{name}.service
|
||||
|
||||
%pre
|
||||
getent group matchbox >/dev/null || groupadd -r matchbox
|
||||
getent passwd matchbox >/dev/null || \
|
||||
useradd -r -g matchbox -s /sbin/nologin matchbox
|
||||
|
||||
%post
|
||||
%systemd_post matchbox.service
|
||||
|
||||
%preun
|
||||
%systemd_preun matchbox.service
|
||||
|
||||
%postun
|
||||
%systemd_postun_with_restart matchbox.service
|
||||
|
||||
%changelog
|
||||
* Mon Apr 24 2017 <dalton.hubble@coreos.com> - 0.6.0-1
|
||||
- New support for terraform-provider-matchbox plugin
|
||||
- Add ProfileDelete, GroupDelete, IgnitionGet and IgnitionDelete gRPC endpoints
|
||||
- Generate code with gRPC v1.2.1 and matching Go protoc-gen-go plugin
|
||||
- Update Ignition to v0.14.0 and coreos-cloudinit to v1.13.0
|
||||
- New documentation at https://coreos.com/matchbox/docs/latest
|
||||
* Wed Jan 25 2017 <dalton.hubble@coreos.com> - 0.5.0-1
|
||||
- Rename project from bootcfg to matchbox
|
||||
* Sat Dec 3 2016 <dalton.hubble@coreos.com> - 0.4.1-3
|
||||
- Add missing ldflags which caused bootcfg -version to report wrong version
|
||||
* Fri Dec 2 2016 <dalton.hubble@coreos.com> - 0.4.1-2
|
||||
- Fix bootcfg user creation
|
||||
* Fri Dec 2 2016 <dalton.hubble@coreos.com> - 0.4.1-1
|
||||
- Initial package
|
||||
|
||||
96
contrib/squid/README.md
Normal file
96
contrib/squid/README.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Squid Proxy (DRAFT)
|
||||
|
||||
This guide shows how to setup a [Squid](http://www.squid-cache.org/) cache proxy for providing kernel/initrd files to PXE, iPXE, or GRUB2 client machines. This setup runs Squid as a Docker container using the [sameersbn/squid](https://quay.io/repository/sameersbn/squid)
|
||||
image.
|
||||
|
||||
The Squid container requires a squid.conf file to run. Download the example squid.conf file from the [sameersbn/docker-squid](https://github.com/sameersbn/docker-squid) repo:
|
||||
```
|
||||
curl -O https://raw.githubusercontent.com/sameersbn/docker-squid/master/squid.conf
|
||||
```
|
||||
|
||||
Squid [interception caching](http://wiki.squid-cache.org/SquidFaq/InterceptionProxy#Concepts_of_Interception_Caching) is required for proxying PXE, iPXE, or GRUB2 client machines. Set the intercept mode in squid.conf:
|
||||
```
|
||||
sed -ie 's/http_port 3128/http_port 3128 intercept/g' squid.conf
|
||||
```
|
||||
|
||||
By default, Squid caches objects that are 4MB or less. Increase the maximum object size to cache large files such as kernel and initrd images. The following example increases the maximum object size to 300MB:
|
||||
```
|
||||
sed -ie 's/# maximum_object_size 4 MB/maximum_object_size 300 MB/g' squid.conf
|
||||
```
|
||||
|
||||
Squid supports a wide range of cache configurations. Review the Squid [documentation](http://www.squid-cache.org/Doc/) to learn more about configuring Squid.
|
||||
|
||||
This example uses systemd to manage squid. Create the squid service systemd unit file:
|
||||
```
|
||||
cat /etc/systemd/system/squid.service
|
||||
#/etc/systemd/system/squid.service
|
||||
[Unit]
|
||||
Description=squid proxy service
|
||||
After=docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
Restart=always
|
||||
TimeoutStartSec=0
|
||||
ExecStart=/usr/bin/docker run --net=host --rm \
|
||||
-v /path/to/squid.conf:/etc/squid3/squid.conf:Z \
|
||||
-v /srv/docker/squid/cache:/var/spool/squid3:Z \
|
||||
quay.io/sameersbn/squid
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
Start Squid:
|
||||
```
|
||||
systemctl start squid
|
||||
```
|
||||
|
||||
If your Squid host is running iptables or firewalld, modify rules to allow the interception and redirection of traffic. In the following example, 192.168.10.1 is the IP address of the interface facing PXE, iPXE, or GRUB2 client machines. The default port number used by squid is 3128.
|
||||
|
||||
For firewalld:
|
||||
```
|
||||
firewall-cmd --permanent --zone=internal --add-forward-port=port=80:proto=tcp:toport=3128:toaddr=192.168.10.1
|
||||
firewall-cmd --permanent --zone=internal --add-port=3128/tcp
|
||||
firewall-cmd --reload
|
||||
firewall-cmd --zone=internal --list-all
|
||||
```
|
||||
|
||||
For iptables:
|
||||
```
|
||||
iptables -t nat -A POSTROUTING -o enp15s0 -j MASQUERADE
|
||||
iptables -t nat -A PREROUTING -i enp14s0 -p tcp --dport 80 -j REDIRECT --to-port 3128
|
||||
```
|
||||
**Note**: enp14s0 faces PXE, iPXE, or GRUB2 clients and enp15s0 faces Internet access.
|
||||
|
||||
Your DHCP server should be configured so the Squid host is the default gateway for PXE, iPXE, or GRUB2 clients. For deployments that run Squid on the same host as dnsmasq, remove any DHCP option 3 settings. For example ```--dhcp-option=3,192.168.10.1"```
|
||||
|
||||
Update Matchbox policies to use the url of the Container Linux kernel/initrd download site:
|
||||
```
|
||||
cat policy/etcd3.json
|
||||
{
|
||||
"id": "etcd3",
|
||||
"name": "etcd3",
|
||||
"boot": {
|
||||
"kernel": "http://stable.release.core-os.net/amd64-usr/1235.9.0/coreos_production_pxe.vmlinuz",
|
||||
"initrd": ["http://stable.release.core-os.net/amd64-usr/1235.9.0/coreos_production_pxe_image.cpio.gz"],
|
||||
"args": [
|
||||
"coreos.config.url=http://matchbox.foo:8080/ignition?uuid=${uuid}&mac=${mac:hexhyp}",
|
||||
"coreos.first_boot=yes",
|
||||
"console=tty0",
|
||||
"console=ttyS0",
|
||||
"coreos.autologin"
|
||||
]
|
||||
},
|
||||
"ignition_id": "etcd3.yaml"
|
||||
}
|
||||
```
|
||||
|
||||
(Optional) Configure Matchbox to not serve static assets by providing an empty assets-path value.
|
||||
```
|
||||
# /etc/systemd/system/matchbox.service.d/override.conf
|
||||
[Service]
|
||||
Environment="MATCHBOX_ASSETS_PATHS="
|
||||
```
|
||||
|
||||
Boot your PXE, iPXE, or GRUB2 clients.
|
||||
@@ -1,17 +0,0 @@
|
||||
[Unit]
|
||||
Description=CoreOS bootcfg Server
|
||||
Documentation=https://github.com/coreos/coreos-baremetal
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=bootcfg
|
||||
Group=bootcfg
|
||||
ExecStart=/usr/local/bin/bootcfg -address=0.0.0.0:8080 -rpc-address=0.0.0.0:8081 -log-level=debug
|
||||
|
||||
# systemd.exec
|
||||
ProtectHome=yes
|
||||
ProtectSystem=full
|
||||
ReadWriteDirectories=/var/lib/bootcfg
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
24
contrib/systemd/matchbox-for-tectonic.service
Normal file
24
contrib/systemd/matchbox-for-tectonic.service
Normal file
@@ -0,0 +1,24 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/coreos/matchbox"
|
||||
Environment="VERSION=v0.7.1"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
Environment="MATCHBOX_RPC_ADDRESS=0.0.0.0:8081"
|
||||
Environment="MATCHBOX_LOG_LEVEL=debug"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--net=host \
|
||||
--inherit-env \
|
||||
--trust-keys-from-https \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--mount volume=config,target=/etc/matchbox \
|
||||
--volume data,kind=host,source=/var/lib/matchbox \
|
||||
--volume config,kind=host,source=/etc/matchbox \
|
||||
${IMAGE}:${VERSION}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
16
contrib/systemd/matchbox-local.service
Normal file
16
contrib/systemd/matchbox-local.service
Normal file
@@ -0,0 +1,16 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
User=matchbox
|
||||
Group=matchbox
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStart=/usr/local/bin/matchbox
|
||||
|
||||
# systemd.exec
|
||||
ProtectHome=yes
|
||||
ProtectSystem=full
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
22
contrib/systemd/matchbox-on-coreos.service
Normal file
22
contrib/systemd/matchbox-on-coreos.service
Normal file
@@ -0,0 +1,22 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
Environment="IMAGE=quay.io/coreos/matchbox"
|
||||
Environment="VERSION=v0.7.1"
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStartPre=/usr/bin/mkdir -p /etc/matchbox
|
||||
ExecStartPre=/usr/bin/mkdir -p /var/lib/matchbox/assets
|
||||
ExecStart=/usr/bin/rkt run \
|
||||
--net=host \
|
||||
--inherit-env \
|
||||
--trust-keys-from-https \
|
||||
--mount volume=data,target=/var/lib/matchbox \
|
||||
--mount volume=config,target=/etc/matchbox \
|
||||
--volume data,kind=host,source=/var/lib/matchbox \
|
||||
--volume config,kind=host,source=/etc/matchbox \
|
||||
${IMAGE}:${VERSION}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
16
contrib/systemd/matchbox.service
Normal file
16
contrib/systemd/matchbox.service
Normal file
@@ -0,0 +1,16 @@
|
||||
[Unit]
|
||||
Description=CoreOS matchbox Server
|
||||
Documentation=https://github.com/coreos/matchbox
|
||||
|
||||
[Service]
|
||||
User=matchbox
|
||||
Group=matchbox
|
||||
Environment="MATCHBOX_ADDRESS=0.0.0.0:8080"
|
||||
ExecStart=/usr/bin/matchbox
|
||||
|
||||
# systemd.exec
|
||||
ProtectHome=yes
|
||||
ProtectSystem=full
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -1,38 +1,46 @@
|
||||
|
||||
# Examples
|
||||
|
||||
These examples network boot and provision machines into CoreOS clusters using `bootcfg`. You can re-use their profiles to provision your own physical machines.
|
||||
Matchbox automates network booting and provisioning of clusters. These examples show how to use matchbox on-premise or locally with [QEMU/KVM](scripts/README.md#libvirt).
|
||||
|
||||
| Name | Description | CoreOS Version | FS | Docs |
|
||||
## Terraform Examples
|
||||
|
||||
These examples use [Terraform](https://www.terraform.io/intro/) as a client to Matchbox.
|
||||
|
||||
| Name | Description |
|
||||
|-------------------------------|-------------------------------|
|
||||
| [simple-install](terraform/simple-install/) | Install Container Linux with an SSH key |
|
||||
| [etcd3-install](terraform/etcd3-install/) | Install a 3-node etcd3 cluster |
|
||||
| [bootkube-install](terraform/bootkube-install/) | Install a 3-node Kubernetes v1.10.3 cluster |
|
||||
|
||||
### Customization
|
||||
|
||||
You are encouraged to look through the examples and Terraform modules. Implement your own profiles or package them as modules to meet your needs. We've just provided a starting point. Learn more about [matchbox](../Documentation/matchbox.md) and [Container Linux configs](../Documentation/container-linux-config.md).
|
||||
|
||||
## Manual Examples
|
||||
|
||||
These examples mount raw Matchbox objects into a Matchbox server's `/var/lib/matchbox/` directory.
|
||||
|
||||
| Name | Description | CoreOS Container Linux Version | FS | Docs |
|
||||
|------------|-------------|----------------|----|-----------|
|
||||
| pxe | CoreOS via iPXE | alpha/1053.2.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS via GRUB2 Netboot | alpha/1053.2.0 | RAM | NA |
|
||||
| pxe-disk | CoreOS via iPXE, with a root filesystem | alpha/1053.2.0 | Disk | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| etcd, etcd-docker | iPXE boot a 3 node etcd cluster and proxy | alpha/1053.2.0 | RAM | [reference](https://coreos.com/os/docs/latest/cluster-architectures.html) |
|
||||
| etcd-install | Install a 3-node etcd cluster to disk | alpha/1053.2.0 | Disk | [reference](https://coreos.com/os/docs/latest/installing-to-disk.html) |
|
||||
| k8s, k8s-docker | Kubernetes cluster with 1 master, 2 workers, and TLS-authentication | alpha/1053.2.0 | Disk | [tutorial](../Documentation/kubernetes.md) |
|
||||
| k8s-install | Install a Kubernetes cluster to disk | alpha/1053.2.0 | Disk | [tutorial](../Documentation/kubernetes.md) |
|
||||
| bootkube | iPXE boot a self-hosted Kubernetes cluster (with bootkube) | alpha/1053.2.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| bootkube-install | Install a self-hosted Kubernetes cluster (with bootkube) | alpha/1053.2.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| torus | Torus distributed storage | alpha/1053.2.0 | Disk | [tutorial](../Documentation/torus.md) |
|
||||
| simple | CoreOS Container Linux with autologin, using iPXE | stable/1576.5.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| simple-install | CoreOS Container Linux Install, using iPXE | stable/1576.5.0 | RAM | [reference](https://coreos.com/os/docs/latest/booting-with-ipxe.html) |
|
||||
| grub | CoreOS Container Linux via GRUB2 Netboot | stable/1576.5.0 | RAM | NA |
|
||||
| etcd3 | PXE boot a 3-node etcd3 cluster with proxies | stable/1576.5.0 | RAM | None |
|
||||
| etcd3-install | Install a 3-node etcd3 cluster to disk | stable/1576.5.0 | Disk | None |
|
||||
| bootkube | PXE boot a 3-node Kubernetes v1.8.5 cluster | stable/1576.5.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
| bootkube-install | Install a 3-node Kubernetes v1.8.5 cluster | stable/1576.5.0 | Disk | [tutorial](../Documentation/bootkube.md) |
|
||||
|
||||
## Tutorials
|
||||
### Customization
|
||||
|
||||
Get started running `bootcfg` on your Linux machine to network boot and provision clusters of VMs or physical hardware.
|
||||
#### Autologin
|
||||
|
||||
* Getting Started
|
||||
* [bootcfg with rkt](../Documentation/getting-started-rkt.md)
|
||||
* [bootcfg with Docker](../Documentation/getting-started-docker.md)
|
||||
* [Kubernetes (static manifests)](../Documentation/kubernetes.md)
|
||||
* [Kubernetes (self-hosted)](../Documentation/bootkube.md)
|
||||
* [Torus Storage](../Documentation/torus.md)
|
||||
* [Lab Examples](https://github.com/dghubble/metal)
|
||||
Example profiles pass the `coreos.autologin` kernel argument. This skips the password prompt for development and troubleshooting and should be removed **before production**.
|
||||
|
||||
## SSH Keys
|
||||
|
||||
Most examples allow `ssh_authorized_keys` to be added for the `core` user as machine group metadata.
|
||||
Example groups allow `ssh_authorized_keys` to be added for the `core` user as metadata. You might also include this directly in your Ignition.
|
||||
|
||||
# /var/lib/bootcfg/groups/default.json
|
||||
# /var/lib/matchbox/groups/default.json
|
||||
{
|
||||
"name": "Example Machine Group",
|
||||
"profile": "pxe",
|
||||
@@ -41,12 +49,8 @@ Most examples allow `ssh_authorized_keys` to be added for the `core` user as mac
|
||||
}
|
||||
}
|
||||
|
||||
## Conditional Variables
|
||||
#### Conditional Variables
|
||||
|
||||
### "pxe"
|
||||
**"pxe"**
|
||||
|
||||
Some examples check the `pxe` variable to determine whether to create a `/dev/sda1` filesystem and partition for PXEing with `root=/dev/sda1` ("pxe":"true") or to write files to the existing filesystem on `/dev/disk/by-label/ROOT` ("pxe":"false").
|
||||
|
||||
### "skip_networkd"
|
||||
|
||||
Some examples (mainly Kubernetes examples) check the `skip_networkd` variable to determine whether to skip configuring networkd. When `true`, the default networkd config is used, which uses DHCP to set up networking. Use this if you've pre-configured static IP mappings for Kubernetes nodes in your DHCP server. Otherwise, `networkd_address`, `networkd_dns`, and `networkd_gateway` machine metadata are used to populate a networkd configuration on each host.
|
||||
|
||||
56
examples/addons/cluo/update-agent.yaml
Normal file
56
examples/addons/cluo/update-agent.yaml
Normal file
@@ -0,0 +1,56 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: container-linux-update-agent
|
||||
namespace: kube-system
|
||||
spec:
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: container-linux-update-agent
|
||||
spec:
|
||||
containers:
|
||||
- name: update-agent
|
||||
image: quay.io/coreos/container-linux-update-operator:v0.3.1
|
||||
command:
|
||||
- "/bin/update-agent"
|
||||
volumeMounts:
|
||||
- mountPath: /var/run/dbus
|
||||
name: var-run-dbus
|
||||
- mountPath: /etc/coreos
|
||||
name: etc-coreos
|
||||
- mountPath: /usr/share/coreos
|
||||
name: usr-share-coreos
|
||||
- mountPath: /etc/os-release
|
||||
name: etc-os-release
|
||||
env:
|
||||
# read by update-agent as the node name to manage reboots for
|
||||
- name: UPDATE_AGENT_NODE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
volumes:
|
||||
- name: var-run-dbus
|
||||
hostPath:
|
||||
path: /var/run/dbus
|
||||
- name: etc-coreos
|
||||
hostPath:
|
||||
path: /etc/coreos
|
||||
- name: usr-share-coreos
|
||||
hostPath:
|
||||
path: /usr/share/coreos
|
||||
- name: etc-os-release
|
||||
hostPath:
|
||||
path: /etc/os-release
|
||||
22
examples/addons/cluo/update-operator.yaml
Normal file
22
examples/addons/cluo/update-operator.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: container-linux-update-operator
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: container-linux-update-operator
|
||||
spec:
|
||||
containers:
|
||||
- name: update-operator
|
||||
image: quay.io/coreos/container-linux-update-operator:v0.3.1
|
||||
command:
|
||||
- "/bin/update-operator"
|
||||
env:
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
@@ -1,44 +0,0 @@
|
||||
|
||||
## gRPC API Credentials
|
||||
|
||||
Create FAKE TLS credentials for running the `bootcfg` gRPC API examples.
|
||||
|
||||
**DO NOT** use these certificates for anything other than running `bootcfg` examples. Use your organization's production PKI for production deployments.
|
||||
|
||||
Navigate to the example directory which will be mounted as `/etc/bootcfg` in examples:
|
||||
|
||||
cd coreos-baremetal/examples/etc/bootcfg
|
||||
|
||||
Set certificate subject alt names which should be used by exporting `SAN`. Use the DNS name or IP at which `bootcfg` is hosted.
|
||||
|
||||
# for examples on metal0 or docker0 bridges
|
||||
export SAN=IP.1:127.0.0.1,IP.2:172.15.0.2
|
||||
|
||||
# production example
|
||||
export SAN=DNS.1:bootcfg.example.com
|
||||
|
||||
Create a fake `ca.crt`, `server.crt`, `server.key`, `client.crt`, and `client.key`. Type 'Y' when prompted.
|
||||
|
||||
$ ./cert-gen
|
||||
Creating FAKE CA, server cert/key, and client cert/key...
|
||||
...
|
||||
...
|
||||
...
|
||||
******************************************************************
|
||||
WARNING: Generated TLS credentials are ONLY SUITABLE FOR EXAMPLES!
|
||||
Use your organization's production PKI for production deployments!
|
||||
|
||||
## Inspect
|
||||
|
||||
Inspect the generated FAKE certificates if desired.
|
||||
|
||||
openssl x509 -noout -text -in ca.crt
|
||||
openssl x509 -noout -text -in server.crt
|
||||
openssl x509 -noout -text -in client.crt
|
||||
|
||||
## Verify
|
||||
|
||||
Verify that the FAKE server and client certificates were signed by the fake CA.
|
||||
|
||||
openssl verify -CAfile ca.crt server.crt
|
||||
openssl verify -CAfile ca.crt client.crt
|
||||
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Install",
|
||||
"name": "CoreOS Container Linux Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "alpha",
|
||||
"coreos_version": "1053.2.0",
|
||||
"ignition_endpoint": "http://bootcfg.foo:8080/ignition",
|
||||
"baseurl": "http://bootcfg.foo:8080/assets/coreos"
|
||||
"coreos_channel": "stable",
|
||||
"coreos_version": "1576.5.0",
|
||||
"ignition_endpoint": "http://matchbox.example.com:8080/ignition",
|
||||
"baseurl": "http://matchbox.example.com:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,25 +1,19 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "Master Node",
|
||||
"profile": "bootkube-master",
|
||||
"name": "Controller Node",
|
||||
"profile": "bootkube-controller",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.21",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"k8s_master_endpoint": "https://172.15.0.21:443",
|
||||
"k8s_pod_network": "10.2.0.0/16",
|
||||
"k8s_service_ip_range": "10.3.0.0/24",
|
||||
"k8s_etcd_endpoints": "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379",
|
||||
"networkd_address": "172.15.0.21/16",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,18 +7,10 @@
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.22",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
|
||||
"etcd_name": "node2",
|
||||
"domain_name": "node2.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"k8s_master_endpoint": "https://172.15.0.21:443",
|
||||
"k8s_pod_network": "10.2.0.0/16",
|
||||
"k8s_service_ip_range": "10.3.0.0/24",
|
||||
"networkd_address": "172.15.0.22/16",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,18 +7,10 @@
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.23",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
|
||||
"etcd_name": "node3",
|
||||
"domain_name": "node3.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"k8s_master_endpoint": "https://172.15.0.21:443",
|
||||
"k8s_pod_network": "10.2.0.0/16",
|
||||
"k8s_service_ip_range": "10.3.0.0/24",
|
||||
"networkd_address": "172.15.0.23/16",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,25 +1,18 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "Master Node",
|
||||
"profile": "bootkube-master",
|
||||
"name": "Controller Node",
|
||||
"profile": "bootkube-controller",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae"
|
||||
},
|
||||
"metadata": {
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
|
||||
"domain_name": "node1.example.com",
|
||||
"etcd_initial_cluster": "node1=https://node1.example.com:2380",
|
||||
"etcd_name": "node1",
|
||||
"ipv4_address": "172.15.0.21",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"k8s_etcd_endpoints": "http://172.15.0.21:2379,http://172.15.0.22:2379,http://172.15.0.23:2379",
|
||||
"k8s_master_endpoint": "https://172.15.0.21:443",
|
||||
"k8s_pod_network": "10.2.0.0/16",
|
||||
"k8s_service_ip_range": "10.3.0.0/24",
|
||||
"networkd_address": "172.15.0.21/16",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,19 +6,11 @@
|
||||
"mac": "52:54:00:b2:2f:86"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.22",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
|
||||
"etcd_name": "node2",
|
||||
"domain_name": "node2.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"k8s_master_endpoint": "https://172.15.0.21:443",
|
||||
"k8s_pod_network": "10.2.0.0/16",
|
||||
"k8s_service_ip_range": "10.3.0.0/24",
|
||||
"networkd_address": "172.15.0.22/16",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,19 +6,11 @@
|
||||
"mac": "52:54:00:c3:61:77"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.23",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380",
|
||||
"etcd_name": "node3",
|
||||
"domain_name": "node3.example.com",
|
||||
"k8s_dns_service_ip": "10.3.0.10",
|
||||
"k8s_master_endpoint": "https://172.15.0.21:443",
|
||||
"k8s_pod_network": "10.2.0.0/16",
|
||||
"k8s_service_ip_range": "10.3.0.0/24",
|
||||
"networkd_address": "172.15.0.23/16",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"pxe": "true",
|
||||
"ssh_authorized_keys": [
|
||||
"ADD ME"
|
||||
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPQFdwVLr+alsWIgYRz9OdqDhnx9jjuFbkdSdpqq4gd9uZApYlivMDD4UgjFazQpezx8DiNhu9ym7i6LgAcdwi+10hE4L9yoJv9uBgbBxOAd65znqLqF91NtV4mlKP5YfJtR7Ehs+pTB+IIC+o5veDbPn+BYgDMJ2x7Osbn1/gFSDken/yoOFbYbRMGMfVEQYjJzC4r/qCKH0bl/xuVNLxf9FkWSTCcQFKGOndwuGITDkshD4r2Kk8gUddXPxoahBv33/2QH0CY5zbKYjhgN6I6WtwO+O1uJwtNeV1AGhYjurdd60qggNwx+W7623uK3nIXvJd3hzDO8u5oa53/tIL fake-test-key-REMOVE-ME"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
{
|
||||
"id": "etcd-aws",
|
||||
"name": "etcd Node",
|
||||
"profile": "etcd-aws",
|
||||
"selector": {
|
||||
"name": "etcd",
|
||||
"platform": "aws"
|
||||
},
|
||||
"metadata": {
|
||||
"etcd_discovery": "token from https://discovery.etcd.io/new?size=N",
|
||||
"ssh_authorized_keys": [
|
||||
"ssh-rsa pub-key-goes-here"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "default",
|
||||
"profile": "etcd-proxy",
|
||||
"metadata": {
|
||||
"etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
|
||||
"fleet_metadata": "role=etcd-proxy"
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "etcd Node 1",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae"
|
||||
},
|
||||
"metadata": {
|
||||
"etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
|
||||
"etcd_name": "node1",
|
||||
"fleet_metadata": "role=etcd,name=node1",
|
||||
"ipv4_address": "172.17.0.21",
|
||||
"networkd_address": "172.17.0.21/16",
|
||||
"networkd_dns": "172.17.0.3",
|
||||
"networkd_gateway": "172.17.0.1"
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "node2",
|
||||
"name": "etcd Node 2",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:b2:2f:86"
|
||||
},
|
||||
"metadata": {
|
||||
"etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
|
||||
"etcd_name": "node2",
|
||||
"fleet_metadata": "role=etcd,name=node2",
|
||||
"ipv4_address": "172.17.0.22",
|
||||
"networkd_address": "172.17.0.22/16",
|
||||
"networkd_dns": "172.17.0.3",
|
||||
"networkd_gateway": "172.17.0.1"
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
{
|
||||
"id": "node3",
|
||||
"name": "etcd Node 3",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:c3:61:77"
|
||||
},
|
||||
"metadata": {
|
||||
"etcd_initial_cluster": "node1=http://172.17.0.21:2380,node2=http://172.17.0.22:2380,node3=http://172.17.0.23:2380",
|
||||
"etcd_name": "node3",
|
||||
"fleet_metadata": "role=etcd,name=node3",
|
||||
"ipv4_address": "172.17.0.23",
|
||||
"networkd_address": "172.17.0.23/16",
|
||||
"networkd_dns": "172.17.0.3",
|
||||
"networkd_gateway": "172.17.0.1"
|
||||
}
|
||||
}
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"id": "coreos-install",
|
||||
"name": "CoreOS Install",
|
||||
"profile": "install-reboot",
|
||||
"metadata": {
|
||||
"coreos_channel": "alpha",
|
||||
"coreos_version": "1053.2.0",
|
||||
"ignition_endpoint": "http://bootcfg.foo:8080/ignition",
|
||||
"baseurl": "http://bootcfg.foo:8080/assets/coreos"
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"id": "node1",
|
||||
"name": "etcd Node 1",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:a1:9c:ae",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.21",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_address": "172.15.0.21/16",
|
||||
"fleet_metadata": "role=etcd,name=node1",
|
||||
"etcd_name": "node1",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"id": "node2",
|
||||
"name": "etcd Node 2",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:b2:2f:86",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.22",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_address": "172.15.0.22/16",
|
||||
"fleet_metadata": "role=etcd,name=node2",
|
||||
"etcd_name": "node2",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"id": "node3",
|
||||
"name": "etcd Node 3",
|
||||
"profile": "etcd",
|
||||
"selector": {
|
||||
"mac": "52:54:00:c3:61:77",
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"ipv4_address": "172.15.0.23",
|
||||
"networkd_gateway": "172.15.0.1",
|
||||
"networkd_dns": "172.15.0.3",
|
||||
"networkd_address": "172.15.0.23/16",
|
||||
"fleet_metadata": "role=etcd,name=node3",
|
||||
"etcd_name": "node3",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"id": "etcd-proxies",
|
||||
"name": "etcd Proxy",
|
||||
"profile": "etcd-proxy",
|
||||
"selector": {
|
||||
"os": "installed"
|
||||
},
|
||||
"metadata": {
|
||||
"fleet_metadata": "role=etcd-proxy",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
{
|
||||
"id": "default",
|
||||
"name": "default",
|
||||
"profile": "etcd-proxy",
|
||||
"metadata": {
|
||||
"fleet_metadata": "role=etcd-proxy",
|
||||
"etcd_initial_cluster": "node1=http://172.15.0.21:2380,node2=http://172.15.0.22:2380,node3=http://172.15.0.23:2380"
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user