Mirror of https://github.com/outbackdingo/cozystack.git (synced 2026-01-28 18:18:41 +00:00)

Compare commits: fix/intege...tym83-patc (704 commits)
704 commits are listed between these refs, but the commit table did not survive the capture: the Author and Date columns are empty and only abbreviated SHAs remain, from 14aba9edb2 at the top of the list to 7a1c3b6209 at the bottom.
.github/CODEOWNERS (vendored, 2 changes)

@@ -1 +1 @@
-* @kvaps @lllamnyp
+* @kvaps @lllamnyp @klinch0
.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 24 lines)

@@ -0,0 +1,24 @@
+<!-- Thank you for making a contribution! Here are some tips for you:
+  - Start the PR title with the [label] of the Cozystack component:
+    - For system components: [platform], [system], [linstor], [cilium], [kube-ovn], [dashboard], [cluster-api], etc.
+    - For managed apps: [apps], [tenant], [kubernetes], [postgres], [virtual-machine], etc.
+    - For development and maintenance: [tests], [ci], [docs], [maintenance].
+  - If it's a work in progress, consider creating this PR as a draft.
+  - Don't hesitate to ask for opinion and review in the community chats, even if it's still a draft.
+  - Add the label `backport` if it's a bugfix that needs to be backported to a previous version.
+-->
+
+## What this PR does
+
+
+### Release note
+
+<!-- Write a release note:
+  - Explain what has changed internally and for users.
+  - Start with the same [label] as in the PR title.
+  - Follow the guidelines at https://github.com/kubernetes/community/blob/master/contributors/guide/release-notes.md.
+-->
+
+```release-note
+[]
+```
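For orientation, a filled-in release note under this template might look like the following (the label and wording are a hypothetical example, following the tip to reuse the PR title's [label]):

```release-note
[apps] Allow configuring the storage class used for PostgreSQL backups.
```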
.github/workflows/backport.yaml (vendored, new file, 53 lines)

@@ -0,0 +1,53 @@
+name: Automatic Backport
+
+on:
+  pull_request_target:
+    types: [closed]  # fires when PR is closed (merged)
+
+concurrency:
+  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
+
+permissions:
+  contents: write
+  pull-requests: write
+
+jobs:
+  backport:
+    if: |
+      github.event.pull_request.merged == true &&
+      contains(github.event.pull_request.labels.*.name, 'backport')
+    runs-on: [self-hosted]
+
+    steps:
+      # 1. Decide which maintenance branch should receive the back‑port
+      - name: Determine target maintenance branch
+        id: target
+        uses: actions/github-script@v7
+        with:
+          script: |
+            let rel;
+            try {
+              rel = await github.rest.repos.getLatestRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo
+              });
+            } catch (e) {
+              core.setFailed('No existing releases found; cannot determine backport target.');
+              return;
+            }
+            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
+            const branch = `release-${maj}.${min}`;
+            core.setOutput('branch', branch);
+            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);
+      # 2. Checkout (required by backport‑action)
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      # 3. Create the back‑port pull request
+      - name: Create back‑port PR
+        uses: korthout/backport-action@v3
+        with:
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+          label_pattern: ''  # don't read labels for targets
+          target_branches: ${{ steps.target.outputs.branch }}
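The heart of this workflow is the branch-derivation step: take the latest published release tag, keep its major.minor part, and back-port onto `release-X.Y`. A minimal standalone Node sketch of the same logic, assuming a hypothetical latest tag:

```js
// Derive the backport target branch from the latest release tag,
// mirroring the github-script step above (input value is hypothetical).
const latestTag = 'v0.31.5';                               // would come from getLatestRelease()
const [maj, min] = latestTag.replace(/^v/, '').split('.'); // ['0', '31']
console.log(`release-${maj}.${min}`);                      // prints "release-0.31"
```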
.github/workflows/pre-commit.yml (vendored, 21 changes)

@@ -1,6 +1,12 @@
 name: Pre-Commit Checks
 
-on: [push, pull_request]
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+
+concurrency:
+  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
 
 jobs:
   pre-commit:
@@ -8,6 +14,9 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          fetch-tags: true
 
       - name: Set up Python
         uses: actions/setup-python@v4
@@ -19,15 +28,7 @@ jobs:
 
       - name: Install generate
         run: |
-          sudo apt update
-          sudo apt install curl -y
-          curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
-          sudo apt install nodejs -y
-          git clone https://github.com/bitnami/readme-generator-for-helm
-          cd ./readme-generator-for-helm
-          npm install
-          npm install -g pkg
-          pkg . -o /usr/local/bin/readme-generator
+          curl -sSL https://github.com/cozystack/readme-generator-for-helm/releases/download/v1.0.0/readme-generator-for-helm-linux-amd64.tar.gz | tar -xzvf- -C /usr/local/bin/ readme-generator-for-helm
 
       - name: Run pre-commit hooks
         run: |
.github/workflows/pull-requests-release.yaml (vendored, 195 changes)

@@ -1,76 +1,153 @@
-name: Releasing PR
+name: "Releasing PR"
 
 on:
   pull_request:
-    types: [labeled, opened, synchronize, reopened, closed]
+    types: [closed]
+    paths-ignore:
+      - 'docs/**/*'
+
+# Cancel in‑flight runs for the same PR when a new push arrives.
+concurrency:
+  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
 
 jobs:
-  verify:
-    name: Test Release
-    runs-on: [self-hosted]
-    permissions:
-      contents: read
-      packages: write
-
-    if: |
-      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
-      contains(github.event.pull_request.labels.*.name, 'release') &&
-      github.event.action != 'closed'
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          fetch-tags: true
-
-      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
-        with:
-          username: ${{ github.repository_owner }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-          registry: ghcr.io
-
-      - name: Run tests
-        run: make test
-
   finalize:
     name: Finalize Release
     runs-on: [self-hosted]
     permissions:
       contents: write
 
     if: |
       github.event.pull_request.merged == true &&
       contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
+      # Extract tag from branch name (branch = release-X.Y.Z*)
       - name: Extract tag from branch name
         id: get_tag
         uses: actions/github-script@v7
         with:
          script: |
            const branch = context.payload.pull_request.head.ref;
-            const match = branch.match(/^release-(v\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
-            if (!match) {
-              core.setFailed(`Branch '${branch}' does not match expected format 'release-vX.Y.Z[-suffix]'`);
-            } else {
-              const tag = match[1];
-              core.setOutput('tag', tag);
-              console.log(`✅ Extracted tag: ${tag}`);
-            }
+            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
+            if (!m) {
+              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
+              return;
+            }
+            const tag = `v${m[1]}`;
+            core.setOutput('tag', tag);
+            console.log(`✅ Tag to publish: ${tag}`);
 
+      # Checkout repo & create / push annotated tag
       - name: Checkout repo
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
 
-      - name: Create tag on merged commit
+      - name: Create tag on merge commit
         run: |
-          git tag ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
-          git push origin ${{ steps.get_tag.outputs.tag }}
+          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
+          git push -f origin ${{ steps.get_tag.outputs.tag }}
 
+      # Ensure maintenance branch release-X.Y
+      - name: Ensure maintenance branch release-X.Y
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GH_PAT }}
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
+            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
+            if (!match) {
+              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
+              return;
+            }
+            const line = `${match[1]}.${match[2]}`;
+            const branch = `release-${line}`;
+
+            // Get main branch commit for the tag
+            const ref = await github.rest.git.getRef({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              ref: `tags/${tag}`
+            });
+
+            const commitSha = ref.data.object.sha;
+
+            try {
+              await github.rest.repos.getBranch({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                branch
+              });
+
+              await github.rest.git.updateRef({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                ref: `heads/${branch}`,
+                sha: commitSha,
+                force: true
+              });
+              console.log(`🔁 Force-updated '${branch}' to ${commitSha}`);
+            } catch (err) {
+              if (err.status === 404) {
+                await github.rest.git.createRef({
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  ref: `refs/heads/${branch}`,
+                  sha: commitSha
+                });
+                console.log(`✅ Created branch '${branch}' at ${commitSha}`);
+              } else {
+                console.error('Unexpected error --', err);
+                core.setFailed(`Unexpected error creating/updating branch: ${err.message}`);
+                throw err;
+              }
+            }
+
+      # Get the latest published release
+      - name: Get the latest published release
+        id: latest_release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            try {
+              const rel = await github.rest.repos.getLatestRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo
+              });
+              core.setOutput('tag', rel.data.tag_name);
+            } catch (_) {
+              core.setOutput('tag', '');
+            }
+
+      # Compare current tag vs latest using semver-utils
+      - name: Semver compare
+        id: semver
+        uses: madhead/semver-utils@v4.3.0
+        with:
+          version: ${{ steps.get_tag.outputs.tag }}
+          compare-to: ${{ steps.latest_release.outputs.tag }}
+
+      # Derive flags: prerelease? make_latest?
+      - name: Calculate publish flags
+        id: flags
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
+            const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
+            if (!m) {
+              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
+              return;
+            }
+            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
+            const isRc = Boolean(m[2]);
+            core.setOutput('is_rc', isRc);
+            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
+            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');
+
+      # Publish draft release with correct flags
       - name: Publish draft release
         uses: actions/github-script@v7
         with:
@@ -78,19 +155,17 @@ jobs:
             const tag = '${{ steps.get_tag.outputs.tag }}';
             const releases = await github.rest.repos.listReleases({
               owner: context.repo.owner,
               repo: context.repo.repo
             });
-            const release = releases.data.find(r => r.tag_name === tag && r.draft);
-            if (!release) {
-              throw new Error(`Draft release with tag ${tag} not found`);
-            }
+            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
+            if (!draft) throw new Error(`Draft release for ${tag} not found`);
 
             await github.rest.repos.updateRelease({
               owner: context.repo.owner,
               repo: context.repo.repo,
-              release_id: release.id,
-              draft: false
+              release_id: draft.id,
+              draft: false,
+              prerelease: ${{ steps.flags.outputs.is_rc }},
+              make_latest: '${{ steps.flags.outputs.make_latest }}'
             });
 
-            console.log(`✅ Published release for ${tag}`);
+            console.log(`🚀 Published release for ${tag}`);
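The finalize job spreads its tag handling across three script steps; condensed into one standalone Node sketch (branch name and semver comparison result are hypothetical inputs), the logic is:

```js
// 1) branch 'release-X.Y.Z[-suffix]' -> tag 'vX.Y.Z[-suffix]'
const branch = 'release-0.31.5-rc.1';                  // hypothetical head branch
const b = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
if (!b) throw new Error(`'${branch}' does not match 'release-X.Y.Z[-suffix]'`);
const tag = `v${b[1]}`;                                // 'v0.31.5-rc.1'

// 2) an -(alpha|beta|rc).N suffix marks the release as a prerelease
const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
const isRc = Boolean(m && m[2]);                       // true

// 3) make_latest stays 'legacy' only for a non-rc tag not older than the latest release
const comparisonResult = '=';                          // what semver-utils would output
const outdated = comparisonResult === '<';
const makeLatest = isRc || outdated ? 'false' : 'legacy';
console.log({ tag, isRc, makeLatest });                // { tag: 'v0.31.5-rc.1', isRc: true, makeLatest: 'false' }
```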
.github/workflows/pull-requests.yaml (vendored, 330 changes)

@@ -2,18 +2,25 @@ name: Pull Request
 
 on:
   pull_request:
-    types: [labeled, opened, synchronize, reopened]
+    types: [opened, synchronize, reopened]
+    paths-ignore:
+      - 'docs/**/*'
+
+# Cancel in‑flight runs for the same PR when a new push arrives.
+concurrency:
+  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
 
 jobs:
-  e2e:
-    name: Build and Test
+  build:
+    name: Build
     runs-on: [self-hosted]
     permissions:
       contents: read
       packages: write
 
+    # Never run when the PR carries the "release" label.
     if: |
-      contains(github.event.pull_request.labels.*.name, 'ok-to-test') &&
       !contains(github.event.pull_request.labels.*.name, 'release')
 
     steps:
@@ -29,11 +36,316 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io
+        env:
+          DOCKER_CONFIG: ${{ runner.temp }}/.docker
 
-      - name: make build
-        run: |
-          make build
+      - name: Build
+        run: make build
+        env:
+          DOCKER_CONFIG: ${{ runner.temp }}/.docker
 
-      - name: make test
-        run: |
-          make test
+      - name: Build Talos image
+        run: make -C packages/core/installer talos-nocloud
+
+      - name: Save git diff as patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        run: git diff HEAD > _out/assets/pr.patch
+
+      - name: Upload git diff patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/upload-artifact@v4
+        with:
+          name: pr-patch
+          path: _out/assets/pr.patch
+
+      - name: Upload installer
+        uses: actions/upload-artifact@v4
+        with:
+          name: cozystack-installer
+          path: _out/assets/cozystack-installer.yaml
+
+      - name: Upload Talos image
+        uses: actions/upload-artifact@v4
+        with:
+          name: talos-image
+          path: _out/assets/nocloud-amd64.raw.xz
+
+  resolve_assets:
+    name: "Resolve assets"
+    runs-on: ubuntu-latest
+    if: contains(github.event.pull_request.labels.*.name, 'release')
+    outputs:
+      installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
+      disk_id: ${{ steps.fetch_assets.outputs.disk_id }}
+
+    steps:
+      - name: Checkout code
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Extract tag from PR branch (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        id: get_tag
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const branch = context.payload.pull_request.head.ref;
+            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
+            if (!m) {
+              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
+              return;
+            }
+            core.setOutput('tag', `v${m[1]}`);
+
+      - name: Find draft release & asset IDs (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        id: fetch_assets
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GH_PAT }}
+          script: |
+            const tag = '${{ steps.get_tag.outputs.tag }}';
+            const releases = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              per_page: 100
+            });
+            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
+            if (!draft) {
+              core.setFailed(`Draft release '${tag}' not found`);
+              return;
+            }
+            const find = (n) => draft.assets.find(a => a.name === n)?.id;
+            const installerId = find('cozystack-installer.yaml');
+            const diskId = find('nocloud-amd64.raw.xz');
+            if (!installerId || !diskId) {
+              core.setFailed('Required assets missing in draft release');
+              return;
+            }
+            core.setOutput('installer_id', installerId);
+            core.setOutput('disk_id', diskId);
+
+  prepare_env:
+    name: "Prepare environment"
+    runs-on: [self-hosted]
+    permissions:
+      contents: read
+      packages: read
+    needs: ["build", "resolve_assets"]
+    if: ${{ always() && (needs.build.result == 'success' || needs.resolve_assets.result == 'success') }}
+
+    steps:
+      # ▸ Checkout and prepare the codebase
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      # ▸ Regular PR path – download artefacts produced by the *build* job
+      - name: "Download Talos image (regular PR)"
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/download-artifact@v4
+        with:
+          name: talos-image
+          path: _out/assets
+
+      - name: Download PR patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/download-artifact@v4
+        with:
+          name: pr-patch
+          path: _out/assets
+
+      - name: Apply patch
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        run: |
+          git apply _out/assets/pr.patch
+
+      # ▸ Release PR path – fetch artefacts from the corresponding draft release
+      - name: Download assets from draft release (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        run: |
+          mkdir -p _out/assets
+          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
+            -o _out/assets/nocloud-amd64.raw.xz \
+            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      # ▸ Start actual job steps
+      - name: Prepare workspace
+        run: |
+          rm -rf /tmp/$SANDBOX_NAME
+          cp -r ${{ github.workspace }} /tmp/$SANDBOX_NAME
+
+      - name: Prepare environment
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          attempt=0
+          until make SANDBOX_NAME=$SANDBOX_NAME prepare-env; do
+            attempt=$((attempt + 1))
+            if [ $attempt -ge 3 ]; then
+              echo "❌ Attempt $attempt failed, exiting..."
+              exit 1
+            fi
+            echo "❌ Attempt $attempt failed, retrying..."
+          done
+          echo "✅ The task completed successfully after $attempt attempts"
+
+  install_cozystack:
+    name: "Install Cozystack"
+    runs-on: [self-hosted]
+    permissions:
+      contents: read
+      packages: read
+    needs: ["prepare_env", "resolve_assets"]
+    if: ${{ always() && needs.prepare_env.result == 'success' }}
+
+    steps:
+      - name: Prepare _out/assets directory
+        run: mkdir -p _out/assets
+
+      # ▸ Regular PR path – download artefacts produced by the *build* job
+      - name: "Download installer (regular PR)"
+        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
+        uses: actions/download-artifact@v4
+        with:
+          name: cozystack-installer
+          path: _out/assets
+
+      # ▸ Release PR path – fetch artefacts from the corresponding draft release
+      - name: Download assets from draft release (release PR)
+        if: contains(github.event.pull_request.labels.*.name, 'release')
+        run: |
+          mkdir -p _out/assets
+          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
+            -o _out/assets/cozystack-installer.yaml \
+            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
+
+      # ▸ Start actual job steps
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Sync _out/assets directory
+        run: |
+          mkdir -p /tmp/$SANDBOX_NAME/_out/assets
+          mv _out/assets/* /tmp/$SANDBOX_NAME/_out/assets/
+
+      - name: Install Cozystack into sandbox
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          attempt=0
+          until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack; do
+            attempt=$((attempt + 1))
+            if [ $attempt -ge 3 ]; then
+              echo "❌ Attempt $attempt failed, exiting..."
+              exit 1
+            fi
+            echo "❌ Attempt $attempt failed, retrying..."
+          done
+          echo "✅ The task completed successfully after $attempt attempts."
+
+  detect_test_matrix:
+    name: "Detect e2e test matrix"
+    runs-on: ubuntu-latest
+    outputs:
+      matrix: ${{ steps.set.outputs.matrix }}
+
+    steps:
+      - uses: actions/checkout@v4
+      - id: set
+        run: |
+          apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
+            awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
+          echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"
+
+  test_apps:
+    strategy:
+      matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
+    name: Test ${{ matrix.app }}
+    runs-on: [self-hosted]
+    needs: [install_cozystack,detect_test_matrix]
+    if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}
+
+    steps:
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: E2E Apps
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          attempt=0
+          until make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}; do
+            attempt=$((attempt + 1))
+            if [ $attempt -ge 3 ]; then
+              echo "❌ Attempt $attempt failed, exiting..."
+              exit 1
+            fi
+            echo "❌ Attempt $attempt failed, retrying..."
+          done
+          echo "✅ The task completed successfully after $attempt attempts"
+
+  collect_debug_information:
+    name: Collect debug information
+    runs-on: [self-hosted]
+    needs: [test_apps]
+    if: ${{ always() }}
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Collect report
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-report
+
+      - name: Upload cozyreport.tgz
+        uses: actions/upload-artifact@v4
+        with:
+          name: cozyreport
+          path: /tmp/${{ env.SANDBOX_NAME }}/_out/cozyreport.tgz
+
+      - name: Collect images list
+        run: |
+          cd /tmp/$SANDBOX_NAME
+          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME collect-images
+
+      - name: Upload image list
+        uses: actions/upload-artifact@v4
+        with:
+          name: image-list
+          path: /tmp/${{ env.SANDBOX_NAME }}/_out/images.txt
+
+  cleanup:
+    name: Tear down environment
+    runs-on: [self-hosted]
+    needs: [collect_debug_information]
+    if: ${{ always() && needs.test_apps.result == 'success' }}
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+          fetch-tags: true
+
+      - name: Set sandbox ID
+        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV
+
+      - name: Tear down sandbox
+        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete
+
+      - name: Remove workspace
+        run: rm -rf /tmp/$SANDBOX_NAME
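Four of the jobs above recompute `SANDBOX_NAME` independently; because it hashes only the repository, workflow, and ref, every job deterministically lands on the same sandbox. A Node equivalent of the shell pipeline, with hypothetical input values:

```js
// echo "$GITHUB_REPOSITORY:$GITHUB_WORKFLOW:$GITHUB_REF" | sha256sum | cut -c1-10
const { createHash } = require('node:crypto');
const key = 'cozystack/cozystack:Pull Request:refs/pull/42/merge'; // hypothetical values
const digest = createHash('sha256')
  .update(key + '\n') // echo appends a trailing newline before hashing
  .digest('hex');
console.log(`cozy-e2e-sandbox-${digest.slice(0, 10)}`);
```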
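Likewise, the `detect_test_matrix` job turns the `*.bats` files under `hack/e2e-apps` into a JSON matrix that fans `test_apps` out into one job per app. The same transformation as a Node sketch, with hypothetical file names:

```js
// find hack/e2e-apps -name '*.bats' | awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .
const files = ['postgres.bats', 'redis.bats', 'virtual-machine.bats']; // hypothetical listing
const apps = files.map(f => f.replace(/\..+$/, ''));                   // strip the extension, like awk's sub()
console.log(`matrix=${JSON.stringify({ app: apps })}`);
// matrix={"app":["postgres","redis","virtual-machine"]}
```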
.github/workflows/tags.yaml (vendored, 247 changes)

@@ -3,7 +3,14 @@ name: Versioned Tag
 on:
   push:
     tags:
-      - 'v*.*.*'
+      - 'v*.*.*'          # vX.Y.Z
+      - 'v*.*.*-rc.*'     # vX.Y.Z-rc.N
+      - 'v*.*.*-beta.*'   # vX.Y.Z-beta.N
+      - 'v*.*.*-alpha.*'  # vX.Y.Z-alpha.N
+
+concurrency:
+  group: tags-${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
 
 jobs:
   prepare-release:
@@ -13,8 +20,10 @@ jobs:
       contents: write
       packages: write
       pull-requests: write
+      actions: write
 
     steps:
+      # Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7
@@ -23,137 +32,211 @@ jobs:
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
-            const existing = releases.data.find(r => r.tag_name === tag && !r.draft);
-            if (existing) {
-              core.setOutput('skip', 'true');
-            } else {
-              core.setOutput('skip', 'false');
-            }
+            const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
+            core.setOutput('skip', exists);
+            console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);
 
+      # If a published release already exists, skip the rest of the workflow
       - name: Skip if release already exists
         if: steps.check_release.outputs.skip == 'true'
         run: echo "Release already exists, skipping workflow."
 
+      # Parse tag meta-data (rc?, maintenance line, etc.)
+      - name: Parse tag
+        if: steps.check_release.outputs.skip == 'false'
+        id: tag
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
+            const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/); // ['0.31.5', '-rc.1' | '-beta.1' | …]
+            if (!m) {
+              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
+              return;
+            }
+            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
+            const isRc = Boolean(m[2]);
+            const [maj, min] = m[1].split('.');
+            core.setOutput('tag', ref);              // v0.31.5-rc.1
+            core.setOutput('version', version);      // 0.31.5-rc.1
+            core.setOutput('is_rc', isRc);           // true
+            core.setOutput('line', `${maj}.${min}`); // 0.31
+
+      # Detect base branch (main or release-X.Y) the tag was pushed from
+      - name: Get base branch
+        if: steps.check_release.outputs.skip == 'false'
+        id: get_base
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const baseRef = context.payload.base_ref;
+            if (!baseRef) {
+              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
+              return;
+            }
+            const branch = baseRef.replace('refs/heads/', '');
+            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
+            if (!ok) {
+              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
+              return;
+            }
+            core.setOutput('branch', branch);
+
+      # Checkout & login once
       - name: Checkout code
         if: steps.check_release.outputs.skip == 'false'
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
           fetch-tags: true
 
-      - name: Login to GitHub Container Registry
+      - name: Login to GHCR
         if: steps.check_release.outputs.skip == 'false'
         uses: docker/login-action@v3
         with:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
           registry: ghcr.io
+        env:
+          DOCKER_CONFIG: ${{ runner.temp }}/.docker
 
+      # Build project artifacts
       - name: Build
         if: steps.check_release.outputs.skip == 'false'
         run: make build
+        env:
+          DOCKER_CONFIG: ${{ runner.temp }}/.docker
 
+      # Commit built artifacts
       - name: Commit release artifacts
         if: steps.check_release.outputs.skip == 'false'
         env:
-          GIT_AUTHOR_NAME: ${{ github.actor }}
-          GIT_AUTHOR_EMAIL: ${{ github.actor }}@users.noreply.github.com
+          GH_PAT: ${{ secrets.GH_PAT }}
         run: |
-          git config user.name "$GIT_AUTHOR_NAME"
-          git config user.email "$GIT_AUTHOR_EMAIL"
+          git config user.name "cozystack-bot"
+          git config user.email "217169706+cozystack-bot@users.noreply.github.com"
+          git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
+          git config --unset-all http.https://github.com/.extraheader || true
           git add .
           git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
+          git push origin HEAD || true
 
-      - name: Create release branch
+      # Get `latest_version` from latest published release
+      - name: Get latest published release
+        if: steps.check_release.outputs.skip == 'false'
+        id: latest_release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            try {
+              const rel = await github.rest.repos.getLatestRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo
+              });
+              core.setOutput('tag', rel.data.tag_name);
+            } catch (_) {
+              core.setOutput('tag', '');
+            }
+
+      # Compare tag (A) with latest (B)
+      - name: Semver compare
+        if: steps.check_release.outputs.skip == 'false'
+        id: semver
+        uses: madhead/semver-utils@v4.3.0
+        with:
+          version: ${{ steps.tag.outputs.tag }}                # A
+          compare-to: ${{ steps.latest_release.outputs.tag }}  # B
+
+      # Create or reuse DRAFT GitHub Release
+      - name: Create / reuse draft release
+        if: steps.check_release.outputs.skip == 'false'
+        id: release
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const tag = '${{ steps.tag.outputs.tag }}';
+            const isRc = ${{ steps.tag.outputs.is_rc }};
+            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
+            const makeLatest = outdated ? false : 'legacy';
+            const releases = await github.rest.repos.listReleases({
+              owner: context.repo.owner,
+              repo: context.repo.repo
+            });
+            let rel = releases.data.find(r => r.tag_name === tag);
+            if (!rel) {
+              rel = await github.rest.repos.createRelease({
+                owner: context.repo.owner,
+                repo: context.repo.repo,
+                tag_name: tag,
+                name: tag,
+                draft: true,
+                prerelease: isRc,
+                make_latest: makeLatest
+              });
+              console.log(`Draft release created for ${tag}`);
+            } else {
+              console.log(`Re-using existing release ${tag}`);
+            }
+            core.setOutput('upload_url', rel.upload_url);
+
+      # Build + upload assets (optional)
+      - name: Build & upload assets
         if: steps.check_release.outputs.skip == 'false'
         run: |
-          BRANCH_NAME="release-${GITHUB_REF#refs/tags/v}"
-          git branch -f "$BRANCH_NAME"
-          git push origin "$BRANCH_NAME" --force
+          make assets
+          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      # Create release-X.Y.Z branch and push (force-update)
+      - name: Create release branch
+        if: steps.check_release.outputs.skip == 'false'
+        env:
+          GH_PAT: ${{ secrets.GH_PAT }}
+        run: |
+          git config user.name "cozystack-bot"
+          git config user.email "217169706+cozystack-bot@users.noreply.github.com"
+          git remote set-url origin https://cozystack-bot:${GH_PAT}@github.com/${GITHUB_REPOSITORY}
+          BRANCH="release-${GITHUB_REF#refs/tags/v}"
+          git branch -f "$BRANCH"
+          git push -f origin "$BRANCH"
 
+      # Create pull request into original base branch (if absent)
       - name: Create pull request if not exists
         if: steps.check_release.outputs.skip == 'false'
         uses: actions/github-script@v7
         with:
+          github-token: ${{ secrets.GH_PAT }}
           script: |
             const version = context.ref.replace('refs/tags/v', '');
-            const branch = `release-${version}`;
-            const base = 'main';
+            const base = '${{ steps.get_base.outputs.branch }}';
+            const head = `release-${version}`;
 
             const prs = await github.rest.pulls.list({
               owner: context.repo.owner,
               repo: context.repo.repo,
-              head: `${context.repo.owner}:${branch}`,
+              head: `${context.repo.owner}:${head}`,
               base
             });
 
             if (prs.data.length === 0) {
-              const newPr = await github.rest.pulls.create({
+              const pr = await github.rest.pulls.create({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
-                head: branch,
-                base: base,
+                head,
+                base,
                 title: `Release v${version}`,
-                body:
-                  `This PR prepares the release \`v${version}\`.\n` +
-                  `(Please merge it before releasing draft)`,
+                body: `This PR prepares the release \`v${version}\`.`,
                 draft: false
               });
 
-              console.log(`Created pull request #${newPr.data.number} from ${branch} to ${base}`);
-
               await github.rest.issues.addLabels({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
-                issue_number: newPr.data.number,
-                labels: ['release', 'ok-to-test']
+                issue_number: pr.data.number,
+                labels: ['release']
               });
+              console.log(`Created PR #${pr.data.number}`);
             } else {
-              console.log(`Pull request already exists from ${branch} to ${base}`);
+              console.log(`PR already exists from ${head} to ${base}`);
             }
 
-      - name: Create or reuse draft release
-        if: steps.check_release.outputs.skip == 'false'
-        id: create_release
-        uses: actions/github-script@v7
-        with:
-          script: |
-            const tag = context.ref.replace('refs/tags/', '');
-            const releases = await github.rest.repos.listReleases({
-              owner: context.repo.owner,
-              repo: context.repo.repo
-            });
-
-            let release = releases.data.find(r => r.tag_name === tag);
-            if (!release) {
-              release = await github.rest.repos.createRelease({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                tag_name: tag,
-                name: `${tag}`,
-                draft: true,
-                prerelease: false
-              });
-            }
-            core.setOutput('upload_url', release.upload_url);
-
-      - name: Build assets
-        if: steps.check_release.outputs.skip == 'false'
-        run: make assets
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Upload assets
-        if: steps.check_release.outputs.skip == 'false'
-        run: make upload_assets VERSION=${GITHUB_REF#refs/tags/}
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Delete pushed tag
-        if: steps.check_release.outputs.skip == 'false'
-        run: |
-          git push --delete origin ${GITHUB_REF#refs/tags/}
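The new `Parse tag` step is the pivot of this workflow: one regex splits the pushed tag into the outputs every later step consumes. As a standalone Node sketch with a hypothetical tag:

```js
// Outputs of the 'Parse tag' step for a sample tag (input is hypothetical).
const ref = 'v0.31.5-rc.1';
const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
if (!m) throw new Error(`tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
const version = m[1] + (m[2] ?? '');  // '0.31.5-rc.1'
const isRc = Boolean(m[2]);           // true
const [maj, min] = m[1].split('.');   // maintenance line
console.log({ tag: ref, version, is_rc: isRc, line: `${maj}.${min}` }); // line: '0.31'
```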
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,6 +1,7 @@
|
|||||||
_out
|
_out
|
||||||
.git
|
.git
|
||||||
.idea
|
.idea
|
||||||
|
.vscode
|
||||||
|
|
||||||
# User-specific stuff
|
# User-specific stuff
|
||||||
.idea/**/workspace.xml
|
.idea/**/workspace.xml
|
||||||
@@ -75,4 +76,4 @@ fabric.properties
|
|||||||
.idea/caches/build_file_checksums.ser
|
.idea/caches/build_file_checksums.ser
|
||||||
|
|
||||||
.DS_Store
|
.DS_Store
|
||||||
**/.DS_Store
|
**/.DS_Store
|
||||||
|
@@ -11,13 +11,14 @@ repos:
   - id: run-make-generate
     name: Run 'make generate' in all app directories
     entry: |
-      /bin/bash -c '
-      for dir in ./packages/apps/*/; do
+      flock -x .git/pre-commit.lock sh -c '
+      for dir in ./packages/apps/*/ ./packages/extra/*/ ./packages/system/cozystack-api/; do
         if [ -d "$dir" ]; then
           echo "Running make generate in $dir"
-          (cd "$dir" && make generate)
+          make generate -C "$dir" || exit $?
         fi
       done
+      git diff --color=always | cat
       '
-    language: script
+    language: system
     files: ^.*$
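The hook's entry point now wraps the whole loop in flock(1), so concurrent pre-commit invocations serialize on `.git/pre-commit.lock` instead of racing over generated files, and each `make generate` failure now aborts the hook. A minimal Go sketch of the same advisory-lock pattern (the lock path is taken from the hook; everything else is illustrative):

```go
// Sketch: serialize a critical section with an advisory file lock,
// the mechanism flock(1) uses in the pre-commit hook above.
// Assumes a Unix-like system.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile(".git/pre-commit.lock", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// LOCK_EX blocks until every other holder releases the lock,
	// matching `flock -x`.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		panic(err)
	}
	defer unix.Flock(int(f.Fd()), unix.LOCK_UN)

	fmt.Println("lock held: run `make generate` for each package here")
}
```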
@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas o

 * Problems found while setting up the development environment
 * Gaps in our documentation
-* Bugs in our Github actions
+* Bugs in our GitHub actions

-First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).
+First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

 The guidelines below are a starting point. We don't want to limit your
 creativity, passion, and initiative. If you think there's a better way, please
-feel free to bring it up in a Github discussion, or open a pull request. We're
+feel free to bring it up in a GitHub discussion, or open a pull request. We're
 certain there are always better ways to do things, we just need to start some
 constructive dialogue!

@@ -23,9 +23,9 @@ We welcome many types of contributions including:
 * New features
 * Builds, CI/CD
 * Bug fixes
-* [Documentation](https://github.com/cozystack/cozystack-website/tree/main)
+* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
 * Issue Triage
-* Answering questions on Slack or Github Discussions
+* Answering questions on Slack or GitHub Discussions
 * Web design
 * Communications / Social Media / Blog Posts
 * Events participation
@@ -34,7 +34,7 @@ We welcome many types of contributions including:
 ## Ask for Help

 The best way to reach us with a question when contributing is to drop a line in
-our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.
+our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.

 ## Raising Issues

CONTRIBUTOR_LADDER.md (new file): 151 lines
@@ -0,0 +1,151 @@
+# Contributor Ladder Template
+
+* [Contributor Ladder](#contributor-ladder-template)
+    * [Community Participant](#community-participant)
+    * [Contributor](#contributor)
+    * [Reviewer](#reviewer)
+    * [Maintainer](#maintainer)
+* [Inactivity](#inactivity)
+* [Involuntary Removal](#involuntary-removal-or-demotion)
+* [Stepping Down/Emeritus Process](#stepping-downemeritus-process)
+* [Contact](#contact)
+
+
+## Contributor Ladder
+
+Hello! We are excited that you want to learn more about our project contributor ladder! This contributor ladder outlines the different contributor roles within the project, along with the responsibilities and privileges that come with them. Community members generally start at the first levels of the "ladder" and advance up it as their involvement in the project grows. Our project members are happy to help you advance along the contributor ladder.
+
+Each of the contributor roles below is organized into lists of three types of things. "Responsibilities" are things that a contributor is expected to do. "Requirements" are qualifications a person needs to meet to be in that role, and "Privileges" are things contributors on that level are entitled to.
+
+
+### Community Participant
+Description: A Community Participant engages with the project and its community, contributing their time, thoughts, etc. Community participants are usually users who have stopped being anonymous and started being active in project discussions.
+
+* Responsibilities:
+    * Must follow the [CNCF CoC](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
+* How users can get involved with the community:
+    * Participating in community discussions
+    * Helping other users
+    * Submitting bug reports
+    * Commenting on issues
+    * Trying out new releases
+    * Attending community events
+
+
+### Contributor
+Description: A Contributor contributes directly to the project and adds value to it. Contributions need not be code. People at the Contributor level may be new contributors, or they may only contribute occasionally.
+
+* Responsibilities include:
+    * Follow the [CNCF CoC](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
+    * Follow the project [contributing guide](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md)
+* Requirements (one or several of the below):
+    * Report and sometimes resolve issues
+    * Occasionally submit PRs
+    * Contribute to the documentation
+    * Show up at meetings, take notes
+    * Answer questions from other community members
+    * Submit feedback on issues and PRs
+    * Test releases and patches and submit reviews
+    * Run or help run events
+    * Promote the project in public
+    * Help run the project infrastructure
+* Privileges:
+    * Invitations to contributor events
+    * Eligible to become a Maintainer
+
+
+### Reviewer
+Description: A Reviewer has responsibility for specific code, documentation, test, or other project areas. They are collectively responsible, with other Reviewers, for reviewing all changes to those areas and indicating whether those changes are ready to merge. They have a track record of contribution and review in the project.
+
+Reviewers are responsible for a "specific area." This can be a specific code directory, driver, chapter of the docs, test job, event, or other clearly-defined project component that is smaller than an entire repository or subproject. Most often it is one or a set of directories in one or more Git repositories. The "specific area" below refers to this area of responsibility.
+
+Reviewers have all the rights and responsibilities of a Contributor, plus:
+
+* Responsibilities include:
+    * Continues to contribute regularly, with at least 15 PRs a year, as demonstrated by [Cozystack devstats](https://cozystack.devstats.cncf.io).
+    * Following the reviewing guide
+    * Reviewing most Pull Requests against their specific areas of responsibility
+    * Reviewing at least 40 PRs per year
+    * Helping other contributors become reviewers
+* Requirements:
+    * Must have successful contributions to the project, including at least one of the following:
+        * 10 accepted PRs,
+        * Reviewed 20 PRs,
+        * Resolved and closed 20 Issues,
+        * Become responsible for a key project management area,
+        * Or some equivalent combination of contributions
+    * Must have been contributing for at least 6 months
+    * Must be actively contributing to at least one project area
+    * Must have two sponsors who are also Reviewers or Maintainers, at least one of whom does not work for the same employer
+    * Has reviewed, or helped review, at least 20 Pull Requests
+    * Has analyzed and resolved test failures in their specific area
+    * Has demonstrated an in-depth knowledge of the specific area
+    * Commits to being responsible for that specific area
+    * Is supportive of new and occasional contributors and helps get useful PRs in shape to commit
+* Additional privileges:
+    * Has GitHub or CI/CD rights to approve pull requests in specific directories
+    * Can recommend and review other contributors to become Reviewers
+    * May be assigned Issues and Reviews
+    * May give commands to CI/CD automation
+
+The process of becoming a Reviewer is:
+1. The contributor is nominated by opening a PR against the appropriate repository, which adds their GitHub username to the OWNERS file for one or more directories.
+2. At least two members of the team that owns that repository or main directory, who are already Approvers, approve the PR.
+
+
+### Maintainer
+Description: Maintainers are very established contributors who are responsible for the entire project. As such, they have the ability to approve PRs against any area of the project, and are expected to participate in making decisions about the strategy and priorities of the project.
+
+A Maintainer must meet the responsibilities and requirements of a Reviewer, plus:
+
+* Responsibilities include:
+    * Reviewing at least 40 PRs per year, especially PRs that involve multiple parts of the project
+    * Mentoring new Reviewers
+    * Writing refactoring PRs
+    * Participating in CNCF maintainer activities
+    * Determining strategy and policy for the project
+    * Participating in, and leading, community meetings
+* Requirements:
+    * Experience as a Reviewer for at least 6 months
+    * Demonstrates a broad knowledge of the project across multiple areas
+    * Is able to exercise judgment for the good of the project, independent of their employer, friends, or team
+    * Mentors other contributors
+    * Can commit to spending at least 10 hours per month working on the project
+* Additional privileges:
+    * Approve PRs to any area of the project
+    * Represent the project in public as a Maintainer
+    * Communicate with the CNCF on behalf of the project
+    * Have a vote in Maintainer decision-making meetings
+
+Process of becoming a maintainer:
+1. Any current Maintainer may nominate a current Reviewer to become a new Maintainer, by opening a PR against the root of the cozystack repository adding the nominee as an Approver in the [MAINTAINERS](https://github.com/cozystack/cozystack/blob/main/MAINTAINERS.md) file.
+2. The nominee will add a comment to the PR testifying that they agree to all requirements of becoming a Maintainer.
+3. A majority of the current Maintainers must then approve the PR.
+
+
+## Inactivity
+It is important for contributors to be and stay active to set an example and show commitment to the project. Inactivity is harmful to the project as it may lead to unexpected delays, contributor attrition, and a loss of trust in the project.
+
+* Inactivity is measured by:
+    * Periods of no contributions for longer than 6 months
+    * Periods of no communication for longer than 3 months
+* Consequences of being inactive include:
+    * Involuntary removal or demotion
+    * Being asked to move to Emeritus status
+
+## Involuntary Removal or Demotion
+
+Involuntary removal/demotion of a contributor happens when responsibilities and requirements aren't being met. This may include repeated patterns of inactivity, an extended period of inactivity, a period of failing to meet the requirements of your role, and/or a violation of the Code of Conduct. This process is important because it protects the community and its deliverables while also opening up opportunities for new contributors to step in.
+
+Involuntary removal or demotion is handled through a vote by a majority of the current Maintainers.
+
+## Stepping Down/Emeritus Process
+If and when contributors' commitment levels change, contributors can consider stepping down (moving down the contributor ladder) vs moving to emeritus status (completely stepping away from the project).
+
+Contact the Maintainers about changing to Emeritus status, or reducing your contributor level.
+
+## Contact
+* For inquiries, please reach out to: @kvaps, @tym83
Makefile: 9 changes
@@ -9,7 +9,6 @@ build-deps:

 build: build-deps
 	make -C packages/apps/http-cache image
-	make -C packages/apps/postgres image
 	make -C packages/apps/mysql image
 	make -C packages/apps/clickhouse image
 	make -C packages/apps/kubernetes image
@@ -20,6 +19,7 @@ build: build-deps
 	make -C packages/system/kubeovn image
 	make -C packages/system/kubeovn-webhook image
 	make -C packages/system/dashboard image
+	make -C packages/system/metallb image
 	make -C packages/system/kamaji image
 	make -C packages/system/bucket image
 	make -C packages/core/testing image
@@ -42,12 +42,15 @@ manifests:
 	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml

 assets:
-	make -C packages/core/installer/ assets
+	make -C packages/core/installer assets

 test:
 	make -C packages/core/testing apply
 	make -C packages/core/testing test
-	#make -C packages/core/testing test-applications
+
+prepare-env:
+	make -C packages/core/testing apply
+	make -C packages/core/testing prepare-cluster

 generate:
 	hack/update-codegen.sh
README.md: 43 changes
@@ -12,32 +12,34 @@

 **Cozystack** is a free PaaS platform and framework for building clouds.

-With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+Cozystack is a [CNCF Sandbox Level Project](https://www.cncf.io/sandbox-projects/) that was originally built and sponsored by [Ænix](https://aenix.io/).

-You can use Cozystack to build your own cloud or to provide a cost-effective development environments.
+With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
+Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+
+Use Cozystack to build your own cloud or provide a cost-effective development environment.
+
+

 ## Use-Cases

-* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
+* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
-  You can use Cozystack as backend for a public cloud
+  You can use Cozystack as a backend for a public cloud

-* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
+* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
-  You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach
+  You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach

-* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
+* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
-  You can use Cozystack as Kubernetes distribution for Bare Metal
+  You can use Cozystack as a Kubernetes distribution for Bare Metal

-## Screenshot
-
-

 ## Documentation

-The documentation is located on official [cozystack.io](https://cozystack.io) website.
+The documentation is located on the [cozystack.io](https://cozystack.io) website.

-Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
+Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.

-If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.
+If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.

 ## Versioning

@@ -50,15 +52,18 @@ A full list of the available releases is available in the GitHub repository's [R

 Contributions are highly appreciated and very welcomed!

-In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
+In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
-In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
+If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.

-You can express your intention in working on the fix on your own.
+You can express your intention to work on the fix on your own.
 Commits are used to generate the changelog, and their author will be referenced in it.

-In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
+If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).

-You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
+## Community
+
+You are welcome to join our [Telegram group](https://t.me/cozystack) and come to our weekly community meetings.
+Add them to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics) for convenience.

 ## License

@@ -39,6 +39,8 @@ import (
 	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
 	"github.com/cozystack/cozystack/internal/controller"
 	"github.com/cozystack/cozystack/internal/telemetry"
+
+	helmv2 "github.com/fluxcd/helm-controller/api/v2"
 	// +kubebuilder:scaffold:imports
 )

@@ -51,6 +53,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

 	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
+	utilruntime.Must(helmv2.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }

@@ -178,6 +181,31 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
 		os.Exit(1)
 	}
+
+	if err = (&controller.WorkloadReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
+		os.Exit(1)
+	}
+
+	if err = (&controller.TenantHelmReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
+		os.Exit(1)
+	}
+
+	if err = (&controller.CozystackConfigReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
+		os.Exit(1)
+	}
+
 	// +kubebuilder:scaffold:builder

 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
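This hunk wires three new reconcilers into the shared manager and registers the Flux helmv2 types with the scheme so their clients can decode HelmRelease objects. For orientation, a minimal controller-runtime skeleton showing what each SetupWithManager call plugs into; the watched HelmRelease type is an illustrative assumption here, and the real reconcilers live in internal/controller:

```go
// Sketch: the controller-runtime registration pattern used in main.go
// above. WorkloadReconciler matches the struct literal in the diff;
// watching HelmRelease objects is an assumption for illustration.
package controller

import (
	"context"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type WorkloadReconciler struct {
	Client client.Client
	Scheme *runtime.Scheme
}

// Reconcile is called for every watched object event; a real
// implementation fetches the object and drives it toward the desired state.
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}

// SetupWithManager registers the reconciler's watches with the manager;
// main.go calls this once per reconciler, exiting on error.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&helmv2.HelmRelease{}).
		Complete(r)
}
```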
@@ -626,7 +626,7 @@
       "datasource": {
         "uid": "${DS_PROMETHEUS}"
       },
-      "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"POD\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
+      "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
       "hide": false,
       "interval": "",
       "legendFormat": "{{pod}}",
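The hunk above, and every dashboard hunk that follows, makes the same one-line substitution: the `container!="POD"` matcher is dropped and only `container!=""` is kept, presumably because the environment no longer emits the pause container under the `POD` label, leaving the extra matcher redundant (our reading; the diff itself does not say). A minimal Go sketch that evaluates one of the updated expressions through the Prometheus HTTP API; the server address is an assumption, and the query is the first hunk's expression with the dashboard's `$namespace` variable removed:

```go
// Sketch: run one rewritten dashboard query against Prometheus.
// The address below is a placeholder, not from the dashboards.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://prometheus:9090"})
	if err != nil {
		panic(err)
	}
	promAPI := v1.NewAPI(client)

	// Only container!="" remains; container!="POD" is gone everywhere below.
	query := `sum(container_memory_working_set_bytes{container!="",pod=~".*-controller-.*"}) by (pod)`

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	result, warnings, err := promAPI.Query(ctx, query, time.Now())
	if err != nil {
		panic(err)
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)
}
```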
@@ -450,7 +450,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -520,7 +520,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -590,7 +590,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -660,7 +660,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
+      "expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
       "hide": false,
       "legendFormat": "__auto",
       "range": true,
@@ -1128,7 +1128,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
+      "expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
       "format": "time_series",
       "hide": false,
       "intervalFactor": 1,
@@ -1143,7 +1143,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
+      "expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -1527,7 +1527,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
+      "expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
       "format": "time_series",
       "hide": false,
       "intervalFactor": 1,
@@ -1542,7 +1542,7 @@
         "uid": "$ds_prometheus"
       },
       "editorMode": "code",
-      "expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
+      "expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
       "hide": false,
       "legendFormat": "Total",
       "range": true,
@@ -1909,7 +1909,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
+      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
       "format": "table",
       "instant": true,
       "range": false,
@@ -2037,7 +2037,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
+      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
       "format": "table",
       "instant": true,
       "range": false,
@@ -2160,7 +2160,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
+      "expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
       "format": "table",
       "instant": true,
       "range": false,
@@ -2288,7 +2288,7 @@
       },
       "editorMode": "code",
       "exemplar": false,
-      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
+      "expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
       "format": "table",
       "instant": true,
       "range": false,
|||||||
@@ -684,7 +684,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -710,7 +710,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -723,7 +723,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -736,7 +736,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) \n (\n (\n (\n sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"instant": true,
|
"instant": true,
|
||||||
@@ -762,7 +762,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -786,7 +786,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "sum by (pod)\n(\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -798,7 +798,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -810,7 +810,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n -\n sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n ) > 0\n )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -848,7 +848,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -860,7 +860,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "${ds_prometheus}"
|
"uid": "${ds_prometheus}"
|
||||||
},
|
},
|
||||||
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
"expr": "(\n sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
|
||||||
"format": "table",
|
"format": "table",
|
||||||
"instant": true,
|
"instant": true,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1315,7 +1315,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"instant": false,
|
"instant": false,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1488,7 +1488,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"interval": "",
|
"interval": "",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1502,7 +1502,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"interval": "",
|
"interval": "",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -1642,7 +1642,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
|
"expr": "sum by (pod)\n (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n ) > 0\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ pod }}",
|
"legendFormat": "{{ pod }}",
|
||||||
@@ -1779,7 +1779,7 @@
|
|||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"editorMode": "code",
|
"editorMode": "code",
|
||||||
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
|
"expr": " (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n )\n or\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n )\n) > 0",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -2095,7 +2095,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -2306,7 +2306,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2468,7 +2468,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
"uid": "${ds_prometheus}"
},
"editorMode": "code",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
"expr": "sum (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n * on (pod)\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2837,7 +2837,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -2974,7 +2974,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )\n)",
"expr": "(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (pod) group_left()\n sum by (pod)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3290,56 +3290,56 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Cache",
"refId": "B"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Swap",
"refId": "C"
},
{
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
"expr": "sum by (pod)\n(\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n * on (controller_type, controller_name) group_left()\n sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "E"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "G"
},
{
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3834,7 +3834,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
@@ -3972,7 +3972,7 @@
"uid": "$ds_prometheus"
},
"editorMode": "code",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"expr": "sum by(pod) (\n max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n * on (pod)\n sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ pod }}",
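The remaining hunks apply the same container!="" matcher to a second dashboard's instant table queries, which are evaluated over $__range and share a zero-fill idiom. A simplified sketch (node and controller-type matchers dropped for brevity; not the literal panel query):

# If the left-hand sum yields no series for some controller, the "or"
# arm contributes a 0-valued series per controller, so the table row
# still renders instead of disappearing.
sum by (controller) (
    kube_controller_pod{namespace="$namespace"}
  * on (pod) group_left()
    sum by (pod) (rate(container_cpu_usage_seconds_total{namespace="$namespace", container!=""}[$__range]))
)
or
count by (controller) (kube_controller_pod{namespace="$namespace"}) * 0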
@@ -656,7 +656,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -680,7 +680,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -692,7 +692,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -704,7 +704,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -728,7 +728,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -740,7 +740,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -752,7 +752,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -764,7 +764,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -776,7 +776,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -814,7 +814,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -826,7 +826,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -877,7 +877,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1475,7 +1475,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1657,7 +1657,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1798,7 +1798,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2257,28 +2257,28 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
"refId": "D"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "C"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2458,7 +2458,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2470,7 +2470,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2622,7 +2622,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2814,7 +2814,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2822,14 +2822,14 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Kmem",
|
"legendFormat": "Kmem",
|
||||||
@@ -2955,7 +2955,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
|
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ controller }}",
|
"legendFormat": "{{ controller }}",
|
||||||
@@ -3091,7 +3091,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
|
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ controller }}",
|
"legendFormat": "{{ controller }}",
|
||||||
@@ -3408,14 +3408,14 @@
|
|||||||
"repeatDirection": "h",
|
"repeatDirection": "h",
|
||||||
"targets": [
|
"targets": [
|
||||||
{
|
{
|
||||||
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
|
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "RSS",
|
"legendFormat": "RSS",
|
||||||
"refId": "A"
|
"refId": "A"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
|
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"interval": "",
|
"interval": "",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -3423,7 +3423,7 @@
|
|||||||
"refId": "B"
|
"refId": "B"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
|
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"interval": "",
|
"interval": "",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -3431,35 +3431,35 @@
|
|||||||
"refId": "C"
|
"refId": "C"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
|
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Working set bytes without kmem",
|
"legendFormat": "Working set bytes without kmem",
|
||||||
"refId": "D"
|
"refId": "D"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
|
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Requests",
|
"legendFormat": "Requests",
|
||||||
"refId": "E"
|
"refId": "E"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
|
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Limits",
|
"legendFormat": "Limits",
|
||||||
"refId": "F"
|
"refId": "F"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
|
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "VPA Target",
|
"legendFormat": "VPA Target",
|
||||||
"refId": "G"
|
"refId": "G"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
|
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Kmem",
|
"legendFormat": "Kmem",
|
||||||
@@ -3910,7 +3910,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
|
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ controller }}",
|
"legendFormat": "{{ controller }}",
|
||||||
@@ -4049,7 +4049,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
|
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ controller }}",
|
"legendFormat": "{{ controller }}",
|
||||||
@@ -869,7 +869,7 @@
 "refId": "A"
 },
 {
-"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -878,7 +878,7 @@
 "refId": "B"
 },
 {
-"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -895,7 +895,7 @@
 "refId": "D"
 },
 {
-"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -903,7 +903,7 @@
 "refId": "E"
 },
 {
-"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -919,7 +919,7 @@
 "refId": "G"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -935,7 +935,7 @@
 "refId": "I"
 },
 {
-"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -943,7 +943,7 @@
 "refId": "J"
 },
 {
-"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -968,7 +968,7 @@
 "refId": "M"
 },
 {
-"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -977,7 +977,7 @@
 "refId": "N"
 },
 {
-"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
+"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -1449,7 +1449,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "System",
@@ -1627,7 +1627,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -1764,7 +1764,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
+"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
+"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2218,21 +2218,21 @@
 "refId": "A"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
+"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "B"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
+"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "C"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2419,7 +2419,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -2572,7 +2572,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -2754,14 +2754,14 @@
 "pluginVersion": "8.5.13",
 "targets": [
 {
-"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2769,7 +2769,7 @@
 "refId": "B"
 },
 {
-"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -2777,14 +2777,14 @@
 "refId": "C"
 },
 {
-"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -2910,7 +2910,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
+"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -3046,7 +3046,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
+"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -3370,14 +3370,14 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -3385,7 +3385,7 @@
 "refId": "B"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -3393,35 +3393,35 @@
 "refId": "C"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
+"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
 "refId": "E"
 },
 {
-"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "F"
 },
 {
-"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "G"
 },
 {
-"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -3873,7 +3873,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -4008,7 +4008,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ namespace }}",
@@ -686,7 +686,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
+"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -759,7 +759,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
+"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -847,7 +847,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
+"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -860,7 +860,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
+"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -899,7 +899,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
+"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -1503,7 +1503,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -1669,7 +1669,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -1681,7 +1681,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -1820,7 +1820,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
+"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,
@@ -2269,7 +2269,7 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
+"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Usage",
@@ -2476,7 +2476,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
+"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -2488,7 +2488,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
+"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -2639,7 +2639,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -2816,7 +2816,7 @@
 "pluginVersion": "8.5.13",
 "targets": [
 {
-"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -2824,28 +2824,28 @@
 "refId": "A"
 },
 {
-"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Cache",
 "refId": "B"
 },
 {
-"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Swap",
 "refId": "C"
 },
 {
-"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -2974,7 +2974,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
+"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,
@@ -3110,7 +3110,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
+"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
 "format": "time_series",
|
||||||
"hide": false,
|
"hide": false,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -3431,7 +3431,7 @@
|
|||||||
"repeatDirection": "h",
|
"repeatDirection": "h",
|
||||||
"targets": [
|
"targets": [
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"instant": false,
|
"instant": false,
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -3439,7 +3439,7 @@
|
|||||||
"refId": "A"
|
"refId": "A"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"interval": "",
|
"interval": "",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
@@ -3447,28 +3447,28 @@
|
|||||||
"refId": "B"
|
"refId": "B"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Swap",
|
"legendFormat": "Swap",
|
||||||
"refId": "C"
|
"refId": "C"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Working set bytes without kmem",
|
"legendFormat": "Working set bytes without kmem",
|
||||||
"refId": "D"
|
"refId": "D"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Limits",
|
"legendFormat": "Limits",
|
||||||
"refId": "E"
|
"refId": "E"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Requests",
|
"legendFormat": "Requests",
|
||||||
@@ -3482,7 +3482,7 @@
|
|||||||
"refId": "G"
|
"refId": "G"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "Kmem",
|
"legendFormat": "Kmem",
|
||||||
@@ -3930,7 +3930,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ container }}",
|
"legendFormat": "{{ container }}",
|
||||||
@@ -4068,7 +4068,7 @@
|
|||||||
"type": "prometheus",
|
"type": "prometheus",
|
||||||
"uid": "$ds_prometheus"
|
"uid": "$ds_prometheus"
|
||||||
},
|
},
|
||||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||||
"format": "time_series",
|
"format": "time_series",
|
||||||
"intervalFactor": 1,
|
"intervalFactor": 1,
|
||||||
"legendFormat": "{{ container }}",
|
"legendFormat": "{{ container }}",
|
||||||
|
|||||||
docs/changelogs/template.md (new file, 11 lines)

## Major Features and Improvements

## Security

## Fixes

## Dependencies

## Documentation

## Development, Testing, and CI/CD

docs/changelogs/v0.31.0.md (new file, 243 lines)

Cozystack v0.31.0 is a significant release that brings new features, key fixes, and updates to underlying components.
This version enhances GPU support, improves many components of Cozystack, and introduces a more robust release process to improve stability.
Below, we'll go over the highlights in each area for current users, developers, and our community.

## Major Features and Improvements

### GPU support for tenant Kubernetes clusters

Cozystack now integrates NVIDIA GPU Operator support for tenant Kubernetes clusters.
This enables platform users to run GPU-powered AI/ML applications in their own clusters.
To enable the GPU Operator, set `addons.gpuOperator.enabled: true` in the cluster configuration.
(@kvaps in https://github.com/cozystack/cozystack/pull/834)
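
As a sketch, enabling the addon in the tenant cluster's configuration looks like this (only the `addons.gpuOperator.enabled` key is confirmed by these notes; any surrounding keys are illustrative):

```yaml
# Tenant Kubernetes cluster values (sketch): enable the NVIDIA GPU Operator addon.
# Only addons.gpuOperator.enabled is confirmed here.
addons:
  gpuOperator:
    enabled: true
```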

Check out Andrei Kvapil's CNCF webinar [showcasing the GPU support by running Stable Diffusion in Cozystack](https://www.youtube.com/watch?v=S__h_QaoYEk).

<!--
* [kubernetes] Introduce GPU support for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/834)
-->

### Cilium Improvements

Cozystack's Cilium integration received two significant enhancements.
First, Gateway API support in Cilium is now enabled, allowing advanced L4/L7 routing features via the Kubernetes Gateway API.
We thank Zdenek Janda @zdenekjanda for contributing this feature in https://github.com/cozystack/cozystack/pull/924.

Second, Cozystack now permits custom user-provided parameters in the tenant cluster's Cilium configuration.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/917)

<!--
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
-->

### Cross-Architecture Builds (ARM Support Beta)

Cozystack's build system was refactored to support multi-architecture binaries and container images.
This paves the way to running Cozystack on ARM64 servers.
Changes include Makefile improvements (https://github.com/cozystack/cozystack/pull/907)
and multi-arch Docker image builds (https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970).

We thank Nikita Bykov @nbykov0 for his ongoing work on ARM support!

<!--
* Introduce support for cross-architecture builds and Cozystack on ARM:
  * [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
  * [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
-->

### VerticalPodAutoscaler (VPA) Expansion

The VerticalPodAutoscaler is now enabled for more Cozystack components to automate resource tuning.
Specifically, VPA was added for tenant Kubernetes control planes (@klinch0 in https://github.com/cozystack/cozystack/pull/806),
the Cozystack Dashboard (https://github.com/cozystack/cozystack/pull/828),
and the Cozystack etcd-operator (https://github.com/cozystack/cozystack/pull/850).
All Cozystack components that have VPA enabled can automatically adjust their CPU and memory requests based on usage, improving platform and application stability.

<!--
* Add VerticalPodAutoscaler to a few more components:
  * [kubernetes] Kubernetes clusters in user tenants. (@klinch0 in https://github.com/cozystack/cozystack/pull/806)
  * [platform] Cozystack dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/828)
  * [platform] Cozystack etcd-operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
-->

### Tenant HelmRelease Reconcile Controller

A new controller was introduced to monitor and synchronize HelmRelease resources across tenants.
This controller propagates configuration changes to tenant workloads and ensures that any HelmRelease defined in a tenant
stays in sync with platform updates.
It improves the reliability of deploying managed applications in Cozystack.
(@klinch0 in https://github.com/cozystack/cozystack/pull/870)

<!--
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
-->

### Virtual Machine Improvements

**Configurable KubeVirt CPU Overcommit**: The CPU allocation ratio in KubeVirt (how virtual CPUs are overcommitted relative to physical ones) is now configurable
via the `cpu-allocation-ratio` value in the Cozystack configmap.
This means Cozystack administrators can now tune CPU overcommitment for VMs to balance performance vs. density.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
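
As a sketch, assuming the standard `cozystack` ConfigMap in the `cozy-system` namespace, setting the ratio could look like this (the ratio value itself is illustrative):

```yaml
# Sketch: tune KubeVirt CPU overcommit via the Cozystack ConfigMap.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  cpu-allocation-ratio: "10"  # illustrative: 10 virtual CPUs per physical CPU
```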

**KubeVirt VM Export**: Cozystack now allows exporting KubeVirt virtual machines.
This feature, enabled via KubeVirt's `VirtualMachineExport` capability, lets users snapshot or back up VM images.
(@kvaps in https://github.com/cozystack/cozystack/pull/808)

**Support for various storage classes in Virtual Machines**: The `virtual-machine` application (since version 0.9.2) lets you pick any StorageClass for a VM's
system disk instead of relying on a hard-coded PVC.
Refer to values `systemDisk.storage` and `systemDisk.storageClass` in the [application's configs](https://cozystack.io/docs/reference/applications/virtual-machine/#common-parameters).
(@kvaps in https://github.com/cozystack/cozystack/pull/974)
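
A minimal sketch of the relevant values (the `systemDisk.storage` and `systemDisk.storageClass` keys come from these notes; the values shown are illustrative):

```yaml
# virtual-machine application values (sketch): pick a StorageClass for the system disk.
systemDisk:
  storage: 20Gi               # disk size, illustrative
  storageClass: replicated    # any StorageClass available in the cluster
```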

<!--
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
-->

### Other Features and Improvements

* [platform] Introduce options `expose-services`, `expose-ingress`, and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)

## New Release Lifecycle

The Cozystack release lifecycle is changing to provide a more stable and predictable upgrade path for customers running Cozystack in mission-critical environments.

* **Gradual Release with Alpha, Beta, and Release Candidates**: Cozystack will now publish pre-release versions (alpha, beta, release candidates) before a stable release.
  For v0.31.0, the team published three release candidates before the stable release.
  This allows more testing and feedback before marking a release as stable.

* **Prolonged Release Support with Patch Versions**: After the initial `vX.Y.0` release, a long-lived branch `release-X.Y` will be created to backport fixes.
  For example, with 0.31.0's release, a `release-0.31` branch will track patch fixes (`0.31.x`).
  This strategy lets Cozystack users receive timely patch releases and updates with minimal risks.

To implement these new changes, we have rebuilt our CI/CD workflows and introduced automation, enabling automatic backports.
You can read more about how it's implemented in the Development section below.

For more information, read the [Cozystack Release Workflow](https://github.com/cozystack/cozystack/blob/main/docs/release.md) documentation.

## Fixes

* [virtual-machine] Add GPU names to the virtual machine specifications. (@kvaps in https://github.com/cozystack/cozystack/pull/862)
* [virtual-machine] Count Workload resources for pods by requests, not limits. Other improvements to VM resource tracking. (@lllamnyp in https://github.com/cozystack/cozystack/pull/904)
* [virtual-machine] Set PortList method by default. (@kvaps in https://github.com/cozystack/cozystack/pull/996)
* [virtual-machine] Specify ports even for wholeIP mode. (@kvaps in https://github.com/cozystack/cozystack/pull/1000)
* [platform] Fix installing HelmReleases on initial setup. (@kvaps in https://github.com/cozystack/cozystack/pull/833)
* [platform] Migration scripts update the Kubernetes ConfigMap with the current stack version for improved version tracking. (@klinch0 in https://github.com/cozystack/cozystack/pull/840)
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
* [platform] Improve the reconciliation logic for Cozystack system HelmReleases. (@klinch0 in https://github.com/cozystack/cozystack/pull/809 and https://github.com/cozystack/cozystack/pull/810, @kvaps in https://github.com/cozystack/cozystack/pull/811)
* [platform] Remove extra dependencies for the Piraeus operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/856)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Make the FluxCD artifact disabled by default. (@klinch0 in https://github.com/cozystack/cozystack/pull/964)
* [kubernetes] Update garbage collection of HelmReleases in tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/835)
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix the Ingress-NGINX dependency on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [kubernetes, apps] Enable `topologySpreadConstraints` for tenant Kubernetes clusters and fix it for managed PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/995)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no longer necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
* [monitoring] Refactor management etcd monitoring config. Introduce a migration script for updating monitoring resources (`kube-rbac-proxy` daemonset). (@lllamnyp in https://github.com/cozystack/cozystack/pull/799 and https://github.com/cozystack/cozystack/pull/830)
* [monitoring] Fix VerticalPodAutoscaler resource allocation for VMagent. (@klinch0 in https://github.com/cozystack/cozystack/pull/820)
* [postgres] Remove duplicated `template` entry from backup manifest. (@etoshutka in https://github.com/cozystack/cozystack/pull/872)
* [kube-ovn] Fix versions mapping in Makefile. (@kvaps in https://github.com/cozystack/cozystack/pull/883)
* [dx] Automatically detect the version for migrations in `installer.sh`. (@kvaps in https://github.com/cozystack/cozystack/pull/837)
* [dx] Remove `version_map` and building for library charts. (@kvaps in https://github.com/cozystack/cozystack/pull/998)
* [docs] Review the tenant Kubernetes cluster docs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/969)
* [docs] Explain that tenants cannot have dashes in their names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)

## Dependencies

* MetalLB images are now built in-tree based on version 0.14.9 with additional critical patches. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* Update Kubernetes to v1.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/949)
* Update Talos Linux to v1.10.1. (@kvaps in https://github.com/cozystack/cozystack/pull/931)
* Update Cilium to v1.17.3. (@kvaps in https://github.com/cozystack/cozystack/pull/848)
* Update LINSTOR to v1.31.0. (@kvaps in https://github.com/cozystack/cozystack/pull/846)
* Update Kube-OVN to v1.13.11. (@kvaps in https://github.com/cozystack/cozystack/pull/847, @lllamnyp in https://github.com/cozystack/cozystack/pull/922)
* Update tenant Kubernetes to v1.32. (@kvaps in https://github.com/cozystack/cozystack/pull/871)
* Update flux-operator to 0.20.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/880 and https://github.com/cozystack/cozystack/pull/934)
* Update multiple Cluster API components. (@kvaps in https://github.com/cozystack/cozystack/pull/867 and https://github.com/cozystack/cozystack/pull/947)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)

## Documentation

* [Installing Talos in Air-Gapped Environment](https://cozystack.io/docs/operations/talos/configuration/air-gapped/):
  a new guide for configuring and bootstrapping Talos Linux clusters in air-gapped environments.
  (@klinch0 in https://github.com/cozystack/website/pull/203)

* [Cozystack Bundles](https://cozystack.io/docs/guides/bundles/): a new page in the learning section explaining how Cozystack bundles work and how to choose a bundle.
  (@NickVolynkin in https://github.com/cozystack/website/pull/188, https://github.com/cozystack/website/pull/189, and others;
  updated by @kvaps in https://github.com/cozystack/website/pull/192 and https://github.com/cozystack/website/pull/193)

* [Managed Application Reference](https://cozystack.io/docs/reference/applications/): a set of new pages in the docs, mirroring application docs from the Cozystack dashboard.
  (@NickVolynkin in https://github.com/cozystack/website/pull/198, https://github.com/cozystack/website/pull/202, and https://github.com/cozystack/website/pull/204)

* **LINSTOR Networking**: guides on [configuring a dedicated network for LINSTOR](https://cozystack.io/docs/operations/storage/dedicated-network/)
  and [configuring the network for distributed storage in a multi-datacenter setup](https://cozystack.io/docs/operations/stretched/linstor-dedicated-network/).
  (@xy2, edited by @NickVolynkin in https://github.com/cozystack/website/pull/171, https://github.com/cozystack/website/pull/182, and https://github.com/cozystack/website/pull/184)

### Fixes

* Correct an error in the doc for the command to edit the configmap. (@lb0o in https://github.com/cozystack/website/pull/207)
* Fix group name in OIDC docs. (@kingdonb in https://github.com/cozystack/website/pull/179)
* A bit more explanation of Docker buildx builders. (@nbykov0 in https://github.com/cozystack/website/pull/187)

## Development, Testing, and CI/CD

### Testing

Improvements:

* Introduce `cozytest`, a new [BATS-based](https://github.com/bats-core/bats-core) testing framework. (@kvaps in https://github.com/cozystack/cozystack/pull/982)

Fixes:

* Fix `device_ownership_from_security_context` CRI. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* Increase timeout durations for `capi` and `keycloak` to improve reliability during e2e-tests. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* Return `genisoimage` to the e2e-test Dockerfile. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)

### CI/CD Changes

Improvements:

* Use release branches `release-X.Y` for gathering and releasing fixes after the initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
* Automatically create release branches after the initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
* Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
* Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
* Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
* Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)

Fixes:

* Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* Stop using the `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* Force-update the release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* Show detailed errors in the `pull-request-release` workflow. (@lllamnyp in https://github.com/cozystack/cozystack/pull/992)

## Community and Maintenance

### Repository Maintenance

Added @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)

### New Contributors

* @etoshutka made their first contribution in https://github.com/cozystack/cozystack/pull/872
* @dtrdnk made their first contribution in https://github.com/cozystack/cozystack/pull/896
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962

## Full Changelog

See https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0

docs/changelogs/v0.31.1.md (new file, 8 lines)

## Fixes

* [build] Update Talos Linux to v1.10.3 and fix assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* [ci] Fix uploading released artifacts to GitHub. (@kvaps in https://github.com/cozystack/cozystack/pull/1009)
* [ci] Separate build and testing jobs. (@kvaps in https://github.com/cozystack/cozystack/pull/1005)
* [docs] Write a full release post for v0.31.1. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/999)

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.31.1

docs/changelogs/v0.31.2.md (new file, 12 lines)

## Security

* Resolve a security problem that allowed a tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062, backported in https://github.com/cozystack/cozystack/pull/1066)

## Fixes

* [platform] Fix dependencies in `distro-full` bundle. (@klinch0 in https://github.com/cozystack/cozystack/pull/1056, backported in https://github.com/cozystack/cozystack/pull/1064)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031, backported in https://github.com/cozystack/cozystack/pull/1037)
* [platform] Reduce system resource consumption by using smaller resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054, backported in https://github.com/cozystack/cozystack/pull/1058)
* [dashboard] Fix a number of issues in the Cozystack Dashboard. (@kvaps in https://github.com/cozystack/cozystack/pull/1042, backported in https://github.com/cozystack/cozystack/pull/1066)
* [apps] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040, backported in https://github.com/cozystack/cozystack/pull/1041)
* [apps] Update built-in documentation and configuration reference for the managed Clickhouse application. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1059, backported in https://github.com/cozystack/cozystack/pull/1065)

docs/changelogs/v0.32.0.md (new file, 71 lines)

Cozystack v0.32.0 is a significant release that brings new features, key fixes, and updates to underlying components.

## Major Features and Improvements

* [platform] Use `cozypkg` instead of Helm. (@kvaps in https://github.com/cozystack/cozystack/pull/1057)
* [platform] Introduce the HelmRelease reconciler for system components. (@kvaps in https://github.com/cozystack/cozystack/pull/1033)
* [kubernetes] Enable using container registry mirrors by tenant Kubernetes clusters. Configure containerd for tenant Kubernetes clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/979, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/1032)
* [platform] Allow users to specify CPU requests in VCPUs. Use a library chart for resource management. (@lllamnyp in https://github.com/cozystack/cozystack/pull/972 and https://github.com/cozystack/cozystack/pull/1025)
* [platform] Annotate all child objects of apps with uniform labels for tracking by WorkloadMonitors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1018 and https://github.com/cozystack/cozystack/pull/1024)
* [platform] Introduce the `cluster-domain` option and un-hardcode `cozy.local`; see the sketch after this list. (@kvaps in https://github.com/cozystack/cozystack/pull/1039)
* [platform] Get the instance type when reconciling a WorkloadMonitor. (https://github.com/cozystack/cozystack/pull/1030)
* [virtual-machine] Add RBAC rules to allow port forwarding in KubeVirt for SSH via `virtctl`. (@mattia-eleuteri in https://github.com/cozystack/cozystack/pull/1027, patched by @klinch0 in https://github.com/cozystack/cozystack/pull/1028)
* [monitoring] Add events and audit inputs. (@kevin880202 in https://github.com/cozystack/cozystack/pull/948)
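
As a sketch, assuming the standard `cozystack` ConfigMap, overriding the default cluster domain could look like this (the `cluster-domain` key comes from the note above; the value is illustrative):

```yaml
# Sketch: set a custom cluster domain instead of the default cozy.local.
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  cluster-domain: "cozy.example.org"  # illustrative value
```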

## Security

* Resolve a security problem that allowed a tenant administrator to gain enhanced privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062)

## Fixes

* [dashboard] Fix a number of issues in the Cozystack Dashboard. (@kvaps in https://github.com/cozystack/cozystack/pull/1042)
* [kafka] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040)
* [cilium] Fix the Gateway API manifest. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/1016)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031)
* [platform] Fix dependencies for the paas-hosted bundle. (@kvaps in https://github.com/cozystack/cozystack/pull/1034)
* [platform] Reduce system resource consumption by using smaller resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054)
* [virtual-machine] Fix handling of cloudinit and ssh-key input for `virtual-machine` and `vm-instance` applications. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1019 and https://github.com/cozystack/cozystack/pull/1020)
* [apps] Fix Clickhouse version parsing. (@kvaps in https://github.com/cozystack/cozystack/commit/28302e776e9d2bb8f424cf467619fa61d71ac49a)
* [apps] Add resource quotas for PostgreSQL jobs and fix the application readme generation check in CI. (@klinch0 in https://github.com/cozystack/cozystack/pull/1051)
* [kube-ovn] Enable database health check. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
* [kubernetes] Fix an upstream issue by updating Kubevirt-CCM. (@kvaps in https://github.com/cozystack/cozystack/pull/1052)
* [kubernetes] Fix resources and introduce a migration when upgrading tenant Kubernetes to v0.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1073)
* [cluster-api] Add a missing migration for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1072)

## Dependencies

* Introduce `cozypkg`, update to v1.1.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1057 and https://github.com/cozystack/cozystack/pull/1063)
* Update flux-operator to 0.22.0, Flux to 2.6.x. (@kingdonb in https://github.com/cozystack/cozystack/pull/1035)
* Update Talos Linux to v1.10.3. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* Update Cilium to v1.17.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1046)
* Update MetalLB to v0.15.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1045)
* Update Kube-OVN to v1.13.13. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)

## Documentation

* [Oracle Cloud Infrastructure installation guide](https://cozystack.io/docs/operations/talos/installation/oracle-cloud/). (@kvaps, @lllamnyp, and @NickVolynkin in https://github.com/cozystack/website/pull/168)
* [Cluster configuration with `talosctl`](https://cozystack.io/docs/operations/talos/configuration/talosctl/). (@NickVolynkin in https://github.com/cozystack/website/pull/211)
* [Configuring container registry mirrors for tenant Kubernetes clusters](https://cozystack.io/docs/operations/talos/configuration/air-gapped/#5-configure-container-registry-mirrors-for-tenant-kubernetes). (@klinch0 in https://github.com/cozystack/website/pull/210)
* [Explain application management strategies and available versions for managed applications](https://cozystack.io/docs/guides/applications/). (@NickVolynkin in https://github.com/cozystack/website/pull/219)
* [How to clean up etcd state](https://cozystack.io/docs/operations/faq/#how-to-clean-up-etcd-state). (@gwynbleidd2106 in https://github.com/cozystack/website/pull/214)
* [State that Cozystack is a CNCF Sandbox project](https://github.com/cozystack/cozystack?tab=readme-ov-file#cozystack). (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1055)

## Development, Testing, and CI/CD

* [tests] Add tests for applications `virtual-machine`, `vm-disk`, `vm-instance`, `postgresql`, `mysql`, and `clickhouse`. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1048, patched by @kvaps in https://github.com/cozystack/cozystack/pull/1074)
* [tests] Fix concurrency for the `docker login` action. (@kvaps in https://github.com/cozystack/cozystack/pull/1014)
* [tests] Increase QEMU system disk size in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1011)
* [tests] Increase the waiting timeout for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1038)
* [ci] Separate build and testing jobs in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1005 and https://github.com/cozystack/cozystack/pull/1010)
* [ci] Fix the release assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006 and https://github.com/cozystack/cozystack/pull/1009)

## New Contributors

* @kevin880202 made their first contribution in https://github.com/cozystack/cozystack/pull/948
* @mattia-eleuteri made their first contribution in https://github.com/cozystack/cozystack/pull/1027

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.32.0

<!--
HEAD https://github.com/cozystack/cozystack/commit/3ce6dbe8
-->

docs/changelogs/v0.32.1.md (new file, 38 lines)

## Major Features and Improvements

* [postgres] Introduce new functionality for backup and restore in PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/1086)
* [apps] Refactor resources in managed applications. (@kvaps in https://github.com/cozystack/cozystack/pull/1106)
* [system] Make VMAgent's `extraArgs` tunable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1091)

## Fixes

* [postgres] Escape user and database names. (@kvaps in https://github.com/cozystack/cozystack/pull/1087)
* [tenant] Fix monitoring agent HelmReleases for tenant clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/1079)
* [kubernetes] Wrap cert-manager CRDs in a conditional. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1076)
* [kubernetes] Remove the `useCustomSecretForPatchContainerd` option and enable it by default. (@kvaps in https://github.com/cozystack/cozystack/pull/1104)
* [apps] Increase default resource presets for Clickhouse and Kafka from `nano` to `small`. Update OpenAPI specs and readmes. (@kvaps in https://github.com/cozystack/cozystack/pull/1103 and https://github.com/cozystack/cozystack/pull/1105)
* [linstor] Add configurable DRBD network options for connection and timeout settings, replacing scripted logic for detecting devices that lost connection. (@kvaps in https://github.com/cozystack/cozystack/pull/1094)

## Dependencies

* Update cozy-proxy to v0.2.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1081)
* Update Kafka Operator to 0.45.1-rc1. (@kvaps in https://github.com/cozystack/cozystack/pull/1082 and https://github.com/cozystack/cozystack/pull/1102)
* Update Flux Operator to 0.23.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/1078)

## Documentation

* [docs] Release notes for v0.32.0 and two beta versions. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1043)

## Development, Testing, and CI/CD

* [tests] Add Kafka and Redis tests. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1077)
* [tests] Increase disk space for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1097)
* [tests] Update Kubernetes to v1.33. (@kvaps in https://github.com/cozystack/cozystack/pull/1083)
* [tests] Increase PostgreSQL timeouts. (@kvaps in https://github.com/cozystack/cozystack/pull/1108)
* [tests] Don't wait for the PostgreSQL read-only service. (@kvaps in https://github.com/cozystack/cozystack/pull/1109)
* [ci] Set up a systemd timer to tear down the sandbox. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1092)
* [ci] Split the testing job into several. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1075)
* [ci] Run E2E tests as separate parallel jobs. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1093)
* [ci] Refactor GitHub workflows. (@kvaps in https://github.com/cozystack/cozystack/pull/1107)

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.32.0...v0.32.1

docs/changelogs/v0.33.0.md (new file, 91 lines)

> [!WARNING]
> A patch release [0.33.2](https://github.com/cozystack/cozystack/releases/tag/v0.33.2) fixing a regression in 0.33.0 has been released.
> It is recommended to skip this version and upgrade to [0.33.2](https://github.com/cozystack/cozystack/releases/tag/v0.33.2) instead.

## Feature Highlights

### Unified CPU and Memory Allocation Management

In version 0.31.0, Cozystack introduced a single-point-of-truth configuration variable, `cpu-allocation-ratio`,
making CPU resource requests and limits uniform in Virtual Machines managed by KubeVirt.
The new release 0.33.0 introduces `memory-allocation-ratio` and expands both variables to all managed applications and tenant resource quotas.

Resource presets also respect the allocation ratios and behave in the same way as explicit resource definitions.
The new resource definition format is concise and simple for platform users.

```yaml
# resource definition in the configuration
resources:
  cpu: <defined cpu value>
  memory: <defined memory value>
```

It results in Kubernetes resource requests and limits, based on the defined values and the universal allocation ratios:

```yaml
# actual requests and limits, provided to the application
resources:
  limits:
    cpu: <defined cpu value>
    memory: <defined memory value>
  requests:
    cpu: <defined cpu value / cpu-allocation-ratio>
    memory: <defined memory value / memory-allocation-ratio>
```
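
For instance, with illustrative ratios `cpu-allocation-ratio: 10` and `memory-allocation-ratio: 2`, a definition of 2 CPUs and 4Gi of memory would expand as follows:

```yaml
# Worked example (illustrative ratios: cpu 10, memory 2)
resources:
  cpu: 2
  memory: 4Gi
# expands to:
resources:
  limits:
    cpu: 2        # defined value
    memory: 4Gi   # defined value
  requests:
    cpu: 200m     # 2 / 10
    memory: 2Gi   # 4Gi / 2
```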

When updating from earlier Cozystack versions, resource configuration in managed applications will be automatically migrated to the new format.

### Backing up and Restoring Data in Tenant Kubernetes

One of the main features of the release is backup capability for PVCs in tenant Kubernetes clusters.
It enables platform and tenant administrators to back up and restore data used by services in the tenant clusters.

This new functionality in Cozystack is powered by [Velero](https://velero.io/) and needs external S3-compatible storage.

### Support for NFS Storage

Cozystack now supports using NFS shared storage with a new optional system module.
See the documentation: https://cozystack.io/docs/operations/storage/nfs/.
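
As a sketch, optional system modules are toggled through the Cozystack ConfigMap; assuming the usual `bundle-enable` key, enabling the module could look like this (see the linked NFS documentation for the authoritative procedure):

```yaml
# Sketch: enable the optional nfs-driver system module.
# The bundle-enable key is an assumption about how optional modules are toggled.
data:
  bundle-enable: "nfs-driver"
```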

## Features and Improvements

* [kubernetes] Enable PVC backups in tenant Kubernetes clusters, powered by [Velero](https://velero.io/). (@klinch0 in https://github.com/cozystack/cozystack/pull/1132)
* [nfs-driver] Enable NFS support by introducing a new optional system module `nfs-driver`. (@kvaps in https://github.com/cozystack/cozystack/pull/1133)
* [virtual-machine] Configure CPU sockets available to VMs with the `resources.cpu.sockets` configuration value. (@klinch0 in https://github.com/cozystack/cozystack/pull/1131)
* [virtual-machine] Add support for using pre-imported "golden image" disks for virtual machines, enabling faster provisioning by referencing existing images instead of downloading via HTTP. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1112)
* [kubernetes] Add an option to expose the Ingress-NGINX controller in a tenant Kubernetes cluster via LoadBalancer. The new configuration value `exposeMethod` offers a choice of `Proxied` and `LoadBalancer`; see the sketch after this list. (@kvaps in https://github.com/cozystack/cozystack/pull/1114)
* [apps] When updating from earlier Cozystack versions, automatically migrate to the new resource definition format: from `resources.requests.[cpu,memory]` and `resources.limits.[cpu,memory]` to `resources.[cpu,memory]`. (@kvaps in https://github.com/cozystack/cozystack/pull/1127)
* [apps] Give examples of new resource definitions in the managed app READMEs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1120)
* [tenant] Respect `cpu-allocation-ratio` in tenant's `resourceQuotas`. (@kvaps in https://github.com/cozystack/cozystack/pull/1119)
* [cozy-lib] Introduce a helper function to calculate Java heap params based on memory requests and limits. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1157)
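
A minimal sketch of the `exposeMethod` choice in the tenant cluster's Ingress-NGINX addon values (only the `exposeMethod` key and its two options are confirmed above; the surrounding layout is illustrative):

```yaml
# Tenant Kubernetes values (sketch): expose Ingress-NGINX via a LoadBalancer service.
addons:
  ingressNginx:
    enabled: true                # illustrative: enabling the addon
    exposeMethod: LoadBalancer   # or: Proxied
```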

## Security

* [monitoring] Disable sign-up in Alerta. (@klinch0 in https://github.com/cozystack/cozystack/pull/1129)

## Fixes

* [platform] Always set resources for managed apps. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1156)
* [platform] Remove the memory limit for the Keycloak deployment. (@klinch0 in https://github.com/cozystack/cozystack/pull/1122)
* [kubernetes] Fix a condition in the ingress template for tenant Kubernetes. (@kvaps in https://github.com/cozystack/cozystack/pull/1143)
* [kubernetes] Fix a deadlock on reattaching a KubeVirt-CSI volume. (@kvaps in https://github.com/cozystack/cozystack/pull/1135)
* [mysql] MySQL applications with a single replica now correctly create a `LoadBalancer` service. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1113)
* [etcd] Fix resources and headless services in the etcd application. (@kvaps in https://github.com/cozystack/cozystack/pull/1128)
* [apps] Enable selecting `resourcePreset` from a drop-down list for all applications by adding an enum of allowed values in the config scheme. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1117)
* [apps] Refactor resource presets provided to managed apps by `cozy-lib`. (@kvaps in https://github.com/cozystack/cozystack/pull/1155)
* [keycloak] Calculate and pass Java heap parameters explicitly to prevent OOM errors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1157)

## Development, Testing, and CI/CD

* [dx] Introduce the cozyreport tool and gather reports in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1139)
* [ci] Use Nexus as a pull-through cache for CI. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1124)
* [ci] Save a list of observed images after each workflow run. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1089)
* [ci] Skip Cozystack tests on PRs that only change the docs. Don't restart CI when a PR is labeled. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1136)
* [dx] Fix Makefile variables for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1115)
* [tests] Introduce self-destructing testing environments. (@kvaps in https://github.com/cozystack/cozystack/pull/1138, https://github.com/cozystack/cozystack/pull/1140, https://github.com/cozystack/cozystack/pull/1141, https://github.com/cozystack/cozystack/pull/1142)
* [e2e] Retry flaky application tests to improve total test time. (@kvaps in https://github.com/cozystack/cozystack/pull/1123)
* [maintenance] Add a PR template. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1121)

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.32.1...v0.33.0

docs/changelogs/v0.33.1.md (new file, 3 lines)

## Fixes

* [kubevirt-csi] Fix a regression by updating the role of the CSI controller. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1165)

docs/changelogs/v0.33.2.md (new file, 19 lines)

## Features and Improvements

* [vm-instance] Enable running [Windows](https://cozystack.io/docs/operations/virtualization/windows/) and [MikroTik RouterOS](https://cozystack.io/docs/operations/virtualization/mikrotik/) in Cozystack. Add the `bus` option and always specify `bootOrder` for all disks. (@kvaps in https://github.com/cozystack/cozystack/pull/1168)
* [cozystack-api] Refactor the OpenAPI schema and support reading it from config. (@kvaps in https://github.com/cozystack/cozystack/pull/1173)
* [cozystack-api] Enable using singular resource names in the Cozystack API. For example, `kubectl get tenant` is now a valid command, in addition to `kubectl get tenants`. (@kvaps in https://github.com/cozystack/cozystack/pull/1169)
* [postgres] Explain how to back up and restore PostgreSQL using Velero backups. (@klinch0 and @NickVolynkin in https://github.com/cozystack/cozystack/pull/1141)

## Fixes

* [virtual-machine, vm-instance] Adjust the RBAC role to let users read the service associated with the VMs they create. Users can now see details of the service in the dashboard and read the IP address of the VM. (@klinch0 in https://github.com/cozystack/cozystack/pull/1161)
* [cozystack-api] Fix an error with `resourceVersion` which resulted in the message 'failed to update HelmRelease: helmreleases.helm.toolkit.fluxcd.io "xxx" is invalid...'. (@kvaps in https://github.com/cozystack/cozystack/pull/1170)
* [cozystack-api] Fix an error in updating lists in Cozystack objects, which resulted in the message "Warning: resource ... is missing the kubectl.kubernetes.io/last-applied-configuration annotation". (@kvaps in https://github.com/cozystack/cozystack/pull/1171)
* [cozystack-api] Disable `startegic-json-patch` support. (@kvaps in https://github.com/cozystack/cozystack/pull/1179)
* [dashboard] Fix the code for removing dashboard comments, which mistakenly removed the shebang from cloudInit scripts. (@kvaps in https://github.com/cozystack/cozystack/pull/1175)
* [virtual-machine] Fix cloudInit and sshKeys processing. (@kvaps in https://github.com/cozystack/cozystack/pull/1175 and https://github.com/cozystack/cozystack/commit/da3ee5d0ea9e87529c8adc4fcccffabe8782292e)
* [applications] Fix a typo in the preset resource tables in the built-in documentation of managed applications. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1172)
* [kubernetes] Enable deleting the Velero component from a tenant Kubernetes cluster. (@klinch0 in https://github.com/cozystack/cozystack/pull/1176)

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.33.1...v0.33.2

docs/release.md (new file, 166 lines)

# Release Workflow

This document describes Cozystack’s release process.

## Introduction

Cozystack uses a staged release process to ensure stability and flexibility during development.

There are three types of releases:

- **Release Candidates (RC)** – Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases** – Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases** – Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.

Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.

## Release Candidates

Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.

Release candidates are numbered `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into `main` and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.

Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in `main`.
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.
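For reference, cutting an RC is an ordinary tag on `main` (a minimal sketch; the remote name `origin` is an assumption):

```bash
# Tag the current tip of main as a release candidate and publish the tag.
git checkout main
git pull
git tag v0.42.0-rc.1
git push origin v0.42.0-rc.1
```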
## Regular Releases

When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub (see the sketch after this list).
2. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
    2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.
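In practice, step 1 is a plain tag push (a minimal sketch; the remote name `origin` is an assumption):

```bash
# Tag the chosen commit on main and push the tag;
# the tag push is what triggers the CI workflow in step 2.
git checkout main
git pull
git tag v0.42.0
git push origin v0.42.0
```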
```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

When testing and editing are completed, the sequence continues:

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on merge:
    1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing the tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```
## Patch Releases

Making a patch release has a lot in common with a regular release, with a few differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that were released in `v0.42.0`.

Once problems are found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```

1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
   These must be only patches to features that were present in version `v0.42.0`.

   Cherry-picking can be done as soon as each patch is merged into `main`, or directly before the release, as shown in the sketch below.
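A minimal sketch of step 1 (the commit hashes are placeholders, not real values):

```bash
# Create the release branch from the v0.42.0 tag and publish it.
git checkout -b release-0.42 v0.42.0
git push -u origin release-0.42

# Cherry-pick each patch commit from main by hash; -x records
# the original commit hash in the cherry-picked commit message.
git cherry-pick -x <sha-of-patch-1>
git cherry-pick -x <sha-of-patch-2>
git push origin release-0.42
```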
```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```

When all relevant patch commits are cherry-picked, the branch is ready for release.

2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and then pushes it to GitHub.
3. CI workflow triggers on tag push:
    1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
    2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, when the release is confirmed, the release sequence continues:

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on merge:
    1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing the tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.
13
go.mod
@@ -37,6 +37,7 @@ require (
 github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 github.com/evanphx/json-patch/v5 v5.9.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
@@ -91,14 +92,14 @@ require (
 go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 go.uber.org/multierr v1.11.0 // indirect
 go.uber.org/zap v1.27.0 // indirect
-golang.org/x/crypto v0.28.0 // indirect
+golang.org/x/crypto v0.31.0 // indirect
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-golang.org/x/net v0.30.0 // indirect
+golang.org/x/net v0.33.0 // indirect
 golang.org/x/oauth2 v0.23.0 // indirect
-golang.org/x/sync v0.8.0 // indirect
+golang.org/x/sync v0.10.0 // indirect
-golang.org/x/sys v0.26.0 // indirect
+golang.org/x/sys v0.28.0 // indirect
-golang.org/x/term v0.25.0 // indirect
+golang.org/x/term v0.27.0 // indirect
-golang.org/x/text v0.19.0 // indirect
+golang.org/x/text v0.21.0 // indirect
 golang.org/x/time v0.7.0 // indirect
 golang.org/x/tools v0.26.0 // indirect
 gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
28
go.sum
@@ -26,8 +26,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
 github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
+github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
-github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -212,8 +212,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -222,26 +222,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
 golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
 golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
 golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
32
hack/cdi_golden_image_create.sh
Normal file
@@ -0,0 +1,32 @@
#!/bin/bash

set -e

name="$1"
url="$2"

if [ -z "$name" ] || [ -z "$url" ]; then
  echo "Usage: $0 <name> <url>"
  echo "Example: $0 'ubuntu' 'https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img'"
  exit 1
fi

#### create a DataVolume source for CDI image cloning
kubectl create -f - <<EOF
apiVersion: cdi.kubevirt.io/v1beta1
kind: DataVolume
metadata:
  name: "vm-image-$name"
  namespace: cozy-public
  annotations:
    cdi.kubevirt.io/storage.bind.immediate.requested: "true"
spec:
  source:
    http:
      url: "$url"
  storage:
    resources:
      requests:
        storage: 5Gi
    storageClassName: replicated
EOF
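For example, registering an Ubuntu golden image (assumes kubectl access to a Cozystack cluster with the `cozy-public` namespace and the `replicated` storage class):

```bash
./hack/cdi_golden_image_create.sh ubuntu \
    'https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img'
```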
8
hack/collect-images.sh
Executable file
@@ -0,0 +1,8 @@
#!/bin/sh

# Collect the container image lists from each Talos node of the test cluster.
for node in 11 12 13; do
  talosctl -n 192.168.123.${node} -e 192.168.123.${node} images ls >> images.tmp
  talosctl -n 192.168.123.${node} -e 192.168.123.${node} images --namespace system ls >> images.tmp
done

# Reduce the raw listing to unique "digest name" pairs.
while read _ name sha _ ; do echo $sha $name ; done < images.tmp | sort -u > images.txt
147
hack/cozyreport.sh
Executable file
@@ -0,0 +1,147 @@
#!/bin/sh
REPORT_DATE=$(date +%Y-%m-%d_%H-%M-%S)
REPORT_NAME=${1:-cozyreport-$REPORT_DATE}
REPORT_PDIR=$(mktemp -d)
REPORT_DIR=$REPORT_PDIR/$REPORT_NAME

# -- check dependencies
command -V kubectl >/dev/null || exit $?
command -V tar >/dev/null || exit $?

# -- cozystack module

echo "Collecting Cozystack information..."
mkdir -p $REPORT_DIR/cozystack
kubectl get deploy -n cozy-system cozystack -o jsonpath='{.spec.template.spec.containers[0].image}' > $REPORT_DIR/cozystack/image.txt 2>&1
kubectl get cm -n cozy-system --no-headers | awk '$1 ~ /^cozystack/' |
while read NAME _; do
  DIR=$REPORT_DIR/cozystack/configs
  mkdir -p $DIR
  kubectl get cm -n cozy-system $NAME -o yaml > $DIR/$NAME.yaml 2>&1
done

# -- kubernetes module

echo "Collecting Kubernetes information..."
mkdir -p $REPORT_DIR/kubernetes
kubectl version > $REPORT_DIR/kubernetes/version.txt 2>&1

echo "Collecting nodes..."
kubectl get nodes -o wide > $REPORT_DIR/kubernetes/nodes.txt 2>&1
kubectl get nodes --no-headers | awk '$2 != "Ready"' |
while read NAME _; do
  DIR=$REPORT_DIR/kubernetes/nodes/$NAME
  mkdir -p $DIR
  kubectl get node $NAME -o yaml > $DIR/node.yaml 2>&1
  kubectl describe node $NAME > $DIR/describe.txt 2>&1
done

echo "Collecting namespaces..."
kubectl get ns -o wide > $REPORT_DIR/kubernetes/namespaces.txt 2>&1
kubectl get ns --no-headers | awk '$2 != "Active"' |
while read NAME _; do
  DIR=$REPORT_DIR/kubernetes/namespaces/$NAME
  mkdir -p $DIR
  kubectl get ns $NAME -o yaml > $DIR/namespace.yaml 2>&1
  kubectl describe ns $NAME > $DIR/describe.txt 2>&1
done

echo "Collecting helmreleases..."
kubectl get hr -A > $REPORT_DIR/kubernetes/helmreleases.txt 2>&1
kubectl get hr -A --no-headers | awk '$4 != "True"' | \
while read NAMESPACE NAME _; do
  DIR=$REPORT_DIR/kubernetes/helmreleases/$NAMESPACE/$NAME
  mkdir -p $DIR
  kubectl get hr -n $NAMESPACE $NAME -o yaml > $DIR/hr.yaml 2>&1
  kubectl describe hr -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done

echo "Collecting pods..."
kubectl get pod -A -o wide > $REPORT_DIR/kubernetes/pods.txt 2>&1
kubectl get pod -A --no-headers | awk '$4 !~ /Running|Succeeded|Completed/' |
while read NAMESPACE NAME _ STATE _; do
  DIR=$REPORT_DIR/kubernetes/pods/$NAMESPACE/$NAME
  mkdir -p $DIR
  CONTAINERS=$(kubectl get pod -o jsonpath='{.spec.containers[*].name}' -n $NAMESPACE $NAME)
  kubectl get pod -n $NAMESPACE $NAME -o yaml > $DIR/pod.yaml 2>&1
  kubectl describe pod -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
  if [ "$STATE" != "Pending" ]; then
    for CONTAINER in $CONTAINERS; do
      kubectl logs -n $NAMESPACE $NAME $CONTAINER > $DIR/logs-$CONTAINER.txt 2>&1
      kubectl logs -n $NAMESPACE $NAME $CONTAINER --previous > $DIR/logs-$CONTAINER-previous.txt 2>&1
    done
  fi
done

echo "Collecting virtualmachines..."
kubectl get vm -A > $REPORT_DIR/kubernetes/vms.txt 2>&1
kubectl get vm -A --no-headers | awk '$5 != "True"' |
while read NAMESPACE NAME _; do
  DIR=$REPORT_DIR/kubernetes/vm/$NAMESPACE/$NAME
  mkdir -p $DIR
  kubectl get vm -n $NAMESPACE $NAME -o yaml > $DIR/vm.yaml 2>&1
  kubectl describe vm -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done

echo "Collecting virtualmachine instances..."
kubectl get vmi -A > $REPORT_DIR/kubernetes/vmis.txt 2>&1
kubectl get vmi -A --no-headers | awk '$4 != "Running"' |
while read NAMESPACE NAME _; do
  DIR=$REPORT_DIR/kubernetes/vmi/$NAMESPACE/$NAME
  mkdir -p $DIR
  kubectl get vmi -n $NAMESPACE $NAME -o yaml > $DIR/vmi.yaml 2>&1
  kubectl describe vmi -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done

echo "Collecting services..."
kubectl get svc -A > $REPORT_DIR/kubernetes/services.txt 2>&1
kubectl get svc -A --no-headers | awk '$4 == "<pending>"' |
while read NAMESPACE NAME _; do
  DIR=$REPORT_DIR/kubernetes/services/$NAMESPACE/$NAME
  mkdir -p $DIR
  kubectl get svc -n $NAMESPACE $NAME -o yaml > $DIR/service.yaml 2>&1
  kubectl describe svc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done

echo "Collecting pvcs..."
kubectl get pvc -A > $REPORT_DIR/kubernetes/pvcs.txt 2>&1
kubectl get pvc -A --no-headers | awk '$3 != "Bound"' |
while read NAMESPACE NAME _; do
  DIR=$REPORT_DIR/kubernetes/pvc/$NAMESPACE/$NAME
  mkdir -p $DIR
  kubectl get pvc -n $NAMESPACE $NAME -o yaml > $DIR/pvc.yaml 2>&1
  kubectl describe pvc -n $NAMESPACE $NAME > $DIR/describe.txt 2>&1
done

# -- kamaji module

if kubectl get deploy -n cozy-kamaji kamaji >/dev/null 2>&1; then
  echo "Collecting kamaji resources..."
  DIR=$REPORT_DIR/kamaji
  mkdir -p $DIR
  kubectl logs -n cozy-kamaji deployment/kamaji > $DIR/kamaji-controller.log 2>&1
  kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A > $DIR/kamajicontrolplanes.txt 2>&1
  kubectl get kamajicontrolplanes.controlplane.cluster.x-k8s.io -A -o yaml > $DIR/kamajicontrolplanes.yaml 2>&1
  kubectl get tenantcontrolplanes.kamaji.clastix.io -A > $DIR/tenantcontrolplanes.txt 2>&1
  kubectl get tenantcontrolplanes.kamaji.clastix.io -A -o yaml > $DIR/tenantcontrolplanes.yaml 2>&1
fi

# -- linstor module

if kubectl get deploy -n cozy-linstor linstor-controller >/dev/null 2>&1; then
  echo "Collecting linstor resources..."
  DIR=$REPORT_DIR/linstor
  mkdir -p $DIR
  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color n l > $DIR/nodes.txt 2>&1
  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color sp l > $DIR/storage-pools.txt 2>&1
  kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor --no-color r l > $DIR/resources.txt 2>&1
fi

# -- finalization

echo "Creating archive..."
tar -czf $REPORT_NAME.tgz -C $REPORT_PDIR .
echo "Report created: $REPORT_NAME.tgz"

echo "Cleaning up..."
rm -rf $REPORT_PDIR
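A typical invocation (assumes kubectl is already pointed at the affected cluster):

```bash
# Collect a support bundle into ./cozyreport-<timestamp>.tgz,
# or pass an explicit archive name as the first argument.
./hack/cozyreport.sh
./hack/cozyreport.sh my-incident-report   # produces my-incident-report.tgz
```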
117
hack/cozytest.sh
Executable file
@@ -0,0 +1,117 @@
#!/bin/sh
###############################################################################
# cozytest.sh - Bats-compatible test runner with live trace and enhanced     #
#               output, written in pure shell                                #
###############################################################################
set -eu

TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
PATTERN=${2:-*}
LINE='----------------------------------------------------------------'

cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
BEGIN=$(date +%s)
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }

###############################################################################
# run_one <fn> <title>                                                        #
###############################################################################
run_one() {
  fn=$1 title=$2
  tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
  log="$tmp/log"

  echo "╭ » Run test: $title"
  START=$(date +%s)
  skip_next="+ $fn"  # skip the first trace line, which only names the function

  {
    (
      PS4='+ '    # prefix for set -x
      set -eu -x  # strict + trace
      "$fn"
    )
    printf '__RC__%s\n' "$?"
  } 2>&1 | tee "$log" | while IFS= read -r line; do
    case "$line" in
      '__RC__'*) : ;;
      '+ '*) cmd=${line#'+ '}
             [ "$cmd" = "${skip_next#+ }" ] && continue
             case "$cmd" in
               'set -e'|'set -x'|'set -u'|'return 0') continue ;;
             esac
             out=$cmd ;;
      *) out=$line ;;
    esac
    now=$(( $(date +%s) - START ))
    [ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
    printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
  done

  rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
  [ -z "$rc" ] && rc=1
  now=$(( $(date +%s) - START ))

  if [ "$rc" -eq 0 ]; then
    printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
  else
    printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
      $((now/60)) $((now%60)) "$title" "$rc"
    echo "----- captured output -----------------------------------------"
    grep -v '^__RC__' "$log"
    echo "$LINE"
    exit "$rc"
  fi

  rm -rf "$tmp"
}

###############################################################################
# convert .bats -> shell functions                                            #
###############################################################################
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
trap 'rm -f "$TMP_SH"' EXIT
awk '
  /^@test[[:space:]]+"/ {
    line = substr($0, index($0, "\"") + 1)
    title = substr(line, 1, index(line, "\"") - 1)
    fname = "test_"
    for (i = 1; i <= length(title); i++) {
      c = substr(title, i, 1)
      fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
    }
    printf("### %s\n", title)
    printf("%s() {\n", fname)
    print "  set -e"   # any failing command fails the test
    next
  }
  /^}$/ {
    print "  return 0" # if the test body never exits non-zero, the test passes
    print "}"
    next
  }
  { print }
' "$TEST_FILE" > "$TMP_SH"

[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
# shellcheck disable=SC1090
. "$TMP_SH"

###############################################################################
# run selected tests                                                          #
###############################################################################
awk -v pat="$PATTERN" '
  /^### / {
    title = substr($0, 5)
    name = "test_"
    for (i = 1; i <= length(title); i++) {
      c = substr(title, i, 1)
      name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
    }
    if (pat == "*" || index(title, pat) > 0)
      printf("%s %s\n", name, title)
  }
' "$TMP_SH" | while IFS=' ' read -r fn title; do
  run_one "$fn" "$title"
done
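Usage follows the runner's built-in usage string; the test files below are the suites it was written for:

```bash
# Run every test in a .bats file:
./hack/cozytest.sh hack/e2e-apps/redis.bats

# Run only the tests whose titles contain a given substring:
./hack/cozytest.sh hack/e2e-apps/redis.bats "Create Redis"
```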
41
hack/e2e-apps/clickhouse.bats
Normal file
@@ -0,0 +1,41 @@
#!/usr/bin/env bats

@test "Create DB ClickHouse" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: $name
  namespace: tenant-test
spec:
  size: 10Gi
  logStorageSize: 2Gi
  shards: 1
  replicas: 2
  storageClass: ""
  logTTL: 15
  users:
    testuser:
      password: xai7Wepo
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/clickhouse-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
}
51
hack/e2e-apps/kafka.bats
Normal file
@@ -0,0 +1,51 @@
#!/usr/bin/env bats

@test "Create Kafka" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  kafka:
    size: 10Gi
    replicas: 2
    storageClass: ""
    resources: {}
    resourcesPreset: "nano"
  zookeeper:
    size: 5Gi
    replicas: 2
    storageClass: ""
    resources: {}
    resourcesPreset: "nano"
  topics:
    - name: testResults
      partitions: 1
      replicas: 2
      config:
        min.insync.replicas: 2
    - name: testOrders
      config:
        cleanup.policy: compact
        segment.ms: 3600000
        max.compaction.lag.ms: 5400000
        min.insync.replicas: 2
      partitions: 1
      replicas: 2
EOF
  sleep 5
  kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
  kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
  timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
  kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete kafka.apps.cozystack.io $name
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}
113
hack/e2e-apps/kubernetes.bats
Normal file
@@ -0,0 +1,113 @@
#!/usr/bin/env bats

run_kubernetes_test() {
  local version_expr="$1"
  local test_name="$2"
  local port="$3"
  local k8s_version=$(yq "$version_expr" packages/apps/kubernetes/files/versions.yaml)

  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: "${test_name}"
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
    fluxcd:
      enabled: false
      valuesOverride: {}
    gatewayAPI:
      enabled: false
    gpuOperator:
      enabled: false
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
  controlPlane:
    apiServer:
      resources: {}
      resourcesPreset: small
    controllerManager:
      resources: {}
      resourcesPreset: micro
    konnectivity:
      server:
        resources: {}
        resourcesPreset: micro
    replicas: 2
    scheduler:
      resources: {}
      resourcesPreset: micro
  host: ""
  nodeGroups:
    md0:
      ephemeralStorage: 20Gi
      gpus: []
      instanceType: u1.medium
      maxReplicas: 10
      minReplicas: 0
      resources:
        cpu: ""
        memory: ""
      roles:
        - ingress-nginx
  storageClass: replicated
  version: "${k8s_version}"
EOF
  # Wait for the tenant-test namespace to be active
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active

  # Wait for the Kamaji control plane to be created (retry for up to 10 seconds)
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-'"${test_name}"'; do sleep 1; done'

  # Wait for the tenant control plane to be fully created (timeout after 4 minutes)
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-${test_name} --timeout=4m

  # Wait for Kubernetes resources to be ready (timeout after 2 minutes)
  kubectl wait tcp -n tenant-test kubernetes-${test_name} --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready

  # Wait for all required deployments to be available (timeout after 4 minutes)
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-${test_name} kubernetes-${test_name}-cluster-autoscaler kubernetes-${test_name}-kccm kubernetes-${test_name}-kcsi-controller

  # Wait for the machine deployment to scale to 2 replicas (timeout after 1 minute)
  kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2

  # Get the admin kubeconfig and save it to a file
  kubectl get secret kubernetes-${test_name}-admin-kubeconfig -ojsonpath='{.data.super-admin\.conf}' -n tenant-test | base64 -d > tenantkubeconfig

  # Update the kubeconfig to use localhost for the API server
  yq -i ".clusters[0].cluster.server = \"https://localhost:${port}\"" tenantkubeconfig

  # Set up port forwarding to the Kubernetes API server for a 40 second timeout
  bash -c 'timeout 40s kubectl port-forward service/kubernetes-'"${test_name}"' -n tenant-test '"${port}"':6443 > /dev/null 2>&1 &'

  # Verify the Kubernetes version matches what we expect (retry for up to 20 seconds;
  # note the quoting splice so ${k8s_version} expands in the parent shell)
  timeout 20 sh -ec 'until kubectl --kubeconfig tenantkubeconfig version 2>/dev/null | grep -Fq "Server Version: '"${k8s_version}"'"; do sleep 5; done'

  # Wait for all machine deployment replicas to be ready (timeout after 10 minutes)
  kubectl wait machinedeployment kubernetes-${test_name}-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2

  # Clean up by deleting the Kubernetes resource
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io $test_name
}

@test "Create a tenant Kubernetes control plane with latest version" {
  run_kubernetes_test 'keys | sort_by(.) | .[-1]' 'test-latest-version' '59991'
}

@test "Create a tenant Kubernetes control plane with previous version" {
  run_kubernetes_test 'keys | sort_by(.) | .[-2]' 'test-previous-version' '59992'
}
46
hack/e2e-apps/mysql.bats
Normal file
@@ -0,0 +1,46 @@
#!/usr/bin/env bats

@test "Create DB MySQL" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  users:
    testuser:
      maxUserConnections: 1000
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}
54
hack/e2e-apps/postgres.bats
Normal file
@@ -0,0 +1,54 @@
#!/usr/bin/env bats

@test "Create DB PostgreSQL" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  postgresql:
    parameters:
      max_connections: 100
  quorum:
    minSyncReplicas: 0
    maxSyncReplicas: 0
  users:
    testuser:
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  # for some reason it takes longer for the read-only endpoint to be ready
  #timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
  kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}
26
hack/e2e-apps/redis.bats
Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bats

@test "Create Redis" {
  name='test'
  kubectl apply -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 1Gi
  replicas: 2
  storageClass: ""
  authEnabled: true
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
  kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test delete redis.apps.cozystack.io $name
}
47
hack/e2e-apps/virtualmachine.bats
Normal file
@@ -0,0 +1,47 @@
#!/usr/bin/env bats

@test "Create a Virtual Machine" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  systemDisk:
    image: ubuntu
    storage: 5Gi
    storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}
68
hack/e2e-apps/vminstance.bats
Normal file
@@ -0,0 +1,68 @@
#!/usr/bin/env bats

@test "Create a VM Disk" {
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
  name: $name
  namespace: tenant-test
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
  optical: false
  storage: 5Gi
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  running: true
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  disks:
    - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}
||||||
189
hack/e2e-install-cozystack.bats
Normal file
189
hack/e2e-install-cozystack.bats
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
#!/usr/bin/env bats
|
||||||
|
|
||||||
|
@test "Required installer assets exist" {
|
||||||
|
if [ ! -f _out/assets/cozystack-installer.yaml ]; then
|
||||||
|
echo "Missing: _out/assets/cozystack-installer.yaml" >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Install Cozystack" {
|
||||||
|
# Create namespace & configmap required by installer
|
||||||
|
kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
|
||||||
|
kubectl create configmap cozystack -n cozy-system \
|
||||||
|
--from-literal=bundle-name=paas-full \
|
||||||
|
--from-literal=ipv4-pod-cidr=10.244.0.0/16 \
|
||||||
|
--from-literal=ipv4-pod-gateway=10.244.0.1 \
|
||||||
|
--from-literal=ipv4-svc-cidr=10.96.0.0/16 \
|
||||||
|
--from-literal=ipv4-join-cidr=100.64.0.0/16 \
|
||||||
|
--from-literal=root-host=example.org \
|
||||||
|
--from-literal=api-server-endpoint=https://192.168.123.10:6443 \
|
||||||
|
--dry-run=client -o yaml | kubectl apply -f -
|
||||||
|
|
||||||
|
# Apply installer manifests from file
|
||||||
|
kubectl apply -f _out/assets/cozystack-installer.yaml
|
||||||
|
|
||||||
|
# Wait for the installer deployment to become available
|
||||||
|
kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available
|
||||||
|
|
||||||
|
# Wait until HelmReleases appear & reconcile them
|
||||||
|
timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
|
||||||
|
sleep 5
|
||||||
|
kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex
|
||||||
|
|
||||||
|
# Fail the test if any HelmRelease is not Ready
|
||||||
|
if kubectl get hr -A | grep -v " True " | grep -v NAME; then
|
||||||
|
kubectl get hr -A
|
||||||
|
echo "Some HelmReleases failed to reconcile" >&2
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "Wait for Cluster‑API provider deployments" {
|
||||||
|
# Wait for Cluster‑API provider deployments
|
||||||
|
timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
|
||||||
|
kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
|
||||||
|
}

@test "Wait for LINSTOR and configure storage" {
  # Linstor controller and nodes
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'

  created_pools=$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data --pastable | awk '$2 == "data" {printf " " $4} END{printf " "}')
  for node in srv1 srv2 srv3; do
    case $created_pools in
      *" $node "*) echo "Storage pool 'data' already exists on node $node"; continue;;
    esac
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
  done

  # Storage classes
  kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: Immediate
allowVolumeExpansion: true
EOF
}
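
# The storage-pool loop above is idempotent: `linstor sp l -s data --pastable`
# lists existing pools and the case statement skips nodes that already have
# one. A quick manual check of the resulting pools (sketch):
#
#   kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data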

@test "Wait for MetalLB and configure address pool" {
  # MetalLB address pool
  kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses: [192.168.123.200-192.168.123.250]
  autoAssign: true
  avoidBuggyIPs: false
EOF
}
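
# To verify the pool was accepted, one could list the MetalLB resources
# (sketch):
#
#   kubectl get l2advertisements,ipaddresspools -n cozy-metallb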

@test "Check Cozystack API service" {
  kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}

@test "Configure Tenant and wait for applications" {
  # Patch root tenant and wait for its releases
  kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'

  timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready

  if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
    flux reconcile hr monitoring -n tenant-root --force
    kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
  fi

  # Expose Cozystack services through ingress
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'

  # NGINX ingress controller
  timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available

  # etcd statefulset
  kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m

  # VictoriaMetrics components
  kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m

  # Grafana
  kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
  kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m

  # Verify Grafana via ingress
  ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
    echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
    exit 1
  fi
}

@test "Keycloak OIDC stack is healthy" {
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'

  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

@test "Create tenant with isolated mode enabled" {
  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}

hack/e2e-prepare-cluster.bats (new file, 248 lines)
@@ -0,0 +1,248 @@
#!/usr/bin/env bats

# -----------------------------------------------------------------------------
# Cozystack end‑to‑end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi
}

@test "IPv4 forwarding is enabled" {
  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
    echo "IPv4 forwarding is disabled!" >&2
    echo >&2
    echo "Enable it with:" >&2
    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
    exit 1
  fi
}

@test "Clean previous VMs" {
  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
  rm -rf srv1 srv2 srv3
}

@test "Prepare networking and masquerading" {
  ip link del cozy-br0 2>/dev/null || true
  ip link add cozy-br0 type bridge
  ip link set cozy-br0 up
  ip address add 192.168.123.1/24 dev cozy-br0

  # Masquerading rule – idempotent (delete first, then add)
  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}

@test "Prepare cloud‑init drive for VMs" {
  mkdir -p srv1 srv2 srv3

  # Generate cloud‑init ISOs
  for i in 1 2 3; do
    echo "hostname: srv${i}" > "srv${i}/meta-data"

    cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF

    cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF

    ( cd "srv${i}" && genisoimage \
        -output seed.img \
        -volid cidata -rational-rock -joliet \
        user-data meta-data network-config )
  done
}
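
# Note: cloud-init's NoCloud datasource discovers the seed disk by its
# filesystem label, which is why the ISO above is built with `-volid cidata`;
# user-data, meta-data, and network-config must sit at the image root.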

@test "Use Talos NoCloud image from assets" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi

  rm -f nocloud-amd64.raw
  cp _out/assets/nocloud-amd64.raw.xz .
  xz --decompress nocloud-amd64.raw.xz
}

@test "Prepare VM disks" {
  for i in 1 2 3; do
    cp nocloud-amd64.raw srv${i}/system.img
    qemu-img resize srv${i}/system.img 50G
    qemu-img create srv${i}/data.img 100G
  done
}

@test "Create tap devices" {
  for i in 1 2 3; do
    ip link del cozy-srv${i} 2>/dev/null || true
    ip tuntap add dev cozy-srv${i} mode tap
    ip link set cozy-srv${i} up
    ip link set cozy-srv${i} master cozy-br0
  done
}

@test "Boot QEMU VMs" {
  for i in 1 2 3; do
    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
      -drive file=srv${i}/system.img,if=virtio,format=raw \
      -drive file=srv${i}/seed.img,if=virtio,format=raw \
      -drive file=srv${i}/data.img,if=virtio,format=raw \
      -display none -daemonize -pidfile srv${i}/qemu.pid
  done

  # Give qemu a few seconds to start up networking
  sleep 5
}

@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Generate Talos cluster configuration" {
  # Cluster‑wide patches
  cat > patch.yaml <<'EOF'
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://dockerio.nexus.lllamnyp.su
      cr.fluentbit.io:
        endpoints:
          - https://fluentbit.nexus.lllamnyp.su
      docker-registry3.mariadb.com:
        endpoints:
          - https://mariadb.nexus.lllamnyp.su
      gcr.io:
        endpoints:
          - https://gcr.nexus.lllamnyp.su
      ghcr.io:
        endpoints:
          - https://ghcr.nexus.lllamnyp.su
      quay.io:
        endpoints:
          - https://quay.nexus.lllamnyp.su
      registry.k8s.io:
        endpoints:
          - https://k8s.nexus.lllamnyp.su
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOF

  # Control‑plane‑only patches
  cat > patch-controlplane.yaml <<'EOF'
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOF

  # Generate secrets once
  if [ ! -f secrets.yaml ]; then
    talosctl gen secrets
  fi

  rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
  talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
    --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}

@test "Apply Talos configuration to the nodes" {
  # Apply the configuration to all three nodes
  for node in 11 12 13; do
    talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
  done

  # Wait for Talos services to come up again
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Bootstrap Talos cluster" {
  # Bootstrap etcd on the first node
  timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

  # Wait until etcd is healthy
  timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
  timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'

  # Retrieve kubeconfig
  rm -f kubeconfig
  talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10

  # Wait until all three nodes register in Kubernetes
  timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}
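
# Usage (a sketch): this suite is plain Bats and assumes root privileges for
# the bridge/tap and iptables steps, with bats, qemu-system-x86_64, genisoimage,
# talosctl, and kubectl on PATH and the Talos image already under _out/assets:
#
#   bats hack/e2e-prepare-cluster.bats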

@@ -1,165 +0,0 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'

ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease $release_name -n $namespace
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path is required.${RESET}"
    exit 1
  fi

  if [[ -n "$values_file" && -f "$values_file" ]]; then
    local values_section
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}

function install_tenant (){
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks(){
  local checks_file="$1"
  echo "after exec make $checks_file"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"

  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300  # Timeout in seconds
  local interval=5   # Interval between checks in seconds
  local elapsed=0

  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr $release_name $namespace
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi

checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file

hack/e2e.sh (deleted, 351 lines)
@@ -1,351 +0,0 @@
#!/bin/bash

if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
  echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
  echo 'please set it with following command:' >&2
  echo >&2
  echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
  echo >&2
  exit 1
fi

if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
  echo "IPv4 forwarding is not enabled!" >&2
  echo 'please enable forwarding with the following command:' >&2
  echo >&2
  echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
  echo >&2
  exit 1
fi

set -x
set -e

kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true

ip link del cozy-br0 || true
ip link add cozy-br0 type bridge
ip link set cozy-br0 up
ip addr add 192.168.123.1/24 dev cozy-br0

# Enable masquerading
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE

rm -rf srv1 srv2 srv3
mkdir -p srv1 srv2 srv3

# Prepare cloud-init
for i in 1 2 3; do
  echo "hostname: srv$i" > "srv$i/meta-data"
  echo '#cloud-config' > "srv$i/user-data"
  cat > "srv$i/network-config" <<EOT
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1$i/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOT

  ( cd srv$i && genisoimage \
      -output seed.img \
      -volid cidata -rational-rock -joliet \
      user-data meta-data network-config
  )
done

# Prepare system drive
if [ ! -f nocloud-amd64.raw ]; then
  wget https://github.com/cozystack/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
  rm -f nocloud-amd64.raw
  xz --decompress nocloud-amd64.raw.xz
fi
for i in 1 2 3; do
  cp nocloud-amd64.raw srv$i/system.img
  qemu-img resize srv$i/system.img 20G
done

# Prepare data drives
for i in 1 2 3; do
  qemu-img create srv$i/data.img 100G
done

# Prepare networking
for i in 1 2 3; do
  ip link del cozy-srv$i || true
  ip tuntap add dev cozy-srv$i mode tap
  ip link set cozy-srv$i up
  ip link set cozy-srv$i master cozy-br0
done

# Start VMs
for i in 1 2 3; do
  qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 16384 \
    -device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
    -drive file=srv$i/system.img,if=virtio,format=raw \
    -drive file=srv$i/seed.img,if=virtio,format=raw \
    -drive file=srv$i/data.img,if=virtio,format=raw \
    -display none -daemonize -pidfile srv$i/qemu.pid
done

sleep 5

# Wait for VM to start up
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

cat > patch.yaml <<\EOT
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.grpc.v1.cri"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOT

cat > patch-controlplane.yaml <<\EOT
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOT

# Gen configuration
if [ ! -f secrets.yaml ]; then
  talosctl gen secrets
fi

rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
export TALOSCONFIG=$PWD/talosconfig

# Apply configuration
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i

# Wait for VM to be configured
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'

# Bootstrap
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

# Wait for etcd
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'

rm -f kubeconfig
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
export KUBECONFIG=$PWD/kubeconfig

# Wait for kubernetes nodes to appear
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'

kubectl create ns cozy-system -o yaml | kubectl apply -f -
kubectl create -f - <<\EOT
apiVersion: v1
kind: ConfigMap
metadata:
  name: cozystack
  namespace: cozy-system
data:
  bundle-name: "paas-full"
  ipv4-pod-cidr: "10.244.0.0/16"
  ipv4-pod-gateway: "10.244.0.1"
  ipv4-svc-cidr: "10.96.0.0/16"
  ipv4-join-cidr: "100.64.0.0/16"
  root-host: example.org
  api-server-endpoint: https://192.168.123.10:6443
EOT

echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -

# wait for cozystack pod to start
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack

# wait for helmreleases to appear
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'

sleep 5

kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x

# Wait for Cluster-API providers
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager

# Wait for linstor controller
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller

# Wait for all linstor nodes to become Online
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'

kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data

kubectl create -f- <<EOT
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
EOT
kubectl create -f- <<EOT
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools:
    - cozystack
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses:
    - 192.168.123.200-192.168.123.250
  autoAssign: true
  avoidBuggyIPs: false
EOT

# Wait for cozystack-api
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m

kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
  "host": "example.org",
  "ingress": true,
  "monitoring": true,
  "etcd": true,
  "isolated": true
}}'

# Wait for HelmReleases to be created
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'

# Wait for HelmReleases to be installed
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root

kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
  "dashboard": true
}}'

# Wait for nginx-ingress-controller
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller

# Wait for etcd
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd

# Wait for Victoria metrics
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm

# Wait for grafana
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment

# Get IP of nginx-ingress
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')

# Check Grafana
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found

# Test OIDC
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
  "oidc-enabled": "true"
}}'

timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator

@@ -16,24 +16,24 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
   exit 0
 fi

-miss_map=$(echo "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file")
+miss_map=$(mktemp)
+trap 'rm -f "$miss_map"' EXIT
+echo -n "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file" > $miss_map

 # search accross all tags sorted by version
-search_commits=$(git ls-remote --tags origin | grep 'refs/tags/v' | sort -k2,2 -rV | awk '{print $1}')
+search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
+# add latest main commit to search
+search_commits="${search_commits} $(git rev-parse "origin/main")"

 resolved_miss_map=$(
-  echo "$miss_map" | while read -r chart version commit; do
+  while read -r chart version commit; do
     # if version is found in HEAD, it's HEAD
-    if grep -q "^version: $version$" ./${chart}/Chart.yaml; then
+    if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
       echo "$chart $version HEAD"
       continue
     fi

     # if commit is not HEAD, check if it's valid
-    if [ $commit != "HEAD" ]; then
+    if [ "$commit" != "HEAD" ]; then
-      if ! git show "${commit}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
+      if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
         echo "Commit $commit for $chart $version is not valid" >&2
         exit 1
       fi
@@ -46,19 +46,19 @@ resolved_miss_map=$(
     # if commit is HEAD, but version is not found in HEAD, check all tags
     found_tag=""
     for tag in $search_commits; do
-      if git show "${tag}:./${chart}/Chart.yaml" 2>/dev/null | grep -q "^version: $version$"; then
+      if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
         found_tag=$(git rev-parse --short "${tag}")
         break
       fi
     done

     if [ -z "$found_tag" ]; then
-      echo "Can't find $chart $version in any version tag or in the latest main commit" >&2
-      exit 1
+      echo "Can't find $chart $version in any version tag, removing it" >&2
+      continue
     fi

     echo "$chart $version $found_tag"
-  done
+  done < $miss_map
 )

 printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"

hack/package_chart.sh (new executable file, 65 lines)
@@ -0,0 +1,65 @@
#!/bin/sh

set -e

usage() {
  printf "%s\n" "Usage:" >&2
  printf -- "%s\n" '---' >&2
  printf "%s %s\n" "$0" "INPUT_DIR OUTPUT_DIR TMP_DIR [DEPENDENCY_DIR]" >&2
  printf -- "%s\n" '---' >&2
  printf "%s\n" "Takes a helm repository from INPUT_DIR, with an optional library repository in" >&2
  printf "%s\n" "DEPENDENCY_DIR, prepares a view of the git archive at select points in history" >&2
  printf "%s\n" "in TMP_DIR and packages helm charts, outputting the tarballs to OUTPUT_DIR" >&2
}

if [ "x$(basename $PWD)" != "xpackages" ]
then
  echo "Error: This script must run from the ./packages/ directory" >&2
  echo >&2
  usage
  exit 1
fi

if [ "x$#" != "x3" ] && [ "x$#" != "x4" ]
then
  echo "Error: This script takes 3 or 4 arguments" >&2
  echo "Got $# arguments:" "$@" >&2
  echo >&2
  usage
  exit 1
fi

input_dir=$1
output_dir=$2
tmp_dir=$3

if [ "x$#" = "x4" ]
then
  dependency_dir=$4
fi

rm -rf "${output_dir:?}"
mkdir -p "${output_dir}"
while read package _ commit
do
  # this lets devs build the packages from a dirty repo for quick local testing
  if [ "x$commit" = "xHEAD" ]
  then
    helm package "${input_dir}/${package}" -d "${output_dir}"
    continue
  fi
  git archive --format tar "${commit}" "${input_dir}/${package}" | tar -xf- -C "${tmp_dir}/"

  # the library chart is not present in older commits and git archive doesn't fail gracefully if the path is not found
  if [ "x${dependency_dir}" != "x" ] && git ls-tree --name-only "${commit}" "${dependency_dir}" | grep -qx "${dependency_dir}"
  then
    git archive --format tar "${commit}" "${dependency_dir}" | tar -xf- -C "${tmp_dir}/"
  fi
  helm package "${tmp_dir}/${input_dir}/${package}" -d "${output_dir}"
  rm -rf "${tmp_dir:?}/${input_dir:?}/${package:?}"
  if [ "x${dependency_dir}" != "x" ]
  then
    rm -rf "${tmp_dir:?}/${dependency_dir:?}"
  fi
done < "${input_dir}/versions_map"
helm repo index "${output_dir}"
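
# Each line of "${input_dir}/versions_map" is read as "package version commit",
# where commit is either a git commit-ish or the literal HEAD. A sketch of the
# expected input and a hypothetical invocation (chart names and paths assumed
# for illustration only):
#
#   $ cat apps/versions_map
#   mychart 1.2.3 a1b2c3d
#   mychart 1.3.0 HEAD
#
#   $ cd packages && ../hack/package_chart.sh apps ../_out/repo /tmp/chart-tmp library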

hack/testdata/http-cache/check.sh (vendored, deleted, 1 line)
@@ -1 +0,0 @@
return 0

hack/testdata/http-cache/values.yaml (vendored, deleted, 2 lines)
@@ -1,2 +0,0 @@
endpoints:
  - 8.8.8.8:443

hack/testdata/kubernetes/check.sh (vendored, deleted, 1 line)
@@ -1 +0,0 @@
return 0

hack/testdata/kubernetes/values.yaml (vendored, deleted, 62 lines)
@@ -1,62 +0,0 @@
## @section Common parameters

## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
## @param storageClass StorageClass used to store user data
##
host: ""
controlPlane:
  replicas: 2
storageClass: replicated

## @param nodeGroups [object] nodeGroups configuration
##
nodeGroups:
  md0:
    minReplicas: 0
    maxReplicas: 10
    instanceType: "u1.medium"
    ephemeralStorage: 20Gi
    roles:
      - ingress-nginx

    resources:
      cpu: ""
      memory: ""

## @section Cluster Addons
##
addons:

  ## Cert-manager: automatically creates and manages SSL/TLS certificates
  ##
  certManager:
    ## @param addons.certManager.enabled Enables the cert-manager
    ## @param addons.certManager.valuesOverride Custom values to override
    enabled: true
    valuesOverride: {}

  ## Ingress-NGINX Controller
  ##
  ingressNginx:
    ## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expects nodes with the 'ingress-nginx' role)
    ## @param addons.ingressNginx.valuesOverride Custom values to override
    ##
    enabled: true
    ## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by the upper cluster
    ## e.g.:
    ## hosts:
    ##   - example.org
    ##   - foo.example.net
    ##
    hosts: []
    valuesOverride: {}

  ## Flux CD
  ##
  fluxcd:
    ## @param addons.fluxcd.enabled Enables Flux CD
    ## @param addons.fluxcd.valuesOverride Custom values to override
    ##
    enabled: true
    valuesOverride: {}

hack/testdata/nats/check.sh (vendored, deleted, 1 line)
@@ -1 +0,0 @@
return 0

hack/testdata/nats/values.yaml (vendored, deleted, 10 lines)
@@ -1,10 +0,0 @@

## @section Common parameters

## @param external Enable external access from outside the cluster
## @param replicas Number of NATS replicas
## @param storageClass StorageClass used to store the data
##
external: false
replicas: 2
storageClass: ""

hack/testdata/tenant/check.sh (vendored, deleted, 1 line)
@@ -1 +0,0 @@
return 0

hack/testdata/tenant/values.yaml (vendored, deleted, 6 lines)
@@ -1,6 +0,0 @@
host: ""
etcd: false
monitoring: false
ingress: false
seaweedfs: false
isolated: true

@@ -7,3 +7,5 @@ gh release upload --clobber $version _out/assets/cozystack-installer.yaml
 gh release upload --clobber $version _out/assets/metal-amd64.iso
 gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
 gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
+gh release upload --clobber $version _out/assets/kernel-amd64
+gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz

internal/controller/system_helm_reconciler.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package controller

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"time"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

type CozystackConfigReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

var configMapNames = []string{"cozystack", "cozystack-branding", "cozystack-scheduling"}

const configMapNamespace = "cozy-system"
const digestAnnotation = "cozystack.io/cozy-config-digest"
const forceReconcileKey = "reconcile.fluxcd.io/forceAt"
const requestedAt = "reconcile.fluxcd.io/requestedAt"

func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
	log := log.FromContext(ctx)

	digest, err := r.computeDigest(ctx)
	if err != nil {
		log.Error(err, "failed to compute config digest")
		return ctrl.Result{}, nil
	}

	var helmList helmv2.HelmReleaseList
	if err := r.List(ctx, &helmList); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to list HelmReleases: %w", err)
	}

	now := time.Now().Format(time.RFC3339Nano)
	updated := 0

	for _, hr := range helmList.Items {
		isSystemApp := hr.Labels["cozystack.io/system-app"] == "true"
		isTenantRoot := hr.Namespace == "tenant-root" && hr.Name == "tenant-root"
		if !isSystemApp && !isTenantRoot {
			continue
		}
		patchTarget := hr.DeepCopy()

		// initialize annotations on the copy we patch; writing to a nil map would panic
		if patchTarget.Annotations == nil {
			patchTarget.Annotations = map[string]string{}
		}

		if hr.Annotations[digestAnnotation] == digest {
			continue
		}
		patchTarget.Annotations[digestAnnotation] = digest
		patchTarget.Annotations[forceReconcileKey] = now
		patchTarget.Annotations[requestedAt] = now

		patch := client.MergeFrom(hr.DeepCopy())
		if err := r.Patch(ctx, patchTarget, patch); err != nil {
			log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
			continue
		}
		updated++
		log.Info("patched HelmRelease with new config digest", "name", hr.Name, "namespace", hr.Namespace)
	}

	log.Info("finished reconciliation", "updatedHelmReleases", updated)
	return ctrl.Result{}, nil
}

func (r *CozystackConfigReconciler) computeDigest(ctx context.Context) (string, error) {
	hash := sha256.New()

	for _, name := range configMapNames {
		var cm corev1.ConfigMap
		err := r.Get(ctx, client.ObjectKey{Namespace: configMapNamespace, Name: name}, &cm)
		if err != nil {
			if kerrors.IsNotFound(err) {
				continue // ignore missing
			}
			return "", err
		}

		// Sort keys for consistent hashing
		var keys []string
		for k := range cm.Data {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		for _, k := range keys {
			v := cm.Data[k]
			fmt.Fprintf(hash, "%s:%s=%s\n", name, k, v)
		}
	}

	return hex.EncodeToString(hash.Sum(nil)), nil
}

func (r *CozystackConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		WithEventFilter(predicate.Funcs{
			UpdateFunc: func(e event.UpdateEvent) bool {
				cm, ok := e.ObjectNew.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
			CreateFunc: func(e event.CreateEvent) bool {
				cm, ok := e.Object.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
			DeleteFunc: func(e event.DeleteEvent) bool {
				cm, ok := e.Object.(*corev1.ConfigMap)
				return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
			},
		}).
		For(&corev1.ConfigMap{}).
		Complete(r)
}

func contains(slice []string, val string) bool {
	for _, s := range slice {
		if s == val {
			return true
		}
	}
	return false
}
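
// Note: a reconciler like this is typically wired up from the manager setup
// code; a minimal sketch, assuming the usual controller-runtime main (setupLog
// is the conventional kubebuilder logger name):
//
//	if err := (&CozystackConfigReconciler{
//		Client: mgr.GetClient(),
//		Scheme: mgr.GetScheme(),
//	}).SetupWithManager(mgr); err != nil {
//		setupLog.Error(err, "unable to create controller", "controller", "CozystackConfig")
//		os.Exit(1)
//	}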

internal/controller/tenant_helm_reconciler.go (new file, 158 lines)
@@ -0,0 +1,158 @@
package controller

import (
	"context"
	"fmt"
	"strings"
	"time"

	e "errors"

	helmv2 "github.com/fluxcd/helm-controller/api/v2"
	"gopkg.in/yaml.v2"
	corev1 "k8s.io/api/core/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

type TenantHelmReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	hr := &helmv2.HelmRelease{}
	if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
		if errors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "unable to fetch HelmRelease")
		return ctrl.Result{}, err
	}

	if !strings.HasPrefix(hr.Name, "tenant-") {
		return ctrl.Result{}, nil
	}

	if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
		return ctrl.Result{}, nil
	}

	if len(hr.Status.History) == 0 {
		logger.Info("no history in HelmRelease status", "name", hr.Name)
		return ctrl.Result{}, nil
	}

	if hr.Status.History[0].Status != "deployed" {
		return ctrl.Result{}, nil
	}

	newDigest := hr.Status.History[0].Digest
	var hrList helmv2.HelmReleaseList
	childNamespace := getChildNamespace(hr.Namespace, hr.Name)
	if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
		if hr.Spec.Values == nil {
			logger.Error(e.New("hr.Spec.Values is nil"), "can't annotate tenant-root ns")
			return ctrl.Result{}, nil
		}
		err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
		if err != nil {
			logger.Error(err, "can't annotate tenant-root ns")
			return ctrl.Result{}, nil
		}
		logger.Info("namespace 'tenant-root' annotated")
	}

	if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
		logger.Error(err, "unable to list HelmReleases in namespace", "namespace", childNamespace)
		return ctrl.Result{}, err
	}

	for _, item := range hrList.Items {
		if item.Name == hr.Name {
			continue
		}
		oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
		if oldDigest == newDigest {
			continue
		}
		patchTarget := item.DeepCopy()

		if patchTarget.Annotations == nil {
			patchTarget.Annotations = map[string]string{}
		}
		ts := time.Now().Format(time.RFC3339Nano)

		patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
		patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
		patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts

		patch := client.MergeFrom(item.DeepCopy())
		if err := r.Patch(ctx, patchTarget, patch); err != nil {
			logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
			continue
		}

		logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
	}

	return ctrl.Result{}, nil
}

func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&helmv2.HelmRelease{}).
		Complete(r)
}
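
// Illustrative examples of the namespace mapping implemented below:
//
//	getChildNamespace("tenant-root", "tenant-root") == "tenant-root"
//	getChildNamespace("tenant-root", "tenant-foo")  == "tenant-foo"
//	getChildNamespace("tenant-foo", "tenant-bar")   == "tenant-foo-bar"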

func getChildNamespace(currentNamespace, hrName string) string {
	tenantName := strings.TrimPrefix(hrName, "tenant-")

	switch {
	case currentNamespace == "tenant-root" && hrName == "tenant-root":
		// 1) root tenant inside root namespace
		return "tenant-root"

	case currentNamespace == "tenant-root":
		// 2) any other tenant in root namespace
		return fmt.Sprintf("tenant-%s", tenantName)

	default:
		// 3) tenant in a dedicated namespace
		return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
	}
}

func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
	var data map[string]interface{}
	if err := yaml.Unmarshal(values.Raw, &data); err != nil {
		return fmt.Errorf("failed to parse HelmRelease values: %w", err)
	}

	host, ok := data["host"].(string)
	if !ok || host == "" {
		return fmt.Errorf("host field not found or not a string")
	}

	var ns corev1.Namespace
	if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
		return fmt.Errorf("failed to get namespace tenant-root: %w", err)
	}

	if ns.Annotations == nil {
		ns.Annotations = map[string]string{}
	}
	ns.Annotations["namespace.cozystack.io/host"] = host

	if err := c.Update(context.TODO(), &ns); err != nil {
		return fmt.Errorf("failed to update namespace: %w", err)
	}

	return nil
}
|
||||||
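annotateTenantRootNs only requires a top-level `host` string in the HelmRelease values. A minimal standalone sketch of that extraction (assuming the `yaml` import above is `sigs.k8s.io/yaml`, which accepts JSON input; the host value is illustrative):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	// Raw HelmRelease .spec.values bytes, as stored in apiextensionsv1.JSON
	// (JSON is a subset of YAML, so yaml.Unmarshal handles it directly).
	raw := []byte(`{"host": "example.org"}`)

	var data map[string]interface{}
	if err := yaml.Unmarshal(raw, &data); err != nil {
		panic(err)
	}

	host, ok := data["host"].(string)
	fmt.Println(host, ok) // example.org true
}
```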
107  internal/controller/workload_controller.go  Normal file
@@ -0,0 +1,107 @@
package controller

import (
	"context"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

const (
	deletionRequeueDelay = 30 * time.Second
)

// WorkloadReconciler reconciles a Workload object
type WorkloadReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)
	w := &cozyv1alpha1.Workload{}
	err := r.Get(ctx, req.NamespacedName, w)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		logger.Error(err, "Unable to fetch Workload")
		return ctrl.Result{}, err
	}

	// it's being deleted, nothing to handle
	if w.DeletionTimestamp != nil {
		return ctrl.Result{}, nil
	}

	t := getMonitoredObject(w)

	if t == nil {
		err = r.Delete(ctx, w)
		if err != nil {
			logger.Error(err, "failed to delete workload")
		}
		return ctrl.Result{}, err
	}

	err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)

	// found object, nothing to do
	if err == nil {
		if !t.GetDeletionTimestamp().IsZero() {
			return ctrl.Result{RequeueAfter: deletionRequeueDelay}, nil
		}
		return ctrl.Result{}, nil
	}

	// error getting object but not 404 -- requeue
	if !apierrors.IsNotFound(err) {
		logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
		return ctrl.Result{}, err
	}

	err = r.Delete(ctx, w)
	if err != nil {
		logger.Error(err, "failed to delete workload")
	}
	return ctrl.Result{}, err
}

// SetupWithManager registers our controller with the Manager and sets up watches.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		// Watch Workload objects
		For(&cozyv1alpha1.Workload{}).
		Complete(r)
}

func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
	switch {
	case strings.HasPrefix(w.Name, "pvc-"):
		obj := &corev1.PersistentVolumeClaim{}
		obj.Name = strings.TrimPrefix(w.Name, "pvc-")
		obj.Namespace = w.Namespace
		return obj
	case strings.HasPrefix(w.Name, "svc-"):
		obj := &corev1.Service{}
		obj.Name = strings.TrimPrefix(w.Name, "svc-")
		obj.Namespace = w.Namespace
		return obj
	case strings.HasPrefix(w.Name, "pod-"):
		obj := &corev1.Pod{}
		obj.Name = strings.TrimPrefix(w.Name, "pod-")
		obj.Namespace = w.Namespace
		return obj
	}
	return nil
}
26  internal/controller/workload_controller_test.go  Normal file
@@ -0,0 +1,26 @@
package controller

import (
	"testing"

	cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
	w := &cozyv1alpha1.Workload{}
	w.Name = "unprefixed-name"
	obj := getMonitoredObject(w)
	if obj != nil {
		t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
	}
}

func TestPodMonitoredObject(t *testing.T) {
	w := &cozyv1alpha1.Workload{}
	w.Name = "pod-mypod"
	obj := getMonitoredObject(w)
	if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
		t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
	}
}
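The same prefix table suggests companion tests for the other kinds; a hypothetical PVC case (not part of this change) would read:

```go
func TestPVCMonitoredObject(t *testing.T) {
	w := &cozyv1alpha1.Workload{}
	w.Name = "pvc-mypvc"
	obj := getMonitoredObject(w)
	if pvc, ok := obj.(*corev1.PersistentVolumeClaim); !ok || pvc.Name != "mypvc" {
		t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &PersistentVolumeClaim{Name: "mypvc"}`, w.Name, obj)
	}
}
```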
@@ -3,6 +3,7 @@ package controller
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"sort"

 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +34,17 @@ type WorkloadMonitorReconciler struct {
 // +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
+// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch
+
+// isServiceReady checks if the service has an external IP bound
+func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
+	return len(svc.Status.LoadBalancer.Ingress) > 0
+}
+
+// isPVCReady checks if the PVC is bound
+func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
+	return pvc.Status.Phase == corev1.ClaimBound
+}
+
 // isPodReady checks if the Pod is in the Ready condition.
 func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
@@ -88,6 +100,110 @@ func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
 	obj.SetOwnerReferences(owners)
 }
+
+// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
+func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
+	ctx context.Context,
+	monitor *cozyv1alpha1.WorkloadMonitor,
+	svc corev1.Service,
+) error {
+	logger := log.FromContext(ctx)
+	workload := &cozyv1alpha1.Workload{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("svc-%s", svc.Name),
+			Namespace: svc.Namespace,
+		},
+	}
+
+	resources := make(map[string]resource.Quantity)
+
+	quantity := resource.MustParse("0")
+
+	for _, ing := range svc.Status.LoadBalancer.Ingress {
+		if ing.IP != "" {
+			quantity.Add(resource.MustParse("1"))
+		}
+	}
+
+	// Default the pool name so a Service without annotations still yields a valid label.
+	resourceLabel := "default"
+	if svc.Annotations != nil {
+		if pool, ok := svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]; ok {
+			resourceLabel = pool
+		}
+	}
+	resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
+	resources[resourceLabel] = quantity
+
+	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
+		// Update owner references with the new monitor
+		updateOwnerReferences(workload.GetObjectMeta(), monitor)
+
+		workload.Labels = svc.Labels
+
+		// Fill Workload status fields:
+		workload.Status.Kind = monitor.Spec.Kind
+		workload.Status.Type = monitor.Spec.Type
+		workload.Status.Resources = resources
+		workload.Status.Operational = r.isServiceReady(&svc)
+
+		return nil
+	})
+	if err != nil {
+		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
+		return err
+	}
+
+	return nil
+}
+
+// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
+func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
+	ctx context.Context,
+	monitor *cozyv1alpha1.WorkloadMonitor,
+	pvc corev1.PersistentVolumeClaim,
+) error {
+	logger := log.FromContext(ctx)
+	workload := &cozyv1alpha1.Workload{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("pvc-%s", pvc.Name),
+			Namespace: pvc.Namespace,
+		},
+	}
+
+	resources := make(map[string]resource.Quantity)
+
+	for resourceName, resourceQuantity := range pvc.Status.Capacity {
+		storageClass := "default"
+		if pvc.Spec.StorageClassName != nil && *pvc.Spec.StorageClassName != "" {
+			storageClass = *pvc.Spec.StorageClassName
+		}
+		resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
+		resources[resourceLabel] = resourceQuantity
+	}
+
+	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
+		// Update owner references with the new monitor
+		updateOwnerReferences(workload.GetObjectMeta(), monitor)
+
+		workload.Labels = pvc.Labels
+
+		// Fill Workload status fields:
+		workload.Status.Kind = monitor.Spec.Kind
+		workload.Status.Type = monitor.Spec.Type
+		workload.Status.Resources = resources
+		workload.Status.Operational = r.isPVCReady(&pvc)
+
+		return nil
+	})
+	if err != nil {
+		logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
+		return err
+	}
+
+	return nil
+}
+
 // reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
 func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 	ctx context.Context,
@@ -96,15 +212,12 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
 ) error {
 	logger := log.FromContext(ctx)

-	// Combine both init containers and normal containers to sum resources properly
-	combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
-
-	// totalResources will store the sum of all container resource limits
+	// totalResources will store the sum of all container resource requests
 	totalResources := make(map[string]resource.Quantity)

-	// Iterate over all containers to aggregate their Limits
-	for _, container := range combinedContainers {
-		for name, qty := range container.Resources.Limits {
+	// Iterate over all containers to aggregate their requests
+	for _, container := range pod.Spec.Containers {
+		for name, qty := range container.Resources.Requests {
 			if existing, exists := totalResources[name.String()]; exists {
 				existing.Add(qty)
 				totalResources[name.String()] = existing
@@ -133,17 +246,26 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(

 	workload := &cozyv1alpha1.Workload{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:      pod.Name,
+			Name:      fmt.Sprintf("pod-%s", pod.Name),
 			Namespace: pod.Namespace,
+			Labels:    map[string]string{},
 		},
 	}
+
+	metaLabels := r.getWorkloadMetadata(&pod)
 	_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
 		// Update owner references with the new monitor
 		updateOwnerReferences(workload.GetObjectMeta(), monitor)

 		// Copy labels from the Pod if needed
-		workload.Labels = pod.Labels
+		for k, v := range pod.Labels {
+			workload.Labels[k] = v
+		}
+
+		// Add workload meta to labels
+		for k, v := range metaLabels {
+			workload.Labels[k] = v
+		}

 		// Fill Workload status fields:
 		workload.Status.Kind = monitor.Spec.Kind
@@ -205,6 +327,45 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 		}
 	}
+
+	pvcList := &corev1.PersistentVolumeClaimList{}
+	if err := r.List(
+		ctx,
+		pvcList,
+		client.InNamespace(monitor.Namespace),
+		client.MatchingLabels(monitor.Spec.Selector),
+	); err != nil {
+		logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
+		return ctrl.Result{}, err
+	}
+
+	for _, pvc := range pvcList.Items {
+		if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
+			logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
+			continue
+		}
+	}
+
+	svcList := &corev1.ServiceList{}
+	if err := r.List(
+		ctx,
+		svcList,
+		client.InNamespace(monitor.Namespace),
+		client.MatchingLabels(monitor.Spec.Selector),
+	); err != nil {
+		logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
+		return ctrl.Result{}, err
+	}
+
+	for _, svc := range svcList.Items {
+		if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
+			continue
+		}
+		if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
+			logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
+			continue
+		}
+	}
+
 	// Update WorkloadMonitor status based on observed pods
 	monitor.Status.ObservedReplicas = observedReplicas
 	monitor.Status.AvailableReplicas = availableReplicas
@@ -233,41 +394,60 @@ func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		// Also watch Pod objects and map them back to WorkloadMonitor if labels match
 		Watches(
 			&corev1.Pod{},
-			handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
-				pod, ok := obj.(*corev1.Pod)
-				if !ok {
-					return nil
-				}
-
-				var monitorList cozyv1alpha1.WorkloadMonitorList
-				// List all WorkloadMonitors in the same namespace
-				if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
-					return nil
-				}
-
-				// Match each monitor's selector with the Pod's labels
-				var requests []reconcile.Request
-				for _, m := range monitorList.Items {
-					matches := true
-					for k, v := range m.Spec.Selector {
-						if podVal, exists := pod.Labels[k]; !exists || podVal != v {
-							matches = false
-							break
-						}
-					}
-					if matches {
-						requests = append(requests, reconcile.Request{
-							NamespacedName: types.NamespacedName{
-								Namespace: m.Namespace,
-								Name:      m.Name,
-							},
-						})
-					}
-				}
-				return requests
-			}),
+			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
+		).
+		// Watch PVCs as well
+		Watches(
+			&corev1.PersistentVolumeClaim{},
+			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
 		).
 		// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
 		Owns(&cozyv1alpha1.Workload{}).
 		Complete(r)
 }
+
+func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
+	return func(ctx context.Context, obj client.Object) []reconcile.Request {
+		concrete, ok := obj.(T)
+		if !ok {
+			return nil
+		}
+
+		var monitorList cozyv1alpha1.WorkloadMonitorList
+		// List all WorkloadMonitors in the same namespace
+		if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
+			return nil
+		}
+
+		labels := concrete.GetLabels()
+		// Match each monitor's selector with the object's labels
+		var requests []reconcile.Request
+		for _, m := range monitorList.Items {
+			matches := true
+			for k, v := range m.Spec.Selector {
+				if labelVal, exists := labels[k]; !exists || labelVal != v {
+					matches = false
+					break
+				}
+			}
+			if matches {
+				requests = append(requests, reconcile.Request{
+					NamespacedName: types.NamespacedName{
+						Namespace: m.Namespace,
+						Name:      m.Name,
+					},
+				})
+			}
+		}
+		return requests
+	}
+}
+
+func (r *WorkloadMonitorReconciler) getWorkloadMetadata(obj client.Object) map[string]string {
+	labels := make(map[string]string)
+	annotations := obj.GetAnnotations()
+	if instanceType, ok := annotations["kubevirt.io/cluster-instancetype-name"]; ok {
+		labels["workloads.cozystack.io/kubevirt-vmi-instance-type"] = instanceType
+	}
+	return labels
+}
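Services are reached only through the list inside Reconcile; the generic mapper would equally support a direct watch if one were ever wanted. A sketch of such wiring (hypothetical, not part of this diff; it assumes the existing For/Owns structure of SetupWithManager):

```go
// Hypothetical variant of SetupWithManager that also enqueues monitors on
// Service events, reusing mapObjectToMonitor.
func (r *WorkloadMonitorReconciler) setupWithServiceWatch(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&cozyv1alpha1.WorkloadMonitor{}).
		Watches(
			&corev1.Service{},
			handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Service{}, r.Client)),
		).
		Owns(&cozyv1alpha1.Workload{}).
		Complete(r)
}
```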
@@ -1,14 +1,8 @@
-OUT=../../_out/repos/apps
-TMP=../../_out/repos/apps/historical
+OUT=../_out/repos/apps
+TMP := $(shell mktemp -d)

 repo:
-	rm -rf "$(OUT)"
-	mkdir -p "$(OUT)"
-	awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
-	awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
-	helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
-	cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
-	rm -rf "$(TMP)"
+	cd .. && ../hack/package_chart.sh apps $(OUT) $(TMP) library

 fix-chartnames:
 	find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
@@ -16,10 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.1.0
+version: 0.2.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.1.0"
+appVersion: "0.2.0"
@@ -1,4 +1,4 @@
 include ../../../scripts/package.mk

 generate:
-	readme-generator -v values.yaml -s values.schema.json -r README.md
+	readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
3  packages/apps/bucket/README.md  Normal file
@@ -0,0 +1,3 @@
# S3 bucket

## Parameters
1  packages/apps/bucket/charts/cozy-lib  Symbolic link
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -18,3 +18,14 @@ rules:
     resourceNames:
       - {{ .Release.Name }}-ui
     verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -11,7 +11,7 @@ spec:
       kind: HelmRepository
       name: cozystack-system
       namespace: cozy-system
-    version: '*'
+    version: '>= 0.0.0-0'
   interval: 1m0s
   timeout: 5m0s
   values:
5  packages/apps/bucket/values.schema.json  Normal file
@@ -0,0 +1,5 @@
{
    "properties": {},
    "title": "Chart Values",
    "type": "object"
}
1  packages/apps/bucket/values.yaml  Normal file
@@ -0,0 +1 @@
{}
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.7.0
+version: 0.11.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1,14 +1,18 @@
-CLICKHOUSE_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
+CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)
+PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]

 include ../../../scripts/common-envs.mk
 include ../../../scripts/package.mk

 generate:
-	readme-generator -v values.yaml -s values.schema.json -r README.md
+	readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
+	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json

 image:
-	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/clickhouse-backup \
+	docker buildx build images/clickhouse-backup \
 		--provenance false \
+		--builder=$(BUILDER) \
+		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/clickhouse-backup:latest \
 		--cache-to type=inline \
@@ -1,50 +1,80 @@
-# Managed Clickhouse Service
+# Managed ClickHouse Service

-### How to restore backup:
+ClickHouse is an open source high-performance and column-oriented SQL database management system (DBMS).
+It is used for online analytical processing (OLAP).

-find snapshot:
-```
-restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
-```
-
-restore:
-```
-restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
-```
-
-more details:
-- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
+### How to restore backup from S3
+
+1. Find the snapshot:
+
+```bash
+restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
+```
+
+2. Restore it:
+
+```bash
+restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
+```
+
+For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1).

 ## Parameters

 ### Common parameters

 | Name | Description | Value |
-| ---------------- | ----------------------------------- | ------ |
-| `size`           | Persistent Volume size              | `10Gi` |
-| `logStorageSize` | Persistent Volume for logs size     | `2Gi`  |
-| `shards`         | Number of Clickhouse replicas       | `1`    |
-| `replicas`       | Number of Clickhouse shards         | `2`    |
-| `storageClass`   | StorageClass used to store the data | `""`   |
-| `logTTL`         | for query_log and query_thread_log  | `15`   |
+| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `replicas`        | Number of Clickhouse replicas                                                                                                              | `2`     |
+| `shards`          | Number of Clickhouse shards                                                                                                               | `1`     |
+| `resources`       | Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.  | `{}`    |
+| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.              | `small` |
+| `size`            | Persistent Volume Claim size, available for application data                                                                             | `10Gi`  |
+| `storageClass`    | StorageClass used to store the application data                                                                                          | `""`    |

-### Configuration parameters
+### Application-specific parameters

 | Name | Description | Value |
-| ------- | ------------------- | ----- |
-| `users` | Users configuration | `{}`  |
+| ---------------- | --------------------------------------------------------- | ----- |
+| `logStorageSize` | Size of Persistent Volume for logs                         | `2Gi` |
+| `logTTL`         | TTL (expiration time) for query_log and query_thread_log   | `15`  |
+| `users`          | Users configuration                                        | `{}`  |

 ### Backup parameters

 | Name | Description | Value |
-| ------------------------ | ------------------------------------------------ | ------------------------------------------------------ |
-| `backup.enabled`         | Enable pereiodic backups                          | `false`                                                 |
-| `backup.s3Region`        | The AWS S3 region where backups are stored        | `us-east-1`                                             |
-| `backup.s3Bucket`        | The S3 bucket used for storing backups            | `s3.example.org/clickhouse-backups`                     |
-| `backup.schedule`        | Cron schedule for automated backups               | `0 2 * * *`                                             |
-| `backup.cleanupStrategy` | The strategy for cleaning up old backups          | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m`  |
-| `backup.s3AccessKey`     | The access key for S3, used for authentication    | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                      |
-| `backup.s3SecretKey`     | The secret key for S3, used for authentication    | `ju3eum4dekeich9ahM1te8waeGai0oog`                      |
-| `backup.resticPassword`  | The password for Restic backup encryption         | `ChaXoveekoh6eigh4siesheeda2quai0`                      |
-| `resources`              | Resources                                         | `{}`                                                    |
-| `resourcesPreset`        | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
+| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
+| `backup.enabled`         | Enable periodic backups                        | `false`                                                 |
+| `backup.s3Region`        | AWS S3 region where backups are stored         | `us-east-1`                                             |
+| `backup.s3Bucket`        | S3 bucket used for storing backups             | `s3.example.org/clickhouse-backups`                     |
+| `backup.schedule`        | Cron schedule for automated backups            | `0 2 * * *`                                             |
+| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m`  |
+| `backup.s3AccessKey`     | Access key for S3, used for authentication     | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                      |
+| `backup.s3SecretKey`     | Secret key for S3, used for authentication     | `ju3eum4dekeich9ahM1te8waeGai0oog`                      |
+| `backup.resticPassword`  | Password for Restic backup encryption          | `ChaXoveekoh6eigh4siesheeda2quai0`                      |
+
+## Parameter examples and reference
+
+### resources and resourcesPreset
+
+`resources` sets explicit CPU and memory configurations for each replica.
+When left empty, the preset defined in `resourcesPreset` is applied.
+
+```yaml
+resources:
+  cpu: 4000m
+  memory: 4Gi
+```
+
+`resourcesPreset` sets named CPU and memory configurations for each replica.
+This setting is ignored if the corresponding `resources` value is set.
+
+| Preset name | CPU    | memory  |
+|-------------|--------|---------|
+| `nano`      | `250m` | `128Mi` |
+| `micro`     | `500m` | `256Mi` |
+| `small`     | `1`    | `512Mi` |
+| `medium`    | `1`    | `1Gi`   |
+| `large`     | `2`    | `2Gi`   |
+| `xlarge`    | `4`    | `4Gi`   |
+| `2xlarge`   | `8`    | `8Gi`   |
1  packages/apps/clickhouse/charts/cozy-lib  Symbolic link
@@ -0,0 +1 @@
../../../library/cozy-lib
@@ -1 +1 @@
-ghcr.io/cozystack/cozystack/clickhouse-backup:0.7.0@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.11.1@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
-{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
   )
   "2xlarge" (dict
-    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}
@@ -1,3 +1,5 @@
+{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
+{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
 {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
 {{- $passwords := dict }}
 {{- $users := .Values.users }}
@@ -32,7 +34,7 @@ kind: "ClickHouseInstallation"
 metadata:
   name: "{{ .Release.Name }}"
 spec:
-  namespaceDomainPattern: "%s.svc.cozy.local"
+  namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
   defaults:
     templates:
       dataVolumeClaimTemplate: data-volume-template
@@ -92,6 +94,9 @@ spec:
   templates:
     volumeClaimTemplates:
       - name: data-volume-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         spec:
           accessModes:
             - ReadWriteOnce
@@ -99,6 +104,9 @@ spec:
             requests:
               storage: {{ .Values.size }}
       - name: log-volume-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         spec:
           accessModes:
             - ReadWriteOnce
@@ -107,6 +115,9 @@ spec:
               storage: {{ .Values.logStorageSize }}
     podTemplates:
       - name: clickhouse-per-host
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
        spec:
          affinity:
            podAntiAffinity:
@@ -121,11 +132,7 @@ spec:
          containers:
            - name: clickhouse
              image: clickhouse/clickhouse-server:24.9.2.42
-             {{- if .Values.resources }}
-             resources: {{- toYaml .Values.resources | nindent 16 }}
-             {{- else if ne .Values.resourcesPreset "none" }}
-             resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 16 }}
-             {{- end }}
+             resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 16 }}
              volumeMounts:
                - name: data-volume-template
                  mountPath: /var/lib/clickhouse
@@ -133,6 +140,9 @@ spec:
                  mountPath: /var/log/clickhouse-server
     serviceTemplates:
       - name: svc-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
        generateName: chendpoint-{chi}
        spec:
          ports:
@@ -24,3 +24,14 @@ rules:
     resourceNames:
       - {{ .Release.Name }}
     verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -9,5 +9,5 @@ spec:
   kind: clickhouse
   type: clickhouse
   selector:
-    clickhouse.altinity.com/chi: {{ $.Release.Name }}
+    app.kubernetes.io/instance: {{ $.Release.Name }}
   version: {{ $.Chart.Version }}
@@ -1,91 +1,100 @@
 {
-    "title": "Chart Values",
-    "type": "object",
     "properties": {
-        "size": {
-            "type": "string",
-            "description": "Persistent Volume size",
-            "default": "10Gi"
-        },
-        "logStorageSize": {
-            "type": "string",
-            "description": "Persistent Volume for logs size",
-            "default": "2Gi"
-        },
-        "shards": {
-            "type": "number",
-            "description": "Number of Clickhouse replicas",
-            "default": 1
-        },
-        "replicas": {
-            "type": "number",
-            "description": "Number of Clickhouse shards",
-            "default": 2
-        },
-        "storageClass": {
-            "type": "string",
-            "description": "StorageClass used to store the data",
-            "default": ""
-        },
-        "logTTL": {
-            "type": "number",
-            "description": "for query_log and query_thread_log",
-            "default": 15
-        },
         "backup": {
-            "type": "object",
             "properties": {
-                "enabled": {
-                    "type": "boolean",
-                    "description": "Enable pereiodic backups",
-                    "default": false
-                },
-                "s3Region": {
-                    "type": "string",
-                    "description": "The AWS S3 region where backups are stored",
-                    "default": "us-east-1"
-                },
-                "s3Bucket": {
-                    "type": "string",
-                    "description": "The S3 bucket used for storing backups",
-                    "default": "s3.example.org/clickhouse-backups"
-                },
-                "schedule": {
-                    "type": "string",
-                    "description": "Cron schedule for automated backups",
-                    "default": "0 2 * * *"
-                },
                 "cleanupStrategy": {
-                    "type": "string",
-                    "description": "The strategy for cleaning up old backups",
-                    "default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
+                    "default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m",
+                    "description": "Retention strategy for cleaning up old backups",
+                    "type": "string"
                 },
-                "s3AccessKey": {
-                    "type": "string",
-                    "description": "The access key for S3, used for authentication",
-                    "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
+                "enabled": {
+                    "default": false,
+                    "description": "Enable periodic backups",
+                    "type": "boolean"
                 },
-                "s3SecretKey": {
-                    "type": "string",
-                    "description": "The secret key for S3, used for authentication",
-                    "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
-                },
                 "resticPassword": {
-                    "type": "string",
-                    "description": "The password for Restic backup encryption",
-                    "default": "ChaXoveekoh6eigh4siesheeda2quai0"
+                    "default": "ChaXoveekoh6eigh4siesheeda2quai0",
+                    "description": "Password for Restic backup encryption",
+                    "type": "string"
+                },
+                "s3AccessKey": {
+                    "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu",
+                    "description": "Access key for S3, used for authentication",
+                    "type": "string"
+                },
+                "s3Bucket": {
+                    "default": "s3.example.org/clickhouse-backups",
+                    "description": "S3 bucket used for storing backups",
+                    "type": "string"
+                },
+                "s3Region": {
+                    "default": "us-east-1",
+                    "description": "AWS S3 region where backups are stored",
+                    "type": "string"
+                },
+                "s3SecretKey": {
+                    "default": "ju3eum4dekeich9ahM1te8waeGai0oog",
+                    "description": "Secret key for S3, used for authentication",
+                    "type": "string"
+                },
+                "schedule": {
+                    "default": "0 2 * * *",
+                    "description": "Cron schedule for automated backups",
+                    "type": "string"
                 }
-            }
+            },
+            "type": "object"
+        },
+        "logStorageSize": {
+            "default": "2Gi",
+            "description": "Size of Persistent Volume for logs",
+            "type": "string"
+        },
+        "logTTL": {
+            "default": 15,
+            "description": "TTL (expiration time) for query_log and query_thread_log",
+            "type": "number"
+        },
+        "replicas": {
+            "default": 2,
+            "description": "Number of Clickhouse replicas",
+            "type": "number"
         },
         "resources": {
-            "type": "object",
-            "description": "Resources",
-            "default": {}
+            "default": {},
+            "description": "Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.",
+            "type": "object"
         },
         "resourcesPreset": {
+            "default": "small",
+            "description": "Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.",
             "type": "string",
-            "description": "Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).",
-            "default": "nano"
+            "enum": [
+                "nano",
+                "micro",
+                "small",
+                "medium",
+                "large",
+                "xlarge",
+                "2xlarge"
+            ]
+        },
+        "shards": {
+            "default": 1,
+            "description": "Number of Clickhouse shards",
+            "type": "number"
+        },
+        "size": {
+            "default": "10Gi",
+            "description": "Persistent Volume Claim size, available for application data",
+            "type": "string"
+        },
+        "storageClass": {
+            "default": "",
+            "description": "StorageClass used to store the application data",
+            "type": "string"
         }
-    }
-}
+    },
+    "title": "Chart Values",
+    "type": "object"
+}
@@ -1,21 +1,29 @@
 ## @section Common parameters
-## @param size Persistent Volume size
-## @param logStorageSize Persistent Volume for logs size
-## @param shards Number of Clickhouse replicas
-## @param replicas Number of Clickhouse shards
-## @param storageClass StorageClass used to store the data
-## @param logTTL for query_log and query_thread_log
 ##
-size: 10Gi
-logStorageSize: 2Gi
-shards: 1
+## @param replicas Number of Clickhouse replicas
 replicas: 2
+## @param shards Number of Clickhouse shards
+shards: 1
+## @param resources Explicit CPU and memory configuration for each ClickHouse replica. When left empty, the preset defined in `resourcesPreset` is applied.
+resources: {}
+# resources:
+#   cpu: 4000m
+#   memory: 4Gi
+
+## @param resourcesPreset Default sizing preset used when `resources` is omitted. Allowed values: nano, micro, small, medium, large, xlarge, 2xlarge.
+resourcesPreset: "small"
+## @param size Persistent Volume Claim size, available for application data
+size: 10Gi
+## @param storageClass StorageClass used to store the application data
 storageClass: ""
+
+## @section Application-specific parameters
+##
+## @param logStorageSize Size of Persistent Volume for logs
+logStorageSize: 2Gi
+## @param logTTL TTL (expiration time) for query_log and query_thread_log
 logTTL: 15

-## @section Configuration parameters
 ## @param users [object] Users configuration
 ## Example:
 ## users:
@@ -27,16 +35,17 @@ logTTL: 15
 ##
 users: {}

+
 ## @section Backup parameters
-## @param backup.enabled Enable pereiodic backups
-## @param backup.s3Region The AWS S3 region where backups are stored
-## @param backup.s3Bucket The S3 bucket used for storing backups
+## @param backup.enabled Enable periodic backups
+## @param backup.s3Region AWS S3 region where backups are stored
+## @param backup.s3Bucket S3 bucket used for storing backups
 ## @param backup.schedule Cron schedule for automated backups
-## @param backup.cleanupStrategy The strategy for cleaning up old backups
-## @param backup.s3AccessKey The access key for S3, used for authentication
-## @param backup.s3SecretKey The secret key for S3, used for authentication
-## @param backup.resticPassword The password for Restic backup encryption
+## @param backup.cleanupStrategy Retention strategy for cleaning up old backups
+## @param backup.s3AccessKey Access key for S3, used for authentication
+## @param backup.s3SecretKey Secret key for S3, used for authentication
+## @param backup.resticPassword Password for Restic backup encryption
 backup:
   enabled: false
   s3Region: us-east-1
@@ -47,15 +56,3 @@ backup:
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
-
-## @param resources Resources
-resources: {}
-# resources:
-#   limits:
-#     cpu: 4000m
-#     memory: 4Gi
-#   requests:
-#     cpu: 100m
-#     memory: 512Mi
-
-## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production).
-resourcesPreset: "nano"
@@ -16,10 +16,10 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.5.0
+version: 1.0.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "1.24.0"
+appVersion: 2.4.0
@@ -1,4 +1,13 @@
 include ../../../scripts/package.mk
+PRESET_ENUM := ["nano","micro","small","medium","large","xlarge","2xlarge"]

 generate:
-	readme-generator -v values.yaml -s values.schema.json -r README.md
+	readme-generator-for-helm -v values.yaml -s values.schema.json -r README.md
+	yq -i -o json --indent 4 '.properties.resourcesPreset.enum = $(PRESET_ENUM)' values.schema.json
+
+update:
+	tag=$$(git ls-remote --tags --sort="v:refname" https://github.com/FerretDB/FerretDB | awk -F'[/^]' '{sub("^v", "", $$3)} END{print $$3}') && \
+	pgtag=$$(skopeo list-tags docker://ghcr.io/ferretdb/postgres-documentdb | jq -r --arg tag "$$tag" '.Tags[] | select(endswith("ferretdb-" + $$tag))' | sort -V | tail -n1) && \
+	sed -i "s|\(imageName: ghcr.io/ferretdb/postgres-documentdb:\).*|\1$$pgtag|" templates/postgres.yaml && \
+	sed -i "s|\(image: ghcr.io/ferretdb/ferretdb:\).*|\1$$tag|" templates/ferretdb.yaml && \
+	sed -i "s|\(appVersion: \).*|\1$$tag|" Chart.yaml
@@ -1,37 +1,72 @@
 # Managed FerretDB Service
 
+FerretDB is an open source MongoDB alternative.
+It translates MongoDB wire protocol queries to SQL and can be used as a direct replacement for MongoDB 5.0+.
+Internally, FerretDB service is backed by Postgres.
+
 ## Parameters
 
 ### Common parameters
 
 | Name | Description | Value |
-| ------------------------ | ----------- | ------- |
-| `external` | Enable external access from outside the cluster | `false` |
-| `size` | Persistent Volume size | `10Gi` |
-| `replicas` | Number of Postgres replicas | `2` |
-| `storageClass` | StorageClass used to store the data | `""` |
-| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed. | `0` |
-| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the number of instances). | `0` |
+| ----------------- | ----------- | ------- |
+| `replicas` | Number of replicas | `2` |
+| `resources` | Explicit CPU and memory configuration for each FerretDB replica. When left empty, the preset defined in `resourcesPreset` is applied. | `{}` |
+| `resourcesPreset` | Default sizing preset used when `resources` is omitted. Allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge. | `micro` |
+| `size` | Persistent Volume size | `10Gi` |
+| `storageClass` | StorageClass used to store the data | `""` |
+| `external` | Enable external access from outside the cluster | `false` |
 
-### Configuration parameters
+### Application-specific parameters
 
 | Name | Description | Value |
-| ------- | ------------------- | ----- |
-| `users` | Users configuration | `{}` |
+| ------------------------ | ----------- | ----- |
+| `quorum.minSyncReplicas` | Minimum number of synchronous replicas that must acknowledge a transaction before it is considered committed | `0` |
+| `quorum.maxSyncReplicas` | Maximum number of synchronous replicas that can acknowledge a transaction (must be lower than the total number of replicas) | `0` |
+| `users` | Users configuration | `{}` |
 
 ### Backup parameters
 
 | Name | Description | Value |
-| ------------------------ | ----------- | ------------------------------------------------------ |
-| `backup.enabled` | Enable pereiodic backups | `false` |
-| `backup.s3Region` | The AWS S3 region where backups are stored | `us-east-1` |
-| `backup.s3Bucket` | The S3 bucket used for storing backups | `s3.example.org/postgres-backups` |
-| `backup.schedule` | Cron schedule for automated backups | `0 2 * * *` |
-| `backup.cleanupStrategy` | The strategy for cleaning up old backups | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
-| `backup.s3AccessKey` | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
-| `backup.s3SecretKey` | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
-| `backup.resticPassword` | The password for Restic backup encryption | `ChaXoveekoh6eigh4siesheeda2quai0` |
-| `resources` | Resources | `{}` |
-| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). | `nano` |
+| ------------------------ | ----------- | ----------------------------------- |
+| `backup.enabled` | Enable regular backups | `false` |
+| `backup.schedule` | Cron schedule for automated backups | `0 2 * * * *` |
+| `backup.retentionPolicy` | Retention policy | `30d` |
+| `backup.destinationPath` | Path to store the backup (i.e. s3://bucket/path/to/folder) | `s3://bucket/path/to/folder/` |
+| `backup.endpointURL` | S3 Endpoint used to upload data to the cloud | `http://minio-gateway-service:9000` |
+| `backup.s3AccessKey` | Access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu` |
+| `backup.s3SecretKey` | Secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog` |
 
+### Bootstrap (recovery) parameters
+
+| Name | Description | Value |
+| ------------------------ | ----------- | ------- |
+| `bootstrap.enabled` | Restore database cluster from a backup | `false` |
+| `bootstrap.recoveryTime` | Timestamp (PITR) up to which recovery will proceed, expressed in RFC 3339 format. If left empty, will restore latest | `""` |
+| `bootstrap.oldName` | Name of database cluster before deleting | `""` |
+
+## Parameter examples and reference
+
+### resources and resourcesPreset
+
+`resources` sets explicit CPU and memory configurations for each replica.
+When left empty, the preset defined in `resourcesPreset` is applied.
+
+```yaml
+resources:
+  cpu: 4000m
+  memory: 4Gi
+```
+
+`resourcesPreset` sets named CPU and memory configurations for each replica.
+This setting is ignored if the corresponding `resources` value is set.
+
+| Preset name | CPU | memory |
+|-------------|--------|---------|
+| `nano` | `250m` | `128Mi` |
+| `micro` | `500m` | `256Mi` |
+| `small` | `1` | `512Mi` |
+| `medium` | `1` | `1Gi` |
+| `large` | `2` | `2Gi` |
+| `xlarge` | `4` | `4Gi` |
+| `2xlarge` | `8` | `8Gi` |
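For orientation, the parameter set documented in this README maps onto a values file like the following sketch (the schedule and retention values are the chart defaults from the tables above; the bucket, endpoint, and keys are placeholders, not values shipped by the chart):

```yaml
replicas: 2
resourcesPreset: micro   # ignored when `resources` is set explicitly
size: 10Gi
external: false

backup:
  enabled: true
  schedule: "0 2 * * * *"
  retentionPolicy: 30d
  destinationPath: s3://bucket/path/to/folder/      # placeholder
  endpointURL: http://minio-gateway-service:9000    # placeholder
  s3AccessKey: <access-key>                         # placeholder
  s3SecretKey: <secret-key>                         # placeholder
```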
packages/apps/ferretdb/charts/cozy-lib (symbolic link, new)
@@ -0,0 +1 @@
+../../../library/cozy-lib
@@ -1 +0,0 @@
-ghcr.io/cozystack/cozystack/postgres-backup:0.10.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
@@ -11,35 +11,34 @@ These presets are for basic testing and not meant to be used in production
 {{ include "resources.preset" (dict "type" "nano") -}}
 */}}
 {{- define "resources.preset" -}}
-{{/* The limits are the requests increased by 50% (except ephemeral-storage and xlarge/2xlarge sizes)*/}}
 {{- $presets := dict
   "nano" (dict
     "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "150m" "memory" "192Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
   )
   "micro" (dict
     "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "375m" "memory" "384Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
   )
   "small" (dict
     "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "750m" "memory" "768Mi" "ephemeral-storage" "2Gi")
+    "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
   )
   "medium" (dict
-    "requests" (dict "cpu" "500m" "memory" "1024Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "750m" "memory" "1536Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
   )
   "large" (dict
-    "requests" (dict "cpu" "1.0" "memory" "2048Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "1.5" "memory" "3072Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
   )
   "xlarge" (dict
-    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "3.0" "memory" "6144Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
  )
   "2xlarge" (dict
-    "requests" (dict "cpu" "1.0" "memory" "3072Mi" "ephemeral-storage" "50Mi")
-    "limits" (dict "cpu" "6.0" "memory" "12288Mi" "ephemeral-storage" "2Gi")
+    "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
+    "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
   )
 }}
 {{- if hasKey $presets .type -}}
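Under the revised presets every size keeps its CPU and memory requests, but the limits now cap only memory (at exactly the requested amount) and ephemeral storage, leaving CPU unlimited. As a sketch, rendering the `micro` preset after this change would produce approximately:

```yaml
# Approximate output of `include "resources.preset" (dict "type" "micro")`
# after this diff: memory limit equals the request, no CPU limit.
requests:
  cpu: 250m
  memory: 256Mi
  ephemeral-storage: 50Mi
limits:
  memory: 256Mi
  ephemeral-storage: 2Gi
```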
@@ -1,99 +0,0 @@
-{{- if .Values.backup.enabled }}
-{{ $image := .Files.Get "images/backup.json" | fromJson }}
-
-apiVersion: batch/v1
-kind: CronJob
-metadata:
-  name: {{ .Release.Name }}-backup
-spec:
-  schedule: "{{ .Values.backup.schedule }}"
-  concurrencyPolicy: Forbid
-  successfulJobsHistoryLimit: 3
-  failedJobsHistoryLimit: 3
-  jobTemplate:
-    spec:
-      backoffLimit: 2
-      template:
-        spec:
-          restartPolicy: OnFailure
-          template:
-            metadata:
-              annotations:
-                checksum/config: {{ include (print $.Template.BasePath "/backup-script.yaml") . | sha256sum }}
-                checksum/secret: {{ include (print $.Template.BasePath "/backup-secret.yaml") . | sha256sum }}
-            spec:
-              restartPolicy: Never
-              containers:
-              - name: pgdump
-                image: "{{ $.Files.Get "images/postgres-backup.tag" | trim }}"
-                command:
-                - /bin/sh
-                - /scripts/backup.sh
-                env:
-                - name: REPO_PREFIX
-                  value: {{ required "s3Bucket is not specified!" .Values.backup.s3Bucket | quote }}
-                - name: CLEANUP_STRATEGY
-                  value: {{ required "cleanupStrategy is not specified!" .Values.backup.cleanupStrategy | quote }}
-                - name: PGUSER
-                  valueFrom:
-                    secretKeyRef:
-                      name: {{ .Release.Name }}-postgres-superuser
-                      key: username
-                - name: PGPASSWORD
-                  valueFrom:
-                    secretKeyRef:
-                      name: {{ .Release.Name }}-postgres-superuser
-                      key: password
-                - name: PGHOST
-                  value: {{ .Release.Name }}-postgres-rw
-                - name: PGPORT
-                  value: "5432"
-                - name: PGDATABASE
-                  value: postgres
-                - name: AWS_ACCESS_KEY_ID
-                  valueFrom:
-                    secretKeyRef:
-                      name: {{ .Release.Name }}-backup
-                      key: s3AccessKey
-                - name: AWS_SECRET_ACCESS_KEY
-                  valueFrom:
-                    secretKeyRef:
-                      name: {{ .Release.Name }}-backup
-                      key: s3SecretKey
-                - name: AWS_DEFAULT_REGION
-                  value: {{ .Values.backup.s3Region }}
-                - name: RESTIC_PASSWORD
-                  valueFrom:
-                    secretKeyRef:
-                      name: {{ .Release.Name }}-backup
-                      key: resticPassword
-                volumeMounts:
-                - mountPath: /scripts
-                  name: scripts
-                - mountPath: /tmp
-                  name: tmp
-                - mountPath: /.cache
-                  name: cache
-                securityContext:
-                  allowPrivilegeEscalation: false
-                  capabilities:
-                    drop:
-                    - ALL
-                  privileged: false
-                  readOnlyRootFilesystem: true
-                  runAsNonRoot: true
-              volumes:
-              - name: scripts
-                secret:
-                  secretName: {{ .Release.Name }}-backup-script
-              - name: tmp
-                emptyDir: {}
-              - name: cache
-                emptyDir: {}
-              securityContext:
-                runAsNonRoot: true
-                runAsUser: 9000
-                runAsGroup: 9000
-                seccompProfile:
-                  type: RuntimeDefault
-{{- end }}
@@ -1,50 +0,0 @@
-{{- if .Values.backup.enabled }}
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ .Release.Name }}-backup-script
-stringData:
-  backup.sh: |
-    #!/bin/sh
-    set -e
-    set -o pipefail
-
-    JOB_ID="job-$(uuidgen|cut -f1 -d-)"
-    DB_LIST=$(psql -Atq -c 'SELECT datname FROM pg_catalog.pg_database;' | grep -v '^\(postgres\|app\|template.*\)$')
-    echo DB_LIST=$(echo "$DB_LIST" | shuf) # shuffle list
-    echo "Job ID: $JOB_ID"
-    echo "Target repo: $REPO_PREFIX"
-    echo "Cleanup strategy: $CLEANUP_STRATEGY"
-    echo "Start backup for:"
-    echo "$DB_LIST"
-    echo
-    echo "Backup started at `date +%Y-%m-%d\ %H:%M:%S`"
-    for db in $DB_LIST; do
-      (
-        set -x
-        restic -r "s3:${REPO_PREFIX}/$db" cat config >/dev/null 2>&1 || \
-          restic -r "s3:${REPO_PREFIX}/$db" init --repository-version 2
-        restic -r "s3:${REPO_PREFIX}/$db" unlock --remove-all >/dev/null 2>&1 || true # no locks, k8s takes care of it
-        pg_dump -Z0 -Ft -d "$db" | \
-          restic -r "s3:${REPO_PREFIX}/$db" backup --tag "$JOB_ID" --stdin --stdin-filename dump.tar
-        restic -r "s3:${REPO_PREFIX}/$db" tag --tag "$JOB_ID" --set "completed"
-      )
-    done
-    echo "Backup finished at `date +%Y-%m-%d\ %H:%M:%S`"
-
-    echo
-    echo "Run cleanup:"
-    echo
-
-    echo "Cleanup started at `date +%Y-%m-%d\ %H:%M:%S`"
-    for db in $DB_LIST; do
-      (
-        set -x
-        restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags --keep-tag "completed" # keep completed snapshots only
-        restic forget -r "s3:${REPO_PREFIX}/$db" --group-by=tags $CLEANUP_STRATEGY
-        restic prune -r "s3:${REPO_PREFIX}/$db"
-      )
-    done
-    echo "Cleanup finished at `date +%Y-%m-%d\ %H:%M:%S`"
-{{- end }}
@@ -1,11 +0,0 @@
-{{- if .Values.backup.enabled }}
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ .Release.Name }}-backup
-stringData:
-  s3AccessKey: {{ required "s3AccessKey is not specified!" .Values.backup.s3AccessKey }}
-  s3SecretKey: {{ required "s3SecretKey is not specified!" .Values.backup.s3SecretKey }}
-  resticPassword: {{ required "resticPassword is not specified!" .Values.backup.resticPassword }}
-{{- end }}
packages/apps/ferretdb/templates/backup.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
+{{- if .Values.backup.enabled }}
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: ScheduledBackup
+metadata:
+  name: {{ .Release.Name }}-postgres
+spec:
+  schedule: {{ .Values.backup.schedule | quote }}
+  backupOwnerReference: self
+  cluster:
+    name: {{ .Release.Name }}-postgres
+{{- end }}
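The restic CronJob and its secrets above are replaced by a CloudNativePG `ScheduledBackup`, which hands scheduling and retention over to the operator. For a hypothetical release named `example`, this template would render roughly to the following (the schedule is the chart's default from the README; CNPG cron expressions carry six fields, seconds first):

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: ScheduledBackup
metadata:
  name: example-postgres
spec:
  schedule: "0 2 * * * *"        # six-field cron, seconds first
  backupOwnerReference: self     # Backup objects are owned (and GC'd) by this resource
  cluster:
    name: example-postgres       # the CNPG Cluster created by this chart
```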
@@ -24,3 +24,14 @@ rules:
   resourceNames:
   - {{ .Release.Name }}
   verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -2,6 +2,8 @@ apiVersion: v1
 kind: Service
 metadata:
   name: {{ .Release.Name }}
+  labels:
+    app.kubernetes.io/instance: {{ .Release.Name }}
 spec:
   type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
   {{- if .Values.external }}
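The `ternary` helper switches the Service type on a single flag: with `external: true` the chart exposes FerretDB through a LoadBalancer, otherwise it stays cluster-internal. A sketch of the rendered result for a hypothetical release named `example` with `external: true`:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example
  labels:
    app.kubernetes.io/instance: example   # label added by this diff
spec:
  type: LoadBalancer   # would be ClusterIP with external: false
```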
@@ -12,15 +12,18 @@ spec:
     metadata:
       labels:
         app: {{ .Release.Name }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
     spec:
       containers:
       - name: ferretdb
-        image: ghcr.io/ferretdb/ferretdb:1.24.0
+        image: ghcr.io/ferretdb/ferretdb:2.4.0
         ports:
         - containerPort: 27017
         env:
-        - name: FERRETDB_POSTGRESQL_URL
+        - name: POSTGRESQL_PASSWORD
           valueFrom:
             secretKeyRef:
-              name: {{ .Release.Name }}-postgres-app
-              key: uri
+              name: {{ .Release.Name }}-postgres-superuser
+              key: password
+        - name: FERRETDB_POSTGRESQL_URL
+          value: "postgresql://postgres:$(POSTGRESQL_PASSWORD)@{{ .Release.Name }}-postgres-rw:5432/postgres"
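With this change FerretDB 2.x connects as the `postgres` superuser instead of consuming the CNPG-generated `app` URI. The `$(POSTGRESQL_PASSWORD)` reference works because Kubernetes expands `$(VAR)` in a container's `value` fields using variables defined earlier in the same `env` list, so the ordering of the two entries matters. For a hypothetical release named `example`, the rendered env block would look roughly like:

```yaml
env:
- name: POSTGRESQL_PASSWORD          # must come first so it can be referenced below
  valueFrom:
    secretKeyRef:
      name: example-postgres-superuser
      key: password
- name: FERRETDB_POSTGRESQL_URL      # $(POSTGRESQL_PASSWORD) is expanded by the kubelet
  value: "postgresql://postgres:$(POSTGRESQL_PASSWORD)@example-postgres-rw:5432/postgres"
```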
@@ -1,66 +0,0 @@
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: {{ .Release.Name }}-init-job
-  annotations:
-    "helm.sh/hook": post-install,post-upgrade
-    "helm.sh/hook-weight": "-5"
-    "helm.sh/hook-delete-policy": before-hook-creation
-spec:
-  template:
-    metadata:
-      name: {{ .Release.Name }}-init-job
-      annotations:
-        checksum/config: {{ include (print $.Template.BasePath "/init-script.yaml") . | sha256sum }}
-    spec:
-      restartPolicy: Never
-      containers:
-      - name: postgres
-        image: ghcr.io/cloudnative-pg/postgresql:15.3
-        command:
-        - bash
-        - /scripts/init.sh
-        env:
-        - name: PGUSER
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Release.Name }}-postgres-superuser
-              key: username
-        - name: PGPASSWORD
-          valueFrom:
-            secretKeyRef:
-              name: {{ .Release.Name }}-postgres-superuser
-              key: password
-        - name: PGHOST
-          value: {{ .Release.Name }}-postgres-rw
-        - name: PGPORT
-          value: "5432"
-        - name: PGDATABASE
-          value: postgres
-        securityContext:
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-          privileged: false
-          readOnlyRootFilesystem: true
-          runAsNonRoot: true
-        volumeMounts:
-        - mountPath: /etc/secret
-          name: secret
-        - mountPath: /scripts
-          name: scripts
-      securityContext:
-        fsGroup: 26
-        runAsGroup: 26
-        runAsNonRoot: true
-        runAsUser: 26
-        seccompProfile:
-          type: RuntimeDefault
-      volumes:
-      - name: secret
-        secret:
-          secretName: {{ .Release.Name }}-postgres-superuser
-      - name: scripts
-        secret:
-          secretName: {{ .Release.Name }}-init-script
@@ -1,131 +0,0 @@
-{{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
-{{- $passwords := dict }}
-
-{{- with (index $existingSecret "data") }}
-{{- range $k, $v := . }}
-{{- $_ := set $passwords $k (b64dec $v) }}
-{{- end }}
-{{- end }}
-
-{{- range $user, $u := .Values.users }}
-{{- if $u.password }}
-{{- $_ := set $passwords $user $u.password }}
-{{- else if not (index $passwords $user) }}
-{{- $_ := set $passwords $user (randAlphaNum 16) }}
-{{- end }}
-{{- end }}
-
-{{- if .Values.users }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ .Release.Name }}-credentials
-stringData:
-{{- range $user, $u := .Values.users }}
-  {{ quote $user }}: {{ quote (index $passwords $user) }}
-{{- end }}
-{{- end }}
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ .Release.Name }}-init-script
-stringData:
-  init.sh: |
-    #!/bin/bash
-    set -e
-
-    until pg_isready ; do sleep 5; done
-
-    echo "== create users"
-{{- if .Values.users }}
-    psql -v ON_ERROR_STOP=1 <<\EOT
-{{- range $user, $u := .Values.users }}
-    SELECT 'CREATE ROLE {{ $user }} LOGIN INHERIT;'
-    WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ $user }}')\gexec
-    ALTER ROLE {{ $user }} WITH PASSWORD '{{ index $passwords $user }}' LOGIN INHERIT {{ ternary "REPLICATION" "NOREPLICATION" (default false $u.replication) }};
-    COMMENT ON ROLE {{ $user }} IS 'user managed by helm';
-{{- end }}
-    EOT
-{{- end }}
-
-    echo "== delete users"
-    MANAGED_USERS=$(echo '\du+' | psql | awk -F'|' '$4 == " user managed by helm" {print $1}' | awk NF=NF RS= OFS=' ')
-    DEFINED_USERS="{{ join " " (keys .Values.users) }}"
-    DELETE_USERS=$(for user in $MANAGED_USERS; do case " $DEFINED_USERS " in *" $user "*) :;; *) echo $user;; esac; done)
-
-    echo "users to delete: $DELETE_USERS"
-    for user in $DELETE_USERS; do
-      # https://stackoverflow.com/a/51257346/2931267
-      psql -v ON_ERROR_STOP=1 --echo-all <<EOT
-    REASSIGN OWNED BY $user TO postgres;
-    DROP OWNED BY $user;
-    DROP USER $user;
-    EOT
-    done
-
-    echo "== create roles"
-    psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
-    SELECT 'CREATE ROLE app_admin NOINHERIT;'
-    WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'app_admin')\gexec
-    COMMENT ON ROLE app_admin IS 'role managed by helm';
-    EOT
-
-    echo "== grant privileges on databases to roles"
-    psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
-    ALTER DATABASE app OWNER TO app_admin;
-
-    DO $$
-    DECLARE
-      schema_record record;
-    BEGIN
-      -- Loop over all schemas
-      FOR schema_record IN SELECT schema_name FROM information_schema.schemata WHERE schema_name NOT IN ('pg_catalog', 'information_schema') LOOP
-        -- Changing Schema Ownership
-        EXECUTE format('ALTER SCHEMA %I OWNER TO %I', schema_record.schema_name, 'app_admin');
-
-        -- Add rights for the admin role
-        EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
-        EXECUTE format('GRANT ALL ON ALL TABLES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
-        EXECUTE format('GRANT ALL ON ALL SEQUENCES IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
-        EXECUTE format('GRANT ALL ON ALL FUNCTIONS IN SCHEMA %I TO %I', schema_record.schema_name, 'app_admin');
-        EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', schema_record.schema_name, 'app_admin');
-        EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', schema_record.schema_name, 'app_admin');
-        EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', schema_record.schema_name, 'app_admin');
-      END LOOP;
-    END$$;
-    EOT
-
-    echo "== setup event trigger for schema creation"
-    psql -v ON_ERROR_STOP=1 --echo-all -d "app" <<\EOT
-    CREATE OR REPLACE FUNCTION auto_grant_schema_privileges()
-    RETURNS event_trigger LANGUAGE plpgsql AS $$
-    DECLARE
-      obj record;
-    BEGIN
-      FOR obj IN SELECT * FROM pg_event_trigger_ddl_commands() WHERE command_tag = 'CREATE SCHEMA' LOOP
-        -- Set owner for schema
-        EXECUTE format('ALTER SCHEMA %I OWNER TO %I', obj.object_identity, 'app_admin');
-
-        -- Set privileges for admin role
-        EXECUTE format('GRANT ALL ON SCHEMA %I TO %I', obj.object_identity, 'app_admin');
-        EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON TABLES TO %I', obj.object_identity, 'app_admin');
-        EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON SEQUENCES TO %I', obj.object_identity, 'app_admin');
-        EXECUTE format('ALTER DEFAULT PRIVILEGES IN SCHEMA %I GRANT ALL ON FUNCTIONS TO %I', obj.object_identity, 'app_admin');
-      END LOOP;
-    END;
-    $$;
-
-    DROP EVENT TRIGGER IF EXISTS trigger_auto_grant;
-    CREATE EVENT TRIGGER trigger_auto_grant ON ddl_command_end
-    WHEN TAG IN ('CREATE SCHEMA')
-    EXECUTE PROCEDURE auto_grant_schema_privileges();
-    EOT
-
-    echo "== assign roles to users"
-    psql -v ON_ERROR_STOP=1 --echo-all <<\EOT
-    GRANT app_admin TO app;
-{{- range $user, $u := $.Values.users }}
-    GRANT app_admin TO {{ $user }};
-{{- end }}
-    EOT
@@ -5,24 +5,78 @@ metadata:
   name: {{ .Release.Name }}-postgres
 spec:
   instances: {{ .Values.replicas }}
+  {{- if .Values.backup.enabled }}
+  backup:
+    barmanObjectStore:
+      destinationPath: {{ .Values.backup.destinationPath }}
+      endpointURL: {{ .Values.backup.endpointURL }}
+      s3Credentials:
+        accessKeyId:
+          name: {{ .Release.Name }}-s3-creds
+          key: AWS_ACCESS_KEY_ID
+        secretAccessKey:
+          name: {{ .Release.Name }}-s3-creds
+          key: AWS_SECRET_ACCESS_KEY
+    retentionPolicy: {{ .Values.backup.retentionPolicy }}
+  {{- end }}
+
+  bootstrap:
+    initdb:
+      postInitSQL:
+        - 'CREATE EXTENSION IF NOT EXISTS documentdb CASCADE;'
+    {{- if .Values.bootstrap.enabled }}
+    recovery:
+      source: {{ .Values.bootstrap.oldName }}
+      {{- if .Values.bootstrap.recoveryTime }}
+      recoveryTarget:
+        targetTime: {{ .Values.bootstrap.recoveryTime }}
+      {{- end }}
+    {{- end }}
+  {{- if .Values.bootstrap.enabled }}
+  externalClusters:
+    - name: {{ .Values.bootstrap.oldName }}
+      barmanObjectStore:
+        destinationPath: {{ .Values.backup.destinationPath }}
+        endpointURL: {{ .Values.backup.endpointURL }}
+        s3Credentials:
+          accessKeyId:
+            name: {{ .Release.Name }}-s3-creds
+            key: AWS_ACCESS_KEY_ID
+          secretAccessKey:
+            name: {{ .Release.Name }}-s3-creds
+            key: AWS_SECRET_ACCESS_KEY
+  {{- end }}
+  imageName: ghcr.io/ferretdb/postgres-documentdb:17-0.105.0-ferretdb-2.4.0
+  postgresUID: 999
+  postgresGID: 999
   enableSuperuserAccess: true
   {{- $configMap := lookup "v1" "ConfigMap" "cozy-system" "cozystack-scheduling" }}
   {{- if $configMap }}
   {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
   {{- if $rawConstraints }}
   {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
+    labelSelector:
+      matchLabels:
+        cnpg.io/cluster: {{ .Release.Name }}-postgres
   {{- end }}
   {{- end }}
   minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
   maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}
-  {{- if .Values.resources }}
-  resources: {{- toYaml .Values.resources | nindent 4 }}
-  {{- else if ne .Values.resourcesPreset "none" }}
-  resources: {{- include "resources.preset" (dict "type" .Values.resourcesPreset "Release" .Release) | nindent 4 }}
-  {{- end }}
+  resources: {{- include "cozy-lib.resources.defaultingSanitize" (list .Values.resourcesPreset .Values.resources $) | nindent 4 }}
   monitoring:
     enablePodMonitor: true
+
+  postgresql:
+    shared_preload_libraries:
+      - pg_cron
+      - pg_documentdb_core
+      - pg_documentdb
+    parameters:
+      cron.database_name: 'postgres'
+    pg_hba:
+      - host postgres postgres 127.0.0.1/32 trust
+      - host postgres postgres ::1/128 trust
+
   storage:
     size: {{ required ".Values.size is required" .Values.size }}
     {{- with .Values.storageClass }}
@@ -32,6 +86,7 @@ spec:
   inheritedMetadata:
     labels:
       policy.cozystack.io/allow-to-apiserver: "true"
+      app.kubernetes.io/instance: {{ .Release.Name }}
 
   {{- if .Values.users }}
   managed:
@@ -42,8 +97,6 @@ spec:
         passwordSecret:
           name: {{ printf "%s-user-%s" $.Release.Name $user }}
         login: true
-        inRoles:
-        - app
   {{- end }}
   {{- end }}
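Taken together, this template (a CloudNativePG Cluster, as the `cnpg.io/cluster` label selector and `minSyncReplicas`/`maxSyncReplicas` fields indicate) now runs FerretDB's `postgres-documentdb` build, preloads the DocumentDB libraries, and creates the extension at initdb time. A minimal rendered sketch for a hypothetical release named `example` with backup and bootstrap disabled:

```yaml
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: example-postgres
spec:
  instances: 2
  imageName: ghcr.io/ferretdb/postgres-documentdb:17-0.105.0-ferretdb-2.4.0
  postgresUID: 999                  # the FerretDB image runs postgres as uid/gid 999
  postgresGID: 999
  enableSuperuserAccess: true
  bootstrap:
    initdb:
      postInitSQL:
        - 'CREATE EXTENSION IF NOT EXISTS documentdb CASCADE;'
  postgresql:
    shared_preload_libraries:       # required before the extension can be created
      - pg_cron
      - pg_documentdb_core
      - pg_documentdb
    parameters:
      cron.database_name: 'postgres'
  storage:
    size: 10Gi
```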
Some files were not shown because too many files have changed in this diff.