Mirror of https://github.com/outbackdingo/cozystack.git
Synced 2026-01-28 18:18:41 +00:00

Compare commits: cilium-dis… ↔ release-0.… (599 commits)
(Commit table: 599 entries; the mirror rendered only abbreviated SHA hashes, with no authors, dates, or messages.)
.github/CODEOWNERS (vendored, 2 changes)

@@ -1 +1 @@
-* @kvaps @lllamnyp
+* @kvaps @lllamnyp @klinch0
.github/workflows/backport.yaml (vendored, new file, 53 lines)

@@ -0,0 +1,53 @@
name: Automatic Backport

on:
  pull_request_target:
    types: [closed]  # fires when PR is closed (merged)

concurrency:
  group: backport-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

permissions:
  contents: write
  pull-requests: write

jobs:
  backport:
    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'backport')
    runs-on: [self-hosted]

    steps:
      # 1. Decide which maintenance branch should receive the backport
      - name: Determine target maintenance branch
        id: target
        uses: actions/github-script@v7
        with:
          script: |
            let rel;
            try {
              rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
            } catch (e) {
              core.setFailed('No existing releases found; cannot determine backport target.');
              return;
            }
            const [maj, min] = rel.data.tag_name.replace(/^v/, '').split('.');
            const branch = `release-${maj}.${min}`;
            core.setOutput('branch', branch);
            console.log(`Latest release ${rel.data.tag_name}; backporting to ${branch}`);
      # 2. Checkout (required by backport-action)
      - name: Checkout repository
        uses: actions/checkout@v4

      # 3. Create the backport pull request
      - name: Create backport PR
        uses: korthout/backport-action@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          label_pattern: ''  # don't read labels for targets
          target_branches: ${{ steps.target.outputs.branch }}
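The branch-derivation step above can be rehearsed locally. A minimal shell sketch of the same logic, assuming the GitHub CLI (`gh`) is installed and authenticated against the repository:

    # Derive the backport target (release-X.Y) from the latest published
    # release, mirroring the github-script step in the workflow.
    tag=$(gh release view --json tagName -q '.tagName')   # e.g. v0.31.5
    ver=${tag#v}                   # strip the leading "v" -> 0.31.5
    maj=${ver%%.*}                 # major                 -> 0
    rest=${ver#*.}
    min=${rest%%.*}                # minor                 -> 31
    echo "release-${maj}.${min}"   # -> release-0.31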
.github/workflows/pre-commit.yml (vendored, 18 changes)

@@ -1,6 +1,12 @@
 name: Pre-Commit Checks
 
-on: [push, pull_request]
+on:
+  pull_request:
+    types: [labeled, opened, synchronize, reopened]
+
+concurrency:
+  group: pre-commit-${{ github.workflow }}-${{ github.event.pull_request.number }}
+  cancel-in-progress: true
 
 jobs:
   pre-commit:
@@ -8,6 +14,9 @@ jobs:
     steps:
       - name: Checkout code
         uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          fetch-tags: true
 
       - name: Set up Python
         uses: actions/setup-python@v4
@@ -21,12 +30,13 @@ jobs:
         run: |
           sudo apt update
           sudo apt install curl -y
-          curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
-          sudo apt install nodejs -y
-          git clone https://github.com/bitnami/readme-generator-for-helm
+          sudo apt install npm -y
+
+          git clone --branch 2.7.0 --depth 1 https://github.com/bitnami/readme-generator-for-helm.git
           cd ./readme-generator-for-helm
           npm install
-          npm install -g pkg
+          npm install -g @yao-pkg/pkg
           pkg . -o /usr/local/bin/readme-generator
 
       - name: Run pre-commit hooks
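The resulting `readme-generator` binary is what the pre-commit hooks invoke to keep chart READMEs in sync with `values.yaml`. A hypothetical manual run against one of the packaged charts (the chart path is only an example):

    # Regenerate a chart README from its values.yaml annotations.
    # Flags follow bitnami/readme-generator-for-helm's CLI.
    readme-generator \
      --values packages/apps/mysql/values.yaml \
      --readme packages/apps/mysql/README.md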
.github/workflows/pull-requests-release.yaml (vendored, new file, 169 lines)

@@ -0,0 +1,169 @@
name: "Releasing PR"

on:
  pull_request:
    types: [closed]

# Cancel in-flight runs for the same PR when a new push arrives.
concurrency:
  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  finalize:
    name: Finalize Release
    runs-on: [self-hosted]
    permissions:
      contents: write

    if: |
      github.event.pull_request.merged == true &&
      contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      # Extract tag from branch name (branch = release-X.Y.Z*)
      - name: Extract tag from branch name
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            const tag = `v${m[1]}`;
            core.setOutput('tag', tag);
            console.log(`✅ Tag to publish: ${tag}`);

      # Checkout repo & create / push annotated tag
      - name: Checkout repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create tag on merge commit
        run: |
          git tag -f ${{ steps.get_tag.outputs.tag }} ${{ github.sha }}
          git push -f origin ${{ steps.get_tag.outputs.tag }}

      # Ensure maintenance branch release-X.Y
      - name: Ensure maintenance branch release-X.Y
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // e.g. v0.1.3 or v0.1.3-rc3
            const match = tag.match(/^v(\d+)\.(\d+)\.\d+(?:[-\w\.]+)?$/);
            if (!match) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-suffix'`);
              return;
            }
            const line = `${match[1]}.${match[2]}`;
            const branch = `release-${line}`;

            // Get main branch commit for the tag
            const ref = await github.rest.git.getRef({
              owner: context.repo.owner,
              repo: context.repo.repo,
              ref: `tags/${tag}`
            });

            const commitSha = ref.data.object.sha;

            try {
              await github.rest.repos.getBranch({
                owner: context.repo.owner,
                repo: context.repo.repo,
                branch
              });

              await github.rest.git.updateRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
                ref: `heads/${branch}`,
                sha: commitSha,
                force: true
              });
              console.log(`🔁 Force-updated '${branch}' to ${commitSha}`);
            } catch (err) {
              if (err.status === 404) {
                await github.rest.git.createRef({
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  ref: `refs/heads/${branch}`,
                  sha: commitSha
                });
                console.log(`✅ Created branch '${branch}' at ${commitSha}`);
              } else {
                console.error('Unexpected error --', err);
                core.setFailed(`Unexpected error creating/updating branch: ${err.message}`);
                throw err;
              }
            }

      # Get the latest published release
      - name: Get the latest published release
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare current tag vs latest using semver-utils
      - name: Semver compare
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.get_tag.outputs.tag }}
          compare-to: ${{ steps.latest_release.outputs.tag }}

      # Derive flags: prerelease? make_latest?
      - name: Calculate publish flags
        id: flags
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}'; // v0.31.5-rc.1
            const m = tag.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/);
            if (!m) {
              core.setFailed(`❌ tag '${tag}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
            const isRc = Boolean(m[2]);
            core.setOutput('is_rc', isRc);
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            core.setOutput('make_latest', isRc || outdated ? 'false' : 'legacy');

      # Publish draft release with correct flags
      - name: Publish draft release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) throw new Error(`Draft release for ${tag} not found`);
            await github.rest.repos.updateRelease({
              owner: context.repo.owner,
              repo: context.repo.repo,
              release_id: draft.id,
              draft: false,
              prerelease: ${{ steps.flags.outputs.is_rc }},
              make_latest: '${{ steps.flags.outputs.make_latest }}'
            });

            console.log(`🚀 Published release for ${tag}`);
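The tag-extraction step accepts only head branches shaped like `release-X.Y.Z` with an optional suffix. A rough POSIX-ERE rendering of the workflow's JavaScript pattern, checked against example branch names:

    # Approximate shell equivalent of /^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/
    re='^release-([0-9]+\.[0-9]+\.[0-9]+([-A-Za-z0-9_.]+)?)$'
    for b in release-0.31.5 release-0.31.5-rc.1 release-0.31 main; do
      if [[ $b =~ $re ]]; then
        echo "$b -> tag v${BASH_REMATCH[1]}"
      else
        echo "$b -> rejected"    # e.g. maintenance branches and main
      fi
    done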
.github/workflows/pull-requests.yaml (vendored, new file, 269 lines)

@@ -0,0 +1,269 @@
name: Pull Request

on:
  pull_request:
    types: [labeled, opened, synchronize, reopened]

# Cancel in-flight runs for the same PR when a new push arrives.
concurrency:
  group: pr-${{ github.workflow }}-${{ github.event.pull_request.number }}
  cancel-in-progress: true

jobs:
  build:
    name: Build
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: write

    # Never run when the PR carries the "release" label.
    if: |
      !contains(github.event.pull_request.labels.*.name, 'release')

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      - name: Build
        run: make build
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      - name: Build Talos image
        run: make -C packages/core/installer talos-nocloud

      - name: Upload installer
        uses: actions/upload-artifact@v4
        with:
          name: cozystack-installer
          path: _out/assets/cozystack-installer.yaml

      - name: Upload Talos image
        uses: actions/upload-artifact@v4
        with:
          name: talos-image
          path: _out/assets/nocloud-amd64.raw.xz

  resolve_assets:
    name: "Resolve assets"
    runs-on: ubuntu-latest
    if: contains(github.event.pull_request.labels.*.name, 'release')
    outputs:
      installer_id: ${{ steps.fetch_assets.outputs.installer_id }}
      disk_id: ${{ steps.fetch_assets.outputs.disk_id }}

    steps:
      - name: Checkout code
        if: contains(github.event.pull_request.labels.*.name, 'release')
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Extract tag from PR branch (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        id: get_tag
        uses: actions/github-script@v7
        with:
          script: |
            const branch = context.payload.pull_request.head.ref;
            const m = branch.match(/^release-(\d+\.\d+\.\d+(?:[-\w\.]+)?)$/);
            if (!m) {
              core.setFailed(`❌ Branch '${branch}' does not match 'release-X.Y.Z[-suffix]'`);
              return;
            }
            core.setOutput('tag', `v${m[1]}`);

      - name: Find draft release & asset IDs (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        id: fetch_assets
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GH_PAT }}
          script: |
            const tag = '${{ steps.get_tag.outputs.tag }}';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo,
              per_page: 100
            });
            const draft = releases.data.find(r => r.tag_name === tag && r.draft);
            if (!draft) {
              core.setFailed(`Draft release '${tag}' not found`);
              return;
            }
            const find = (n) => draft.assets.find(a => a.name === n)?.id;
            const installerId = find('cozystack-installer.yaml');
            const diskId = find('nocloud-amd64.raw.xz');
            if (!installerId || !diskId) {
              core.setFailed('Required assets missing in draft release');
              return;
            }
            core.setOutput('installer_id', installerId);
            core.setOutput('disk_id', diskId);

  prepare_env:
    name: "Prepare environment"
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: read
    needs: ["build", "resolve_assets"]
    if: ${{ always() && (needs.build.result == 'success' || needs.resolve_assets.result == 'success') }}

    steps:
      # ▸ Regular PR path – download artefacts produced by the *build* job
      - name: "Download Talos image (regular PR)"
        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
        uses: actions/download-artifact@v4
        with:
          name: talos-image
          path: _out/assets

      # ▸ Release PR path – fetch artefacts from the corresponding draft release
      - name: Download assets from draft release (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        run: |
          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
            -o _out/assets/nocloud-amd64.raw.xz \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.disk_id }}"
        env:
          GH_PAT: ${{ secrets.GH_PAT }}

      # ▸ Start actual job steps
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: Prepare workspace
        run: |
          cd ..
          rm -rf /tmp/$SANDBOX_NAME
          cp -r cozystack /tmp/$SANDBOX_NAME
          sudo systemctl stop "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl daemon-reexec
          sudo systemd-run \
            --on-calendar="$(date -d 'now + 24 hours' '+%Y-%m-%d %H:%M:%S')" \
            --unit=rm-workspace-$SANDBOX_NAME \
            rm -rf /tmp/$SANDBOX_NAME

      - name: Prepare environment
        run: |
          cd /tmp/$SANDBOX_NAME
          make SANDBOX_NAME=$SANDBOX_NAME prepare-env

  install_cozystack:
    name: "Install Cozystack"
    runs-on: [self-hosted]
    permissions:
      contents: read
      packages: read
    needs: ["prepare_env", "resolve_assets"]
    if: ${{ always() && needs.prepare_env.result == 'success' }}

    steps:
      - name: Prepare _out/assets directory
        run: mkdir -p _out/assets

      # ▸ Regular PR path – download artefacts produced by the *build* job
      - name: "Download installer (regular PR)"
        if: "!contains(github.event.pull_request.labels.*.name, 'release')"
        uses: actions/download-artifact@v4
        with:
          name: cozystack-installer
          path: _out/assets

      # ▸ Release PR path – fetch artefacts from the corresponding draft release
      - name: Download assets from draft release (release PR)
        if: contains(github.event.pull_request.labels.*.name, 'release')
        run: |
          curl -sSL -H "Authorization: token ${GH_PAT}" -H "Accept: application/octet-stream" \
            -o _out/assets/cozystack-installer.yaml \
            "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/assets/${{ needs.resolve_assets.outputs.installer_id }}"
        env:
          GH_PAT: ${{ secrets.GH_PAT }}

      # ▸ Start actual job steps
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: Install Cozystack into sandbox
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME install-cozystack

  detect_test_matrix:
    name: "Detect e2e test matrix"
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set.outputs.matrix }}

    steps:
      - uses: actions/checkout@v4
      - id: set
        run: |
          apps=$(find hack/e2e-apps -maxdepth 1 -mindepth 1 -name '*.bats' | \
            awk -F/ '{sub(/\..+/, "", $NF); print $NF}' | jq -R . | jq -cs .)
          echo "matrix={\"app\":$apps}" >> "$GITHUB_OUTPUT"

  test_apps:
    strategy:
      matrix: ${{ fromJson(needs.detect_test_matrix.outputs.matrix) }}
    name: Test ${{ matrix.app }}
    runs-on: [self-hosted]
    needs: [install_cozystack, detect_test_matrix]
    if: ${{ always() && (needs.install_cozystack.result == 'success' && needs.detect_test_matrix.result == 'success') }}

    steps:
      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: E2E Apps
        run: |
          cd /tmp/$SANDBOX_NAME
          make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME test-apps-${{ matrix.app }}

  cleanup:
    name: Tear down environment
    runs-on: [self-hosted]
    needs: test_apps
    if: ${{ always() && needs.test_apps.result == 'success' }}

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Set sandbox ID
        run: echo "SANDBOX_NAME=cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)" >> $GITHUB_ENV

      - name: Tear down sandbox
        run: make -C packages/core/testing SANDBOX_NAME=$SANDBOX_NAME delete

      - name: Remove workspace
        run: rm -rf /tmp/$SANDBOX_NAME

      - name: Tear down timers
        run: |
          sudo systemctl stop "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "rm-workspace-$SANDBOX_NAME.timer" "rm-workspace-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl stop "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl reset-failed "teardown-$SANDBOX_NAME.timer" "teardown-$SANDBOX_NAME.service" 2>/dev/null || true
          sudo systemctl daemon-reexec
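Every job recomputes the same sandbox name instead of passing it around, so all of them address one shared environment. The derivation is plain shell and reproducible locally (the repository, workflow, and ref values below are examples):

    # Stable per-PR sandbox name: first 10 hex chars of SHA-256 over
    # "repo:workflow:ref" — identical in every job of the same run.
    GITHUB_REPOSITORY=cozystack/cozystack
    GITHUB_WORKFLOW="Pull Request"
    GITHUB_REF=refs/pull/123/merge
    echo "cozy-e2e-sandbox-$(echo "${GITHUB_REPOSITORY}:${GITHUB_WORKFLOW}:${GITHUB_REF}" | sha256sum | cut -c1-10)"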
.github/workflows/tags.yaml (vendored, new file, 232 lines)

@@ -0,0 +1,232 @@
name: Versioned Tag

on:
  push:
    tags:
      - 'v*.*.*'          # vX.Y.Z
      - 'v*.*.*-rc.*'     # vX.Y.Z-rc.N
      - 'v*.*.*-beta.*'   # vX.Y.Z-beta.N
      - 'v*.*.*-alpha.*'  # vX.Y.Z-alpha.N

concurrency:
  group: tags-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  prepare-release:
    name: Prepare Release
    runs-on: [self-hosted]
    permissions:
      contents: write
      packages: write
      pull-requests: write
      actions: write

    steps:
      # Check if a non-draft release with this tag already exists
      - name: Check if release already exists
        id: check_release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = context.ref.replace('refs/tags/', '');
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            const exists = releases.data.some(r => r.tag_name === tag && !r.draft);
            core.setOutput('skip', exists);
            console.log(exists ? `Release ${tag} already published` : `No published release ${tag}`);

      # If a published release already exists, skip the rest of the workflow
      - name: Skip if release already exists
        if: steps.check_release.outputs.skip == 'true'
        run: echo "Release already exists, skipping workflow."

      # Parse tag meta-data (rc?, maintenance line, etc.)
      - name: Parse tag
        if: steps.check_release.outputs.skip == 'false'
        id: tag
        uses: actions/github-script@v7
        with:
          script: |
            const ref = context.ref.replace('refs/tags/', ''); // e.g. v0.31.5-rc.1
            const m = ref.match(/^v(\d+\.\d+\.\d+)(-(?:alpha|beta|rc)\.\d+)?$/); // ['0.31.5', '-rc.1' | '-beta.1' | …]
            if (!m) {
              core.setFailed(`❌ tag '${ref}' must match 'vX.Y.Z' or 'vX.Y.Z-(alpha|beta|rc).N'`);
              return;
            }
            const version = m[1] + (m[2] ?? ''); // 0.31.5-rc.1
            const isRc = Boolean(m[2]);
            const [maj, min] = m[1].split('.');
            core.setOutput('tag', ref);              // v0.31.5-rc.1
            core.setOutput('version', version);      // 0.31.5-rc.1
            core.setOutput('is_rc', isRc);           // true
            core.setOutput('line', `${maj}.${min}`); // 0.31

      # Detect base branch (main or release-X.Y) the tag was pushed from
      - name: Get base branch
        if: steps.check_release.outputs.skip == 'false'
        id: get_base
        uses: actions/github-script@v7
        with:
          script: |
            const baseRef = context.payload.base_ref;
            if (!baseRef) {
              core.setFailed(`❌ base_ref is empty. Push the tag via 'git push origin HEAD:refs/tags/<tag>'.`);
              return;
            }
            const branch = baseRef.replace('refs/heads/', '');
            const ok = branch === 'main' || /^release-\d+\.\d+$/.test(branch);
            if (!ok) {
              core.setFailed(`❌ Tagged commit must belong to 'main' or 'release-X.Y'. Got '${branch}'`);
              return;
            }
            core.setOutput('branch', branch);

      # Checkout & login once
      - name: Checkout code
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - name: Login to GHCR
        if: steps.check_release.outputs.skip == 'false'
        uses: docker/login-action@v3
        with:
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
          registry: ghcr.io
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      # Build project artifacts
      - name: Build
        if: steps.check_release.outputs.skip == 'false'
        run: make build
        env:
          DOCKER_CONFIG: ${{ runner.temp }}/.docker

      # Commit built artifacts
      - name: Commit release artifacts
        if: steps.check_release.outputs.skip == 'false'
        run: |
          git config user.name "github-actions"
          git config user.email "github-actions@github.com"
          git add .
          git commit -m "Prepare release ${GITHUB_REF#refs/tags/}" -s || echo "No changes to commit"
          git push origin HEAD || true

      # Get `latest_version` from latest published release
      - name: Get latest published release
        if: steps.check_release.outputs.skip == 'false'
        id: latest_release
        uses: actions/github-script@v7
        with:
          script: |
            try {
              const rel = await github.rest.repos.getLatestRelease({
                owner: context.repo.owner,
                repo: context.repo.repo
              });
              core.setOutput('tag', rel.data.tag_name);
            } catch (_) {
              core.setOutput('tag', '');
            }

      # Compare tag (A) with latest (B)
      - name: Semver compare
        if: steps.check_release.outputs.skip == 'false'
        id: semver
        uses: madhead/semver-utils@v4.3.0
        with:
          version: ${{ steps.tag.outputs.tag }}               # A
          compare-to: ${{ steps.latest_release.outputs.tag }} # B

      # Create or reuse DRAFT GitHub Release
      - name: Create / reuse draft release
        if: steps.check_release.outputs.skip == 'false'
        id: release
        uses: actions/github-script@v7
        with:
          script: |
            const tag = '${{ steps.tag.outputs.tag }}';
            const isRc = ${{ steps.tag.outputs.is_rc }};
            const outdated = '${{ steps.semver.outputs.comparison-result }}' === '<';
            const makeLatest = outdated ? false : 'legacy';
            const releases = await github.rest.repos.listReleases({
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            let rel = releases.data.find(r => r.tag_name === tag);
            if (!rel) {
              rel = await github.rest.repos.createRelease({
                owner: context.repo.owner,
                repo: context.repo.repo,
                tag_name: tag,
                name: tag,
                draft: true,
                prerelease: isRc,
                make_latest: makeLatest
              });
              console.log(`Draft release created for ${tag}`);
            } else {
              console.log(`Re-using existing release ${tag}`);
            }
            core.setOutput('upload_url', rel.upload_url);

      # Build + upload assets (optional)
      - name: Build & upload assets
        if: steps.check_release.outputs.skip == 'false'
        run: |
          make assets
          make upload_assets VERSION=${{ steps.tag.outputs.tag }}
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      # Create release-X.Y.Z branch and push (force-update)
      - name: Create release branch
        if: steps.check_release.outputs.skip == 'false'
        run: |
          BRANCH="release-${GITHUB_REF#refs/tags/v}"
          git branch -f "$BRANCH"
          git push -f origin "$BRANCH"

      # Create pull request into original base branch (if absent)
      - name: Create pull request if not exists
        if: steps.check_release.outputs.skip == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            const version = context.ref.replace('refs/tags/v', '');
            const base = '${{ steps.get_base.outputs.branch }}';
            const head = `release-${version}`;

            const prs = await github.rest.pulls.list({
              owner: context.repo.owner,
              repo: context.repo.repo,
              head: `${context.repo.owner}:${head}`,
              base
            });
            if (prs.data.length === 0) {
              const pr = await github.rest.pulls.create({
                owner: context.repo.owner,
                repo: context.repo.repo,
                head,
                base,
                title: `Release v${version}`,
                body: `This PR prepares the release \`v${version}\`.`,
                draft: false
              });
              await github.rest.issues.addLabels({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: pr.data.number,
                labels: ['release']
              });
              console.log(`Created PR #${pr.data.number}`);
            } else {
              console.log(`PR already exists from ${head} to ${base}`);
            }
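Note that the base-branch check depends on `context.payload.base_ref`, which GitHub only fills in when the pushed tag's commit arrives together with a branch ref. Pushing the tag from the branch head, as the step's error message suggests, keeps it populated (the tag below is an example):

    # Push the branch head as a tag in one ref update so base_ref is set.
    git push origin HEAD:refs/tags/v0.31.5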
.gitignore (vendored, 3 changes)

@@ -1,6 +1,7 @@
 _out
 .git
 .idea
+.vscode
 
 # User-specific stuff
 .idea/**/workspace.xml
@@ -75,4 +76,4 @@ fabric.properties
 .idea/caches/build_file_checksums.ser
 
-.DS_Store
 **/.DS_Store
+**/.DS_Store
@@ -18,6 +18,7 @@ repos:
             (cd "$dir" && make generate)
           fi
         done
+        git diff --color=always | cat
       '
     language: script
     files: ^.*$
ADOPTERS.md

@@ -13,8 +13,8 @@ but it means a lot to us.
 
 To add your organization to this list, you can either:
 
-- [open a pull request](https://github.com/aenix-io/cozystack/pulls) to directly update this file, or
-- [edit this file](https://github.com/aenix-io/cozystack/blob/main/ADOPTERS.md) directly in GitHub
+- [open a pull request](https://github.com/cozystack/cozystack/pulls) to directly update this file, or
+- [edit this file](https://github.com/cozystack/cozystack/blob/main/ADOPTERS.md) directly in GitHub
 
 Feel free to ask in the Slack chat if you any questions and/or require
 assistance with updating this list.
@@ -6,13 +6,13 @@ As you get started, you are in the best position to give us feedbacks on areas of
 
 * Problems found while setting up the development environment
 * Gaps in our documentation
-* Bugs in our Github actions
+* Bugs in our GitHub actions
 
-First, though, it is important that you read the [code of conduct](CODE_OF_CONDUCT.md).
+First, though, it is important that you read the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
 
 The guidelines below are a starting point. We don't want to limit your
 creativity, passion, and initiative. If you think there's a better way, please
-feel free to bring it up in a Github discussion, or open a pull request. We're
+feel free to bring it up in a GitHub discussion, or open a pull request. We're
 certain there are always better ways to do things, we just need to start some
 constructive dialogue!
@@ -23,9 +23,9 @@ We welcome many types of contributions including:
 * New features
 * Builds, CI/CD
 * Bug fixes
-* [Documentation](https://github.com/aenix-io/cozystack-website/tree/main)
+* [Documentation](https://GitHub.com/cozystack/cozystack-website/tree/main)
 * Issue Triage
-* Answering questions on Slack or Github Discussions
+* Answering questions on Slack or GitHub Discussions
 * Web design
 * Communications / Social Media / Blog Posts
 * Events participation
@@ -34,7 +34,7 @@ We welcome many types of contributions including:
 ## Ask for Help
 
 The best way to reach us with a question when contributing is to drop a line in
-our [Telegram channel](https://t.me/cozystack), or start a new Github discussion.
+our [Telegram channel](https://t.me/cozystack), or start a new GitHub discussion.
 
 ## Raising Issues
 
GOVERNANCE.md (new file, 91 lines)

@@ -0,0 +1,91 @@
# Cozystack Governance

This document defines the governance structure of the Cozystack community, outlining how members collaborate to achieve shared goals.

## Overview

**Cozystack**, a Cloud Native Computing Foundation (CNCF) project, is committed
to building an open, inclusive, productive, and self-governing open source
community focused on building a high-quality open source PaaS and framework for building clouds.

## Code Repositories

The following code repositories are governed by the Cozystack community and
maintained under the `cozystack` namespace:

* **[Cozystack](https://github.com/cozystack/cozystack):** Main Cozystack codebase
* **[website](https://github.com/cozystack/website):** Cozystack website and documentation sources
* **[Talm](https://github.com/cozystack/talm):** Tool for managing Talos Linux the GitOps way
* **[cozy-proxy](https://github.com/cozystack/cozy-proxy):** A simple kube-proxy addon for 1:1 NAT services in Kubernetes with NFT backend
* **[cozystack-telemetry-server](https://github.com/cozystack/cozystack-telemetry-server):** Cozystack telemetry
* **[talos-bootstrap](https://github.com/cozystack/talos-bootstrap):** An interactive Talos Linux installer
* **[talos-meta-tool](https://github.com/cozystack/talos-meta-tool):** Tool for writing network metadata into META partition

## Community Roles

* **Users:** Members that engage with the Cozystack community via any medium, including Slack, Telegram, GitHub, and mailing lists.
* **Contributors:** Members contributing to the projects by contributing and reviewing code, writing documentation,
  responding to issues, participating in proposal discussions, and so on.
* **Directors:** Non-technical project leaders.
* **Maintainers:** Technical project leaders.

## Contributors

Cozystack is for everyone. Anyone can become a Cozystack contributor simply by
contributing to the project, whether through code, documentation, blog posts,
community management, or other means.
As with all Cozystack community members, contributors are expected to follow the
[Cozystack Code of Conduct](https://github.com/cozystack/cozystack/blob/main/CODE_OF_CONDUCT.md).

All contributions to Cozystack code, documentation, or other components in the
Cozystack GitHub organisation must follow the
[contributing guidelines](https://github.com/cozystack/cozystack/blob/main/CONTRIBUTING.md).
Whether these contributions are merged into the project is the prerogative of the maintainers.

## Directors

Directors are responsible for non-technical leadership functions within the project.
This includes representing Cozystack and its maintainers to the community, to the press,
and to the outside world; interfacing with CNCF and other governance entities;
and participating in project decision-making processes when appropriate.

Directors are elected by a majority vote of the maintainers.

## Maintainers

Maintainers have the right to merge code into the project.
Anyone can become a Cozystack maintainer (see "Becoming a Maintainer" below).

### Expectations

Cozystack maintainers are expected to:

* Review pull requests, triage issues, and fix bugs in their areas of
  expertise, ensuring that all changes go through the project's code review
  and integration processes.
* Monitor cncf-cozystack-* emails, the Cozystack Slack channels in Kubernetes
  and CNCF Slack workspaces, Telegram groups, and help out when possible.
* Rapidly respond to any time-sensitive security release processes.
* Attend Cozystack community meetings.

If a maintainer is no longer interested in or cannot perform the duties
listed above, they should move themselves to emeritus status.
If necessary, this can also occur through the decision-making process outlined below.

### Becoming a Maintainer

Anyone can become a Cozystack maintainer. Maintainers should be extremely
proficient in cloud native technologies and/or Go; have relevant domain expertise;
have the time and ability to meet the maintainer's expectations above;
and demonstrate the ability to work with the existing maintainers and project processes.

To become a maintainer, start by expressing interest to existing maintainers.
Existing maintainers will then ask you to demonstrate the qualifications above
by contributing PRs, doing code reviews, and other such tasks under their guidance.
After several months of working together, maintainers will decide whether to grant maintainer status.

## Project Decision-making Process

Ideally, all project decisions are resolved by consensus of maintainers and directors.
If this is not possible, a vote will be called.
The voting process is a simple majority in which each maintainer and director receives one vote.
Makefile (32 changes)

@@ -1,8 +1,14 @@
 .PHONY: manifests repos assets
 
-build:
+build-deps:
+	@command -V find docker skopeo jq gh helm > /dev/null
+	@yq --version | grep -q "mikefarah" || (echo "mikefarah/yq is required" && exit 1)
+	@tar --version | grep -q GNU || (echo "GNU tar is required" && exit 1)
+	@sed --version | grep -q GNU || (echo "GNU sed is required" && exit 1)
+	@awk --version | grep -q GNU || (echo "GNU awk is required" && exit 1)
+
+build: build-deps
 	make -C packages/apps/http-cache image
 	make -C packages/apps/postgres image
 	make -C packages/apps/mysql image
 	make -C packages/apps/clickhouse image
 	make -C packages/apps/kubernetes image
@@ -11,17 +17,15 @@
 	make -C packages/system/cozystack-controller image
 	make -C packages/system/cilium image
 	make -C packages/system/kubeovn image
+	make -C packages/system/kubeovn-webhook image
 	make -C packages/system/dashboard image
 	make -C packages/system/metallb image
 	make -C packages/system/kamaji image
 	make -C packages/system/bucket image
 	make -C packages/core/testing image
 	make -C packages/core/installer image
+	make manifests
 
-manifests:
-	(cd packages/core/installer/; helm template -n cozy-installer installer .) > manifests/cozystack-installer.yaml
-	sed -i 's|@sha256:[^"]\+||' manifests/cozystack-installer.yaml
-
 repos:
 	rm -rf _out
 	make -C packages/apps check-version-map
@@ -32,14 +36,24 @@ repos:
 	mkdir -p _out/logos
 	cp ./packages/apps/*/logos/*.svg ./packages/extra/*/logos/*.svg _out/logos/
 
+manifests:
+	mkdir -p _out/assets
+	(cd packages/core/installer/; helm template -n cozy-installer installer .) > _out/assets/cozystack-installer.yaml
+
 assets:
-	make -C packages/core/installer/ assets
+	make -C packages/core/installer assets
 
 test:
 	test -f _out/assets/nocloud-amd64.raw.xz || make -C packages/core/installer talos-nocloud
 	make -C packages/core/testing apply
-	make -C packages/core/testing test
+	make -C packages/core/testing test-applications
+
+prepare-env:
+	make -C packages/core/testing apply
+	make -C packages/core/testing prepare-cluster
 
 generate:
 	hack/update-codegen.sh
+
+upload_assets: manifests
+	hack/upload-assets.sh
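The `build-deps` guards can also be run on their own before attempting a full build; the same checks as a standalone script (taken directly from the recipe above):

    #!/bin/sh
    # Fail fast if required tools are missing or are non-GNU variants.
    command -V find docker skopeo jq gh helm > /dev/null || exit 1
    yq --version  | grep -q "mikefarah" || { echo "mikefarah/yq is required"; exit 1; }
    tar --version | grep -q GNU || { echo "GNU tar is required"; exit 1; }
    sed --version | grep -q GNU || { echo "GNU sed is required"; exit 1; }
    awk --version | grep -q GNU || { echo "GNU awk is required"; exit 1; }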
README.md (63 changes)

@@ -2,63 +2,68 @@
 
 
 [](https://opensource.org/)
-[](https://opensource.org/licenses/)
-[](https://aenix.io/contact-us/#meet)
-[](https://aenix.io/cozystack/)
-[](https://github.com/aenix-io/cozystack)
-[](https://github.com/aenix-io/cozystack)
+[](https://opensource.org/licenses/)
+[](https://cozystack.io/support/)
+[](https://github.com/cozystack/cozystack)
+[](https://github.com/cozystack/cozystack/releases/latest)
+[](https://github.com/cozystack/cozystack/graphs/contributors)
 
 # Cozystack
 
 **Cozystack** is a free PaaS platform and framework for building clouds.
 
-With Cozystack, you can transform your bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters, Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+Cozystack is a [CNCF Sandbox Level Project](https://www.cncf.io/sandbox-projects/) that was originally built and sponsored by [Ænix](https://aenix.io/).
 
-You can use Cozystack to build your own cloud or to provide a cost-effective development environments.
+With Cozystack, you can transform a bunch of servers into an intelligent system with a simple REST API for spawning Kubernetes clusters,
+Database-as-a-Service, virtual machines, load balancers, HTTP caching services, and other services with ease.
+
+Use Cozystack to build your own cloud or provide a cost-effective development environment.
 
 
 
 ## Use-Cases
 
-* [**Using Cozystack to build public cloud**](https://cozystack.io/docs/use-cases/public-cloud/)
-  You can use Cozystack as backend for a public cloud
+* [**Using Cozystack to build a public cloud**](https://cozystack.io/docs/guides/use-cases/public-cloud/)
+  You can use Cozystack as a backend for a public cloud
 
-* [**Using Cozystack to build private cloud**](https://cozystack.io/docs/use-cases/private-cloud/)
-  You can use Cozystack as platform to build a private cloud powered by Infrastructure-as-Code approach
+* [**Using Cozystack to build a private cloud**](https://cozystack.io/docs/guides/use-cases/private-cloud/)
+  You can use Cozystack as a platform to build a private cloud powered by Infrastructure-as-Code approach
 
-* [**Using Cozystack as Kubernetes distribution**](https://cozystack.io/docs/use-cases/kubernetes-distribution/)
-  You can use Cozystack as Kubernetes distribution for Bare Metal
+* [**Using Cozystack as a Kubernetes distribution**](https://cozystack.io/docs/guides/use-cases/kubernetes-distribution/)
+  You can use Cozystack as a Kubernetes distribution for Bare Metal
 
 ## Screenshot
 
 
 
 ## Documentation
 
-The documentation is located on official [cozystack.io](https://cozystack.io) website.
+The documentation is located on the [cozystack.io](https://cozystack.io) website.
 
-Read [Get Started](https://cozystack.io/docs/get-started/) section for a quick start.
+Read the [Getting Started](https://cozystack.io/docs/getting-started/) section for a quick start.
 
-If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/troubleshooting/), and work your way through the process that we've outlined.
+If you encounter any difficulties, start with the [troubleshooting guide](https://cozystack.io/docs/operations/troubleshooting/) and work your way through the process that we've outlined.
 
 ## Versioning
 
 Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
-A full list of the available releases is available in the GitHub repository's [Release](https://github.com/aenix-io/cozystack/releases) section.
+A full list of the available releases is available in the GitHub repository's [Release](https://github.com/cozystack/cozystack/releases) section.
 
-- [Roadmap](https://github.com/orgs/aenix-io/projects/2)
+- [Roadmap](https://cozystack.io/docs/roadmap/)
 
 ## Contributions
 
 Contributions are highly appreciated and very welcomed!
 
-In case of bugs, please, check if the issue has been already opened by checking the [GitHub Issues](https://github.com/aenix-io/cozystack/issues) section.
-In case it isn't, you can open a new one: a detailed report will help us to replicate it, assess it, and work on a fix.
+In case of bugs, please check if the issue has already been opened by checking the [GitHub Issues](https://github.com/cozystack/cozystack/issues) section.
+If it isn't, you can open a new one. A detailed report will help us replicate it, assess it, and work on a fix.
 
-You can express your intention in working on the fix on your own.
+You can express your intention to work on the fix on your own.
 Commits are used to generate the changelog, and their author will be referenced in it.
 
-In case of **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/aenix-io/cozystack/discussions/categories/feature-requests).
+If you have **Feature Requests** please use the [Discussion's Feature Request section](https://github.com/cozystack/cozystack/discussions/categories/feature-requests).
 
-You can join our weekly community meetings (just add this events to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics)) or [Telegram group](https://t.me/cozystack).
+## Community
+
+You are welcome to join our [Telegram group](https://t.me/cozystack) and come to our weekly community meetings.
+Add them to your [Google Calendar](https://calendar.google.com/calendar?cid=ZTQzZDIxZTVjOWI0NWE5NWYyOGM1ZDY0OWMyY2IxZTFmNDMzZTJlNjUzYjU2ZGJiZGE3NGNhMzA2ZjBkMGY2OEBncm91cC5jYWxlbmRhci5nb29nbGUuY29t) or [iCal](https://calendar.google.com/calendar/ical/e43d21e5c9b45a95f28c5d649c2cb1e1f433e2e653b56dbbda74ca306f0d0f68%40group.calendar.google.com/public/basic.ics) for convenience.
 
 ## License
 
@@ -67,8 +72,4 @@ The code is provided as-is with no warranties.
 
 ## Commercial Support
 
-[**Ænix**](https://aenix.io) offers enterprise-grade support, available 24/7.
-
-We provide all types of assistance, including consultations, development of missing features, design, assistance with installation, and integration.
-
-[Contact us](https://aenix.io/contact/)
+A list of companies providing commercial support for this project can be found on [official site](https://cozystack.io/support/).
@@ -1,4 +1,4 @@
-API rule violation: list_type_missing,github.com/aenix-io/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
+API rule violation: list_type_missing,github.com/cozystack/cozystack/pkg/apis/apps/v1alpha1,ApplicationStatus,Conditions
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
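For context, the `list_type_missing` entry above is a recorded exception from the Kubernetes API linter: the `Conditions` field declares no explicit list type. A hypothetical sketch of how such a field is usually annotated in kubebuilder-style APIs follows; the actual Cozystack type may declare it differently.

```go
// Hypothetical sketch only: the list-type markers that would satisfy
// the API linter's list_type_missing rule for a Conditions field.
// The real Cozystack ApplicationStatus is not shown in this diff.
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type ApplicationStatus struct {
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}
```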
@@ -19,7 +19,7 @@ package main
 import (
 	"os"

-	"github.com/aenix-io/cozystack/pkg/cmd/server"
+	"github.com/cozystack/cozystack/pkg/cmd/server"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/component-base/cli"
 )
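Rewritten import paths like the one above only resolve if the Go module path itself moved from the `aenix-io` organization to `cozystack`; presumably the repository's `go.mod` carries the matching one-line change, along the lines of:

```
// go.mod (assumed, not shown in this diff): the module path must match
// the new "github.com/cozystack/cozystack/..." import prefix.
module github.com/cozystack/cozystack
```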
@@ -36,9 +36,11 @@ import (
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"

-	cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
-	"github.com/aenix-io/cozystack/internal/controller"
-	"github.com/aenix-io/cozystack/internal/telemetry"
+	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
+	"github.com/cozystack/cozystack/internal/controller"
+	"github.com/cozystack/cozystack/internal/telemetry"
+
+	helmv2 "github.com/fluxcd/helm-controller/api/v2"
 	// +kubebuilder:scaffold:imports
 )

@@ -51,6 +53,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

 	utilruntime.Must(cozystackiov1alpha1.AddToScheme(scheme))
+	utilruntime.Must(helmv2.AddToScheme(scheme))
 	// +kubebuilder:scaffold:scheme
 }
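Registering the Flux `helmv2` types in the scheme is what lets the manager's cached client encode and decode `HelmRelease` objects. A minimal usage sketch, not taken from this diff:

```go
// Sketch: with helmv2 registered in the scheme, the manager's client
// can list HelmRelease objects directly. ctx, mgr, and setupLog are
// assumed to exist as in the surrounding main.go.
var releases helmv2.HelmReleaseList
if err := mgr.GetClient().List(ctx, &releases); err != nil {
	setupLog.Error(err, "unable to list HelmReleases")
}
```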
@@ -178,6 +181,31 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "WorkloadMonitor")
 		os.Exit(1)
 	}
+
+	if err = (&controller.WorkloadReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "WorkloadReconciler")
+		os.Exit(1)
+	}
+
+	if err = (&controller.TenantHelmReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "TenantHelmReconciler")
+		os.Exit(1)
+	}
+
+	if err = (&controller.CozystackConfigReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "CozystackConfigReconciler")
+		os.Exit(1)
+	}
+
 	// +kubebuilder:scaffold:builder

 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
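The three registrations added above follow the standard controller-runtime pattern. For readers unfamiliar with it, a minimal reconciler satisfying `SetupWithManager` looks roughly like the following; the real `WorkloadReconciler` lives in `internal/controller` and is not shown in this diff, so the watched `Workload` type here is an assumption.

```go
package controller

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
)

// WorkloadReconciler sketch: mirrors the struct literal used in main.go.
// The actual implementation and watched type are assumptions.
type WorkloadReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

// Reconcile is invoked for each watched-object event; a real
// implementation fetches the object and converges state toward spec.
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	return ctrl.Result{}, nil
}

// SetupWithManager registers the reconciler with the manager; this is
// exactly what the calls added in the hunk above invoke.
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&cozystackiov1alpha1.Workload{}).
		Complete(r)
}
```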
@@ -626,7 +626,7 @@
 "datasource": {
 "uid": "${DS_PROMETHEUS}"
 },
-"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"POD\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
+"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",container!=\"\",pod=~\".*-controller-.*\"}) by (pod)",
 "hide": false,
 "interval": "",
 "legendFormat": "{{pod}}",
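Every dashboard change from here on is the same one-line substitution: the `container!="POD"` matcher is dropped and only `container!=""` is kept. Presumably this reflects how recent cAdvisor/kubelet metrics label series: the pod-level aggregate (pause) cgroup now surfaces with an empty `container` label rather than the literal `POD` value, so a single matcher excludes it. A before/after sketch of the selector:

```promql
# Before: excluded both the legacy pause-container series and pod-level aggregates.
sum(container_memory_working_set_bytes{namespace="$namespace",container!="POD",container!=""}) by (pod)

# After: only the empty-label (pod-level aggregate) series needs excluding.
sum(container_memory_working_set_bytes{namespace="$namespace",container!=""}) by (pod)
```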
@@ -450,7 +450,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval])))\n / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "Total",
 "range": true,
@@ -520,7 +520,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"})) / sum(sum by (node) (avg_over_time(kube_node_status_allocatable{resource=\"memory\",unit=\"byte\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "Total",
 "range": true,
@@ -590,7 +590,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]))) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "Total",
 "range": true,
@@ -660,7 +660,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
+"expr": "sum(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} )) / sum(sum by (node) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"}[$__rate_interval])))",
 "hide": false,
 "legendFormat": "__auto",
 "range": true,
@@ -1128,7 +1128,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
+"expr": "sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0\n",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,
@@ -1143,7 +1143,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
+"expr": "sum(sum by (node) (rate(container_cpu_usage_seconds_total{container!=\"\",node=~\"$node\"}[$__rate_interval]) - on (namespace,pod,container,node) group_left avg by (namespace,pod,container, node)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"})) * -1 > 0)",
 "hide": false,
 "legendFormat": "Total",
 "range": true,
@@ -1527,7 +1527,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
+"expr": "(sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0\n",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,
@@ -1542,7 +1542,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
+"expr": "sum((sum by (node) (container_memory_working_set_bytes:without_kmem{container!=\"\",node=~\"$node\"} ) - sum by (node) (kube_pod_container_resource_requests{resource=\"memory\",node=~\"$node\"})) * -1 > 0)",
 "hide": false,
 "legendFormat": "Total",
 "range": true,
@@ -1909,7 +1909,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
+"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) * -1 > 0)\n",
 "format": "table",
 "instant": true,
 "range": false,
@@ -2037,7 +2037,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
+"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) * -1 >0)\n",
 "format": "table",
 "instant": true,
 "range": false,
@@ -2160,7 +2160,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"POD\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
+"expr": "topk(10, (sum by (namespace,pod,container)((rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\",container!=\"\",node=~\"$node\"}[$__rate_interval])) - on (namespace,pod,container) group_left avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"cpu\",node=~\"$node\"}))) > 0)\n",
 "format": "table",
 "instant": true,
 "range": false,
@@ -2288,7 +2288,7 @@
 },
 "editorMode": "code",
 "exemplar": false,
-"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"POD\",container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
+"expr": "topk(10, (sum by (namespace,container,pod) (container_memory_working_set_bytes:without_kmem{container!=\"\",namespace=~\"$namespace\",node=~\"$node\"}) - on (namespace,pod,container) avg by (namespace,pod,container)(kube_pod_container_resource_requests{resource=\"memory\",namespace=~\"$namespace\",node=~\"$node\"})) >0)\n",
 "format": "table",
 "instant": true,
 "range": false,
@@ -684,7 +684,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -710,7 +710,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -723,7 +723,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n    -\n    sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n  ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n    -\n    sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n  ) > 0\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -736,7 +736,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) \n  (\n    (\n      (\n        sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n      ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) \n  (\n    (\n      (\n        sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n      ) or sum by (namespace, pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "hide": false,
 "instant": true,
@@ -762,7 +762,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -786,7 +786,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "sum by (pod)\n(\n  avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -798,7 +798,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n      ) > 0\n    )\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n      ) > 0\n    )\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -810,7 +810,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n      ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n        -\n        sum by (namespace, pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n      ) or sum by (namespace, pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__range]))\n    ) > 0\n  )\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -848,7 +848,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -860,7 +860,7 @@
 "type": "prometheus",
 "uid": "${ds_prometheus}"
 },
-"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
+"expr": "(\n  sum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range])) \n  * on (pod)\n  sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__range]))\n)\nor\nsum by (pod) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}[$__range]) * 0)",
 "format": "table",
 "instant": true,
 "intervalFactor": 1,
@@ -1315,7 +1315,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "instant": false,
 "intervalFactor": 1,
@@ -1488,7 +1488,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -1502,7 +1502,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "interval": "",
 "intervalFactor": 1,
@@ -1642,7 +1642,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by (pod)\n  (\n    max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n    * on (pod)\n    sum by (pod) (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n    ) > 0\n  )",
+"expr": "sum by (pod)\n  (\n    max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n    * on (pod)\n    sum by (pod) (\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n    ) > 0\n  )",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ pod }}",
@@ -1779,7 +1779,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": " (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (\n    (\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n    )\n    or\n    sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n  )\n) > 0",
+"expr": " (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (\n    (\n      sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n      -\n      sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n    )\n    or\n    sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\"}[$__rate_interval]))\n  )\n) > 0",
 "format": "time_series",
 "hide": false,
 "intervalFactor": 1,
@@ -2095,7 +2095,7 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Usage",
@@ -2109,7 +2109,7 @@
 "refId": "D"
 },
 {
-"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
+"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
@@ -2295,7 +2295,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "System",
@@ -2306,7 +2306,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
+"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "User",
@@ -2468,7 +2468,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ pod }}",
@@ -2653,7 +2653,7 @@
 "uid": "${ds_prometheus}"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
@@ -2666,7 +2666,7 @@
 "uid": "${ds_prometheus}"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Cache",
@@ -2679,7 +2679,7 @@
 "uid": "${ds_prometheus}"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Swap",
@@ -2692,7 +2692,7 @@
 "uid": "${ds_prometheus}"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
@@ -2705,7 +2705,7 @@
 "uid": "${ds_prometheus}"
 },
 "editorMode": "code",
-"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))\n)",
+"expr": "sum (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}) by(pod)\n  * on (pod)\n  sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -2837,7 +2837,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (pod) group_left()\n  sum by (pod)\n  (\n    (\n      sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n      -\n      sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n    ) > 0\n  )\n)",
+"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (pod) group_left()\n  sum by (pod)\n  (\n    (\n      sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n      -\n      sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n    ) > 0\n  )\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ pod }}",
@@ -2974,7 +2974,7 @@
 "type": "prometheus",
 "uid": "$ds_prometheus"
 },
-"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (pod) group_left()\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n        -\n        sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n      ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n    ) > 0\n  )\n)",
+"expr": "(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (pod) group_left()\n  sum by (pod)\n  (\n    (\n      (\n        sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n        -\n        sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n      ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n    ) > 0\n  )\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ pod }}",
@@ -3290,56 +3290,56 @@
 "repeatDirection": "h",
 "targets": [
 {
-"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "RSS",
 "refId": "A"
 },
 {
-"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Cache",
 "refId": "B"
 },
 {
-"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Swap",
 "refId": "C"
 },
 {
-"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Working set bytes without kmem",
 "refId": "D"
 },
 {
-"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"POD\", resource=\"memory\"}[$__rate_interval]))\n)",
+"expr": "sum by (pod)\n(\n  kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\", pod=~\"$pod\"}\n  * on (controller_type, controller_name) group_left()\n  sum by (controller_type, controller_name) (avg_over_time(vpa_target_recommendation{namespace=\"$namespace\", container!=\"\", resource=\"memory\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "VPA Target",
 "refId": "E"
 },
 {
-"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Limits",
 "refId": "F"
 },
 {
-"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Requests",
 "refId": "G"
 },
 {
-"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"POD\"}[$__rate_interval]))",
+"expr": "sum by(pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", pod=~\"$pod\", container!=\"\"}[$__rate_interval]))",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "Kmem",
@@ -3834,7 +3834,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ pod }}",
@@ -3972,7 +3972,7 @@
 "uid": "$ds_prometheus"
 },
 "editorMode": "code",
-"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
+"expr": "sum by(pod) (\n  max(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}) by(pod)\n  * on (pod)\n  sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", pod=~\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))\n)",
 "format": "time_series",
 "intervalFactor": 1,
 "legendFormat": "{{ pod }}",
@@ -656,7 +656,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -680,7 +680,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__range]))\n ) \nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -692,7 +692,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -704,7 +704,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -728,7 +728,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
|
||||
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -740,7 +740,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (pod) group_left()\n sum by (namespace, pod)\n (\n avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])\n )\n )\n or\n count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -752,7 +752,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__range]))\n ) \n or \ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -764,7 +764,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -776,7 +776,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller)\n (\n avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range]))\n ) > 0\n )\n )\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -814,7 +814,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -826,7 +826,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -877,7 +877,7 @@
"type": "prometheus",
"uid": "${ds_prometheus}"
},
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"expr": "sum by (controller) (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range]) * on (pod) group_left() sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}[$__range])) by (controller) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -1475,7 +1475,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1646,7 +1646,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1657,7 +1657,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))))",
"expr": "sum (sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1798,7 +1798,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -1939,7 +1939,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"instant": false,
"intervalFactor": 1,
@@ -2257,28 +2257,28 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Usage",
"refId": "D"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "C"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "E"
},
{
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2458,7 +2458,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2470,7 +2470,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2622,7 +2622,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -2799,14 +2799,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2814,7 +2814,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2822,14 +2822,14 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -2955,7 +2955,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3091,7 +3091,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"expr": "sum by (controller)\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"}\n * on (namespace, pod) group_left()\n sum by (namespace, pod)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n or\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n +\n sum by(namespace, pod, container) (avg_over_time(container_memory:kmem{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))\n )\n ) > 0\n )\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -3408,14 +3408,14 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3423,7 +3423,7 @@
"refId": "B"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left() \n sum by (pod) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3431,35 +3431,35 @@
"refId": "C"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by(pod) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n ) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "E"
},
{
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"expr": "sum\n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"} \n * on (pod) group_left() \n sum by(pod) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\",namespace=\"$namespace\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "F"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (controller_type, controller_name) group_left()\n sum by(controller_type, controller_name) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
"refId": "G"
},
{
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))\n )",
"expr": "sum \n (\n kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller=\"$controller\"}\n * on (pod) group_left()\n sum by (pod) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Kmem",
@@ -3910,7 +3910,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_reads_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",
@@ -4049,7 +4049,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])))",
"expr": "sum by (controller) (kube_controller_pod{node=~\"$node\", namespace=\"$namespace\", controller_type=~\"$controller_type\", controller=~\"$controller\"} * on (pod) group_left() sum by (pod) (rate(container_fs_writes_total{node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ controller }}",

@@ -869,7 +869,7 @@
"refId": "A"
},
{
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "100 * count by (namespace) (\n sum by (namespace, verticalpodautoscaler) ( \n count by (namespace, controller_name, verticalpodautoscaler) (avg_over_time(vpa_target_recommendation{namespace=~\"$namespace\", container!=\"\"}[$__range]))\n / on (controller_name, namespace) group_left\n count by (namespace, controller_name) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range]))\n )\n) \n/ count by (namespace) (sum by (namespace, controller) (avg_over_time(kube_controller_pod{namespace=~\"$namespace\"}[$__range])))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -878,7 +878,7 @@
"refId": "B"
},
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -895,7 +895,7 @@
"refId": "D"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -903,7 +903,7 @@
"refId": "E"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -919,7 +919,7 @@
"refId": "G"
},
{
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -935,7 +935,7 @@
"refId": "I"
},
{
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n ) > 0\n )\nor\ncount(avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -943,7 +943,7 @@
"refId": "J"
},
{
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__range]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__range]))\n )\n > 0\n )\nor count (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"instant": true,
"intervalFactor": 1,
@@ -968,7 +968,7 @@
"refId": "M"
},
{
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -977,7 +977,7 @@
"refId": "N"
},
{
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__range]))\nor\ncount (avg_over_time(kube_controller_pod{node=~\"$node\", namespace=~\"$namespace\"}[$__range])) by (namespace) * 0",
"format": "table",
"hide": false,
"instant": true,
@@ -1449,7 +1449,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1616,7 +1616,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "System",
@@ -1627,7 +1627,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -1764,7 +1764,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -1901,7 +1901,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2210,7 +2210,7 @@
"repeatDirection": "h",
"targets": [
{
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_usage_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2218,21 +2218,21 @@
"refId": "A"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Requests",
"refId": "B"
},
{
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"expr": "sum by (namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"cpu\",unit=\"core\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval])* on (uid) group_left(phase) kube_pod_status_phase{phase=\"Running\"})",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Limits",
"refId": "C"
},
{
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(vpa_target_recommendation{container!=\"\", namespace=\"$namespace\", resource=\"cpu\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "VPA Target",
@@ -2407,7 +2407,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_system_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2419,7 +2419,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (rate(container_cpu_user_seconds_total{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "User",
@@ -2572,7 +2572,7 @@
"type": "prometheus",
"uid": "$ds_prometheus"
},
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{ namespace }}",
@@ -2754,14 +2754,14 @@
"pluginVersion": "8.5.13",
"targets": [
{
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_rss{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "RSS",
"refId": "A"
},
{
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_cache{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2769,7 +2769,7 @@
"refId": "B"
},
{
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_swap{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -2777,14 +2777,14 @@
"refId": "C"
},
{
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
"expr": "sum (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "Working set bytes without kmem",
"refId": "D"
},
{
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -2910,7 +2910,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (namespace)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -3046,7 +3046,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"POD\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
|
||||
"expr": "sum by (namespace)\n (\n (\n (\n sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", namespace=~\"$namespace\"}[$__rate_interval]))\n ) or sum by(namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", container!=\"\", namespace=~\"$namespace\"}[$__rate_interval]))\n )\n > 0\n )",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -3370,14 +3370,14 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_rss{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "RSS",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_cache{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3385,7 +3385,7 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_swap{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3393,35 +3393,35 @@
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory_working_set_bytes:without_kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"POD\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
|
||||
"expr": "sum by(namespace) (avg_over_time(vpa_target_recommendation{container!=\"\",namespace=\"$namespace\", resource=\"memory\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "VPA Target",
|
||||
"refId": "E"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Requests",
|
||||
"refId": "F"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"POD\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(namespace) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",node=~\"$node\", container!=\"\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Limits",
|
||||
"refId": "G"
|
||||
},
|
||||
{
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (avg_over_time(container_memory:kmem{node=~\"$node\", namespace=\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -3873,7 +3873,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (rate(container_fs_reads_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
@@ -4008,7 +4008,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by (namespace) (rate(container_fs_writes_total{node=~\"$node\", namespace=~\"$namespace\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ namespace }}",
|
||||
|
||||
@@ -686,7 +686,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"expr": "sum by (container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -759,7 +759,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"expr": "sum by (container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -847,7 +847,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -860,7 +860,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__range]))",
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__range]))",
|
||||
"format": "table",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
@@ -899,7 +899,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "${ds_prometheus}"
|
||||
},
|
||||
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"expr": "sum by (container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=~\"$container\"}[$__range]))\nor\nsum by (container) (avg_over_time(kube_pod_container_info{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__range]) * 0)",
|
||||
"format": "table",
|
||||
"instant": true,
|
||||
"intervalFactor": 1,
|
||||
@@ -1503,7 +1503,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -1669,7 +1669,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -1681,7 +1681,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "User",
|
||||
@@ -1820,7 +1820,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"POD\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (namespace, pod, container)\n (\n (\n sum by(namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"cpu\",unit=\"core\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by(namespace, pod, container) (rate(container_cpu_usage_seconds_total{container!=\"\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2269,7 +2269,7 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Usage",
|
||||
@@ -2476,7 +2476,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_system_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2488,7 +2488,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_cpu_user_seconds_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "User",
|
||||
@@ -2639,7 +2639,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2816,7 +2816,7 @@
|
||||
"pluginVersion": "8.5.13",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -2824,28 +2824,28 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Cache",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Swap",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}[$__rate_interval]))",
|
||||
"expr": "sum by(pod) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -2974,7 +2974,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (container)\n (\n (\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3110,7 +3110,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"POD\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"expr": "sum by (container)\n (\n (\n (\n sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"}[$__rate_interval]))\n -\n sum by (namespace, pod, container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) or sum by (namespace, pod, container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\", container!=\"\"}[$__rate_interval]))\n ) > 0\n )",
|
||||
"format": "time_series",
|
||||
"hide": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3431,7 +3431,7 @@
|
||||
"repeatDirection": "h",
|
||||
"targets": [
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_rss{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"instant": false,
|
||||
"intervalFactor": 1,
|
||||
@@ -3439,7 +3439,7 @@
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_cache{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
@@ -3447,28 +3447,28 @@
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_swap{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Swap",
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory_working_set_bytes:without_kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Working set bytes without kmem",
|
||||
"refId": "D"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_limits{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Limits",
|
||||
"refId": "E"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(kube_pod_container_resource_requests{resource=\"memory\",unit=\"byte\",namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Requests",
|
||||
@@ -3482,7 +3482,7 @@
|
||||
"refId": "G"
|
||||
},
|
||||
{
|
||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container=\"$container\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (avg_over_time(container_memory:kmem{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", container=\"$container\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "Kmem",
|
||||
@@ -3930,7 +3930,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ container }}",
|
||||
@@ -4068,7 +4068,7 @@
|
||||
"type": "prometheus",
|
||||
"uid": "$ds_prometheus"
|
||||
},
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"POD\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", pod=\"$pod\", namespace=\"$namespace\"}[$__rate_interval]))",
|
||||
"format": "time_series",
|
||||
"intervalFactor": 1,
|
||||
"legendFormat": "{{ container }}",
|
||||
|
||||
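
Every hunk above makes the same one-line substitution: the exclude filter `container!="POD"` becomes `container!=""`. Recent cAdvisor/kubelet versions no longer label the pause container as `POD`; instead, pod-level aggregate series carry an empty `container` label, so matching the empty label is what actually filters non-container series out of these panels. A quick way to check which convention a cluster follows (a sketch; `PROM_URL` is a placeholder for your Prometheus endpoint):

```bash
# Count series carrying the legacy "POD" label vs. series with an
# empty container label (PROM_URL is a placeholder).
curl -sG "$PROM_URL/api/v1/query" \
  --data-urlencode 'query=count(container_cpu_usage_seconds_total{container="POD"})'
curl -sG "$PROM_URL/api/v1/query" \
  --data-urlencode 'query=count(container_cpu_usage_seconds_total{container=""})'
```
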
243 docs/changelogs/v0.31.0.md Normal file
@@ -0,0 +1,243 @@

Cozystack v0.31.0 is a significant release that brings new features, key fixes, and updates to underlying components.
This version enhances GPU support, improves many components of Cozystack, and introduces a more robust release process to improve stability.
Below, we'll go over the highlights in each area for current users, developers, and our community.

## Major Features and Improvements

### GPU support for tenant Kubernetes clusters

Cozystack now integrates NVIDIA GPU Operator support for tenant Kubernetes clusters.
This enables platform users to run GPU-powered AI/ML applications in their own clusters.
To enable the GPU Operator, set `addons.gpuOperator.enabled: true` in the cluster configuration.
(@kvaps in https://github.com/cozystack/cozystack/pull/834)
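
A minimal values fragment enabling the add-on could look like this (a sketch: only the `addons.gpuOperator.enabled` path comes from the note above; the file name and how you apply the values depend on how you manage the tenant cluster):

```bash
# Sketch: enable the NVIDIA GPU Operator add-on in a tenant cluster's values.
# The file name is illustrative.
cat <<'EOF' > tenant-cluster-values.yaml
addons:
  gpuOperator:
    enabled: true
EOF
```
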

Check out Andrei Kvapil's CNCF webinar [showcasing the GPU support by running Stable Diffusion in Cozystack](https://www.youtube.com/watch?v=S__h_QaoYEk).

<!--
* [kubernetes] Introduce GPU support for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/834)
-->

### Cilium Improvements

Cozystack’s Cilium integration received two significant enhancements.
First, Gateway API support in Cilium is now enabled, allowing advanced L4/L7 routing features via the Kubernetes Gateway API.
We thank Zdenek Janda (@zdenekjanda) for contributing this feature in https://github.com/cozystack/cozystack/pull/924.

Second, Cozystack now permits custom user-provided parameters in the tenant cluster’s Cilium configuration.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/917)

<!--
* [cilium] Enable Cilium Gateway API. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/924)
* [cilium] Enable user-added parameters in a tenant cluster Cilium. (@lllamnyp in https://github.com/cozystack/cozystack/pull/917)
-->

### Cross-Architecture Builds (ARM Support Beta)

Cozystack's build system was refactored to support multi-architecture binaries and container images.
This paves the way toward running Cozystack on ARM64 servers.
Changes include Makefile improvements (https://github.com/cozystack/cozystack/pull/907)
and multi-arch Docker image builds (https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970).

We thank Nikita Bykov (@nbykov0) for his ongoing work on ARM support!

<!--
* Introduce support for cross-architecture builds and Cozystack on ARM:
  * [build] Refactor Makefiles introducing build variables. (@nbykov0 in https://github.com/cozystack/cozystack/pull/907)
  * [build] Add support for multi-architecture and cross-platform image builds. (@nbykov0 in https://github.com/cozystack/cozystack/pull/932 and https://github.com/cozystack/cozystack/pull/970)
-->

### VerticalPodAutoscaler (VPA) Expansion

The VerticalPodAutoscaler is now enabled for more Cozystack components to automate resource tuning.
Specifically, VPA was added for tenant Kubernetes control planes (@klinch0 in https://github.com/cozystack/cozystack/pull/806),
the Cozystack Dashboard (https://github.com/cozystack/cozystack/pull/828),
and the Cozystack etcd-operator (https://github.com/cozystack/cozystack/pull/850).
All Cozystack components that have VPA enabled can automatically adjust their CPU and memory requests based on usage, improving platform and application stability.

<!--
* Add VerticalPodAutoscaler to a few more components:
  * [kubernetes] Kubernetes clusters in user tenants. (@klinch0 in https://github.com/cozystack/cozystack/pull/806)
  * [platform] Cozystack dashboard. (@klinch0 in https://github.com/cozystack/cozystack/pull/828)
  * [platform] Cozystack etcd-operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/850)
-->

### Tenant HelmRelease Reconcile Controller

A new controller was introduced to monitor and synchronize HelmRelease resources across tenants.
This controller propagates configuration changes to tenant workloads and ensures that any HelmRelease defined in a tenant
stays in sync with platform updates.
It improves the reliability of deploying managed applications in Cozystack.
(@klinch0 in https://github.com/cozystack/cozystack/pull/870)

<!--
* [platform] Introduce a new controller to synchronize tenant HelmReleases and propagate configuration changes. (@klinch0 in https://github.com/cozystack/cozystack/pull/870)
-->

### Virtual Machine Improvements

**Configurable KubeVirt CPU Overcommit**: The CPU allocation ratio in KubeVirt (how virtual CPUs are overcommitted relative to physical ones) is now configurable
via the `cpu-allocation-ratio` value in the Cozystack ConfigMap.
This means Cozystack administrators can now tune CPU overcommitment for VMs to balance performance against density.
(@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
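
As a sketch, setting the ratio could look like this (the ConfigMap name and namespace are the ones usually used for Cozystack configuration, and the value is illustrative; verify both against your installation):

```bash
# Sketch: allow 10 virtual CPUs per physical CPU for KubeVirt VMs.
# ConfigMap name/namespace and the ratio value are illustrative assumptions.
kubectl -n cozy-system patch configmap cozystack \
  --type merge -p '{"data":{"cpu-allocation-ratio":"10"}}'
```
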

**KubeVirt VM Export**: Cozystack now allows exporting KubeVirt virtual machines.
This feature, enabled via KubeVirt's `VirtualMachineExport` capability, lets users snapshot or back up VM images.
(@kvaps in https://github.com/cozystack/cozystack/pull/808)
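
A minimal export request against the upstream KubeVirt API might look like this (a sketch: the API version varies between KubeVirt releases, and the object names are illustrative):

```bash
# Sketch: request an export of a VM through KubeVirt's export API.
# API version and resource names are illustrative; check your KubeVirt release.
cat <<'EOF' | kubectl apply -f -
apiVersion: export.kubevirt.io/v1beta1
kind: VirtualMachineExport
metadata:
  name: example-export
spec:
  source:
    apiGroup: kubevirt.io
    kind: VirtualMachine
    name: example-vm
EOF
```
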

**Support for various storage classes in Virtual Machines**: The `virtual-machine` application (since version 0.9.2) lets you pick any StorageClass for a VM's
system disk instead of relying on a hard-coded PVC.
Refer to the values `systemDisk.storage` and `systemDisk.storageClass` in the [application's configs](https://cozystack.io/docs/reference/applications/virtual-machine/#common-parameters).
(@kvaps in https://github.com/cozystack/cozystack/pull/974)
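
For instance (a sketch: the parameter names come from the reference linked above; the size and class values are illustrative):

```bash
# Sketch: choose the system disk size and StorageClass for a virtual-machine
# application (values are illustrative).
cat <<'EOF' > virtual-machine-values.yaml
systemDisk:
  storage: 20Gi
  storageClass: replicated
EOF
```
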

<!--
* [kubevirt] Enable exporting VMs. (@kvaps in https://github.com/cozystack/cozystack/pull/808)
* [kubevirt] Make KubeVirt's CPU allocation ratio configurable. (@lllamnyp in https://github.com/cozystack/cozystack/pull/905)
* [virtual-machine] Add support for various storages. (@kvaps in https://github.com/cozystack/cozystack/pull/974)
-->

### Other Features and Improvements

* [platform] Introduce options `expose-services`, `expose-ingress`, and `expose-external-ips` to the ingress service. (@kvaps in https://github.com/cozystack/cozystack/pull/929)
* [cozystack-controller] Record the IP address pool and storage class in Workload objects. (@lllamnyp in https://github.com/cozystack/cozystack/pull/831)
* [apps] Remove user-facing config of limits and requests. (@lllamnyp in https://github.com/cozystack/cozystack/pull/935)

## New Release Lifecycle

The Cozystack release lifecycle is changing to give customers running Cozystack in mission-critical environments a more stable and predictable upgrade path.

* **Gradual Release with Alpha, Beta, and Release Candidates**: Cozystack will now publish pre-release versions (alpha, beta, release candidates) before a stable release.
  For this release, the team published three release candidates before the stable v0.31.0.
  This allows more testing and feedback before marking a release as stable.

* **Prolonged Release Support with Patch Versions**: After the initial `vX.Y.0` release, a long-lived branch `release-X.Y` will be created to backport fixes.
  For example, with 0.31.0’s release, a `release-0.31` branch will track patch fixes (`0.31.x`).
  This strategy lets Cozystack users receive timely patch releases and updates with minimal risks.

To implement these changes, we have rebuilt our CI/CD workflows and introduced automation, enabling automatic backports.
You can read more about how it's implemented in the Development section below.

For more information, read the [Cozystack Release Workflow](https://github.com/cozystack/cozystack/blob/main/docs/release.md) documentation.

## Fixes

* [virtual-machine] Add GPU names to the virtual machine specifications. (@kvaps in https://github.com/cozystack/cozystack/pull/862)
* [virtual-machine] Count Workload resources for pods by requests, not limits. Other improvements to VM resource tracking. (@lllamnyp in https://github.com/cozystack/cozystack/pull/904)
* [virtual-machine] Set the PortList method by default. (@kvaps in https://github.com/cozystack/cozystack/pull/996)
* [virtual-machine] Specify ports even for wholeIP mode. (@kvaps in https://github.com/cozystack/cozystack/pull/1000)
* [platform] Fix installing HelmReleases on initial setup. (@kvaps in https://github.com/cozystack/cozystack/pull/833)
* [platform] Migration scripts update the Kubernetes ConfigMap with the current stack version for improved version tracking. (@klinch0 in https://github.com/cozystack/cozystack/pull/840)
* [platform] Reduce requested CPU and RAM for the `kamaji` provider. (@klinch0 in https://github.com/cozystack/cozystack/pull/825)
* [platform] Improve the reconciliation loop for the Cozystack system HelmReleases logic. (@klinch0 in https://github.com/cozystack/cozystack/pull/809 and https://github.com/cozystack/cozystack/pull/810, @kvaps in https://github.com/cozystack/cozystack/pull/811)
* [platform] Remove extra dependencies for the Piraeus operator. (@klinch0 in https://github.com/cozystack/cozystack/pull/856)
* [platform] Refactor dashboard values. (@kvaps in https://github.com/cozystack/cozystack/pull/928, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/952)
* [platform] Make the FluxCD artifact disabled by default. (@klinch0 in https://github.com/cozystack/cozystack/pull/964)
* [kubernetes] Update garbage collection of HelmReleases in tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/835)
* [kubernetes] Fix merging `valuesOverride` for tenant clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/879)
* [kubernetes] Fix the `ubuntu-container-disk` tag. (@kvaps in https://github.com/cozystack/cozystack/pull/887)
* [kubernetes] Refactor Helm manifests for tenant Kubernetes clusters. (@kvaps in https://github.com/cozystack/cozystack/pull/866)
* [kubernetes] Fix the Ingress-NGINX dependency on Cert-Manager. (@kvaps in https://github.com/cozystack/cozystack/pull/976)
* [kubernetes, apps] Enable `topologySpreadConstraints` for tenant Kubernetes clusters and fix it for managed PostgreSQL. (@klinch0 in https://github.com/cozystack/cozystack/pull/995)
* [tenant] Fix an issue with accessing external IPs of a cluster from the cluster itself. (@kvaps in https://github.com/cozystack/cozystack/pull/854)
* [cluster-api] Remove the no-longer-necessary workaround for Kamaji. (@kvaps in https://github.com/cozystack/cozystack/pull/867, patched in https://github.com/cozystack/cozystack/pull/956)
* [monitoring] Remove the legacy label "POD" from the exclude filter in metrics. (@xy2 in https://github.com/cozystack/cozystack/pull/826)
* [monitoring] Refactor the management etcd monitoring config. Introduce a migration script for updating monitoring resources (the `kube-rbac-proxy` daemonset). (@lllamnyp in https://github.com/cozystack/cozystack/pull/799 and https://github.com/cozystack/cozystack/pull/830)
* [monitoring] Fix VerticalPodAutoscaler resource allocation for VMagent. (@klinch0 in https://github.com/cozystack/cozystack/pull/820)
* [postgres] Remove the duplicated `template` entry from the backup manifest. (@etoshutka in https://github.com/cozystack/cozystack/pull/872)
* [kube-ovn] Fix the versions mapping in the Makefile. (@kvaps in https://github.com/cozystack/cozystack/pull/883)
* [dx] Automatically detect the version for migrations in installer.sh. (@kvaps in https://github.com/cozystack/cozystack/pull/837)
* [dx] Remove `version_map` entries and builds for library charts. (@kvaps in https://github.com/cozystack/cozystack/pull/998)
* [docs] Review the tenant Kubernetes cluster docs. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/969)
* [docs] Explain that tenants cannot have dashes in their names. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/980)

## Dependencies

* MetalLB images are now built in-tree, based on version 0.14.9 with additional critical patches. (@lllamnyp in https://github.com/cozystack/cozystack/pull/945)
* Update Kubernetes to v1.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/949)
* Update Talos Linux to v1.10.1. (@kvaps in https://github.com/cozystack/cozystack/pull/931)
* Update Cilium to v1.17.3. (@kvaps in https://github.com/cozystack/cozystack/pull/848)
* Update LINSTOR to v1.31.0. (@kvaps in https://github.com/cozystack/cozystack/pull/846)
* Update Kube-OVN to v1.13.11. (@kvaps in https://github.com/cozystack/cozystack/pull/847, @lllamnyp in https://github.com/cozystack/cozystack/pull/922)
* Update tenant Kubernetes to v1.32. (@kvaps in https://github.com/cozystack/cozystack/pull/871)
* Update flux-operator to 0.20.0. (@kingdonb in https://github.com/cozystack/cozystack/pull/880 and https://github.com/cozystack/cozystack/pull/934)
* Update multiple Cluster API components. (@kvaps in https://github.com/cozystack/cozystack/pull/867 and https://github.com/cozystack/cozystack/pull/947)
* Update KamajiControlPlane to edge-25.4.1. (@kvaps in https://github.com/cozystack/cozystack/pull/953, fixed by @nbykov0 in https://github.com/cozystack/cozystack/pull/983)
* Update cert-manager to v1.17.2. (@kvaps in https://github.com/cozystack/cozystack/pull/975)

## Documentation

* [Installing Talos in an Air-Gapped Environment](https://cozystack.io/docs/operations/talos/configuration/air-gapped/):
  a new guide for configuring and bootstrapping Talos Linux clusters in air-gapped environments.
  (@klinch0 in https://github.com/cozystack/website/pull/203)

* [Cozystack Bundles](https://cozystack.io/docs/guides/bundles/): a new page in the learning section explaining how Cozystack bundles work and how to choose a bundle.
  (@NickVolynkin in https://github.com/cozystack/website/pull/188, https://github.com/cozystack/website/pull/189, and others;
  updated by @kvaps in https://github.com/cozystack/website/pull/192 and https://github.com/cozystack/website/pull/193)

* [Managed Application Reference](https://cozystack.io/docs/reference/applications/): a set of new pages in the docs, mirroring application docs from the Cozystack dashboard.
  (@NickVolynkin in https://github.com/cozystack/website/pull/198, https://github.com/cozystack/website/pull/202, and https://github.com/cozystack/website/pull/204)

* **LINSTOR Networking**: guides on [configuring a dedicated network for LINSTOR](https://cozystack.io/docs/operations/storage/dedicated-network/)
  and [configuring the network for distributed storage in a multi-datacenter setup](https://cozystack.io/docs/operations/stretched/linstor-dedicated-network/).
  (@xy2, edited by @NickVolynkin in https://github.com/cozystack/website/pull/171, https://github.com/cozystack/website/pull/182, and https://github.com/cozystack/website/pull/184)

### Fixes

* Correct an error in the command for editing the ConfigMap in the docs. (@lb0o in https://github.com/cozystack/website/pull/207)
* Fix the group name in the OIDC docs. (@kingdonb in https://github.com/cozystack/website/pull/179)
* Expand the explanation of Docker buildx builders. (@nbykov0 in https://github.com/cozystack/website/pull/187)

## Development, Testing, and CI/CD

### Testing

Improvements:

* Introduce `cozytest` — a new [BATS-based](https://github.com/bats-core/bats-core) testing framework. (@kvaps in https://github.com/cozystack/cozystack/pull/982)
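
For readers unfamiliar with BATS, a test is an annotated shell function; a minimal test file in plain bats-core style looks like this (illustrative, not taken from the cozytest sources, which add their own helpers):

```bash
#!/usr/bin/env bats
# Minimal bats-core example (illustrative).

@test "kubectl can list nodes" {
  run kubectl get nodes
  [ "$status" -eq 0 ]
}
```
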

Fixes:

* Fix the `device_ownership_from_security_context` CRI setting. (@dtrdnk in https://github.com/cozystack/cozystack/pull/896)
* Increase timeout durations for `capi` and `keycloak` to improve reliability during e2e tests. (@kvaps in https://github.com/cozystack/cozystack/pull/858)
* Return `genisoimage` to the e2e-test Dockerfile. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/962)

### CI/CD Changes

Improvements:

* Use release branches `release-X.Y` for gathering and releasing fixes after the initial `vX.Y.0` release. (@kvaps in https://github.com/cozystack/cozystack/pull/816)
* Automatically create release branches after the initial `vX.Y.0` release is published. (@kvaps in https://github.com/cozystack/cozystack/pull/886)
* Introduce Release Candidate versions. Automate patch backporting by applying patches from pull requests labeled `[backport]` to the current release branch. (@kvaps in https://github.com/cozystack/cozystack/pull/841 and https://github.com/cozystack/cozystack/pull/901, @nickvolynkin in https://github.com/cozystack/cozystack/pull/890)
* Support alpha and beta pre-releases. (@kvaps in https://github.com/cozystack/cozystack/pull/978)
* Commit changes in release pipelines under `github-actions <github-actions@github.com>`. (@kvaps in https://github.com/cozystack/cozystack/pull/823)
* Describe the Cozystack release workflow. (@NickVolynkin in https://github.com/cozystack/cozystack/pull/817 and https://github.com/cozystack/cozystack/pull/897)

Fixes:

* Improve the check for `versions_map` running on pull requests. (@kvaps and @klinch0 in https://github.com/cozystack/cozystack/pull/836, https://github.com/cozystack/cozystack/pull/842, and https://github.com/cozystack/cozystack/pull/845)
* If the release step was skipped on a tag, skip tests as well. (@kvaps in https://github.com/cozystack/cozystack/pull/822)
* Allow CI to cancel the previous job if a new one is scheduled. (@kvaps in https://github.com/cozystack/cozystack/pull/873)
* Use the correct version name when uploading build assets to the release page. (@kvaps in https://github.com/cozystack/cozystack/pull/876)
* Stop using the `ok-to-test` label to trigger CI in pull requests. (@kvaps in https://github.com/cozystack/cozystack/pull/875)
* Do not run tests in the release building pipeline. (@kvaps in https://github.com/cozystack/cozystack/pull/882)
* Fix release branch creation. (@kvaps in https://github.com/cozystack/cozystack/pull/884)
* Reduce noise in the test logs by suppressing the `wget` progress bar. (@lllamnyp in https://github.com/cozystack/cozystack/pull/865)
* Revert "automatically trigger tests in releasing PR". (@kvaps in https://github.com/cozystack/cozystack/pull/900)
* Force-update the release branch on tagged main commits. (@kvaps in https://github.com/cozystack/cozystack/pull/977)
* Show detailed errors in the `pull-request-release` workflow. (@lllamnyp in https://github.com/cozystack/cozystack/pull/992)

## Community and Maintenance

### Repository Maintenance

Added @klinch0 to CODEOWNERS. (@kvaps in https://github.com/cozystack/cozystack/pull/838)

### New Contributors

* @etoshutka made their first contribution in https://github.com/cozystack/cozystack/pull/872
* @dtrdnk made their first contribution in https://github.com/cozystack/cozystack/pull/896
* @zdenekjanda made their first contribution in https://github.com/cozystack/cozystack/pull/924
* @gwynbleidd2106 made their first contribution in https://github.com/cozystack/cozystack/pull/962

## Full Changelog

See https://github.com/cozystack/cozystack/compare/v0.30.0...v0.31.0
71 docs/changelogs/v0.32.0.md Normal file
@@ -0,0 +1,71 @@

Cozystack v0.32.0 is a significant release that brings new features, key fixes, and updates to underlying components.

## Major Features and Improvements

* [platform] Use `cozypkg` instead of Helm. (@kvaps in https://github.com/cozystack/cozystack/pull/1057)
* [platform] Introduce the HelmRelease reconciler for system components. (@kvaps in https://github.com/cozystack/cozystack/pull/1033)
* [kubernetes] Enable using container registry mirrors in tenant Kubernetes clusters. Configure containerd for tenant Kubernetes clusters. (@klinch0 in https://github.com/cozystack/cozystack/pull/979, patched by @lllamnyp in https://github.com/cozystack/cozystack/pull/1032)
* [platform] Allow users to specify CPU requests in VCPUs. Use a library chart for resource management. (@lllamnyp in https://github.com/cozystack/cozystack/pull/972 and https://github.com/cozystack/cozystack/pull/1025)
* [platform] Annotate all child objects of apps with uniform labels for tracking by WorkloadMonitors. (@lllamnyp in https://github.com/cozystack/cozystack/pull/1018 and https://github.com/cozystack/cozystack/pull/1024)
* [platform] Introduce the `cluster-domain` option and un-hardcode `cozy.local`. (@kvaps in https://github.com/cozystack/cozystack/pull/1039)
* [platform] Get the instance type when reconciling a WorkloadMonitor. (https://github.com/cozystack/cozystack/pull/1030)
* [virtual-machine] Add RBAC rules to allow port forwarding in KubeVirt for SSH via `virtctl`. (@mattia-eleuteri in https://github.com/cozystack/cozystack/pull/1027, patched by @klinch0 in https://github.com/cozystack/cozystack/pull/1028)
* [monitoring] Add events and audit inputs. (@kevin880202 in https://github.com/cozystack/cozystack/pull/948)

## Security

* Resolve a security problem that allowed a tenant administrator to gain elevated privileges outside the tenant. (@kvaps in https://github.com/cozystack/cozystack/pull/1062)

## Fixes

* [dashboard] Fix a number of issues in the Cozystack Dashboard. (@kvaps in https://github.com/cozystack/cozystack/pull/1042)
* [kafka] Specify minimal working resource presets. (@kvaps in https://github.com/cozystack/cozystack/pull/1040)
* [cilium] Fix the Gateway API manifest. (@zdenekjanda in https://github.com/cozystack/cozystack/pull/1016)
* [platform] Fix RBAC for annotating namespaces. (@kvaps in https://github.com/cozystack/cozystack/pull/1031)
* [platform] Fix dependencies for the paas-hosted bundle. (@kvaps in https://github.com/cozystack/cozystack/pull/1034)
* [platform] Reduce system resource consumption by using lesser resource presets for VerticalPodAutoscaler, SeaweedFS, and KubeOVN. (@klinch0 in https://github.com/cozystack/cozystack/pull/1054)
* [virtual-machine] Fix handling of cloud-init and SSH-key input for the `virtual-machine` and `vm-instance` applications. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1019 and https://github.com/cozystack/cozystack/pull/1020)
* [apps] Fix ClickHouse version parsing. (@kvaps in https://github.com/cozystack/cozystack/commit/28302e776e9d2bb8f424cf467619fa61d71ac49a)
* [apps] Add resource quotas for PostgreSQL jobs and fix the application readme generation check in CI. (@klinch0 in https://github.com/cozystack/cozystack/pull/1051)
* [kube-ovn] Enable the database health check. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)
* [kubernetes] Fix an upstream issue by updating KubeVirt-CCM. (@kvaps in https://github.com/cozystack/cozystack/pull/1052)
* [kubernetes] Fix resources and introduce a migration when upgrading tenant Kubernetes to v0.32.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1073)
* [cluster-api] Add a missing migration for `capi-providers`. (@kvaps in https://github.com/cozystack/cozystack/pull/1072)

## Dependencies

* Introduce `cozypkg`, update to v1.1.0. (@kvaps in https://github.com/cozystack/cozystack/pull/1057 and https://github.com/cozystack/cozystack/pull/1063)
* Update flux-operator to 0.22.0 and Flux to 2.6.x. (@kingdonb in https://github.com/cozystack/cozystack/pull/1035)
* Update Talos Linux to v1.10.3. (@kvaps in https://github.com/cozystack/cozystack/pull/1006)
* Update Cilium to v1.17.4. (@kvaps in https://github.com/cozystack/cozystack/pull/1046)
* Update MetalLB to v0.15.2. (@kvaps in https://github.com/cozystack/cozystack/pull/1045)
* Update Kube-OVN to v1.13.13. (@kvaps in https://github.com/cozystack/cozystack/pull/1047)

## Documentation

* [Oracle Cloud Infrastructure installation guide](https://cozystack.io/docs/operations/talos/installation/oracle-cloud/). (@kvaps, @lllamnyp, and @NickVolynkin in https://github.com/cozystack/website/pull/168)
* [Cluster configuration with `talosctl`](https://cozystack.io/docs/operations/talos/configuration/talosctl/). (@NickVolynkin in https://github.com/cozystack/website/pull/211)
* [Configuring container registry mirrors for tenant Kubernetes clusters](https://cozystack.io/docs/operations/talos/configuration/air-gapped/#5-configure-container-registry-mirrors-for-tenant-kubernetes). (@klinch0 in https://github.com/cozystack/website/pull/210)
* [Application management strategies and available versions for managed applications](https://cozystack.io/docs/guides/applications/). (@NickVolynkin in https://github.com/cozystack/website/pull/219)
* [How to clean up etcd state](https://cozystack.io/docs/operations/faq/#how-to-clean-up-etcd-state). (@gwynbleidd2106 in https://github.com/cozystack/website/pull/214)
* [State that Cozystack is a CNCF Sandbox project](https://github.com/cozystack/cozystack?tab=readme-ov-file#cozystack). (@NickVolynkin in https://github.com/cozystack/cozystack/pull/1055)

## Development, Testing, and CI/CD

* [tests] Add tests for the applications `virtual-machine`, `vm-disk`, `vm-instance`, `postgresql`, `mysql`, and `clickhouse`. (@gwynbleidd2106 in https://github.com/cozystack/cozystack/pull/1048, patched by @kvaps in https://github.com/cozystack/cozystack/pull/1074)
* [tests] Fix concurrency for the `docker login` action. (@kvaps in https://github.com/cozystack/cozystack/pull/1014)
* [tests] Increase the QEMU system disk size in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1011)
* [tests] Increase the waiting timeout for VMs in tests. (@kvaps in https://github.com/cozystack/cozystack/pull/1038)
* [ci] Separate build and testing jobs in CI. (@kvaps in https://github.com/cozystack/cozystack/pull/1005 and https://github.com/cozystack/cozystack/pull/1010)
* [ci] Fix the release assets. (@kvaps in https://github.com/cozystack/cozystack/pull/1006 and https://github.com/cozystack/cozystack/pull/1009)

## New Contributors

* @kevin880202 made their first contribution in https://github.com/cozystack/cozystack/pull/948
* @mattia-eleuteri made their first contribution in https://github.com/cozystack/cozystack/pull/1027

**Full Changelog**: https://github.com/cozystack/cozystack/compare/v0.31.0...v0.32.0
166 docs/release.md Normal file
@@ -0,0 +1,166 @@

# Release Workflow

This document describes Cozystack’s release process.

## Introduction

Cozystack uses a staged release process to ensure stability and flexibility during development.

There are three types of releases:

- **Release Candidates (RC)** – Preview versions (e.g., `v0.42.0-rc.1`) used for final testing and validation.
- **Regular Releases** – Final versions (e.g., `v0.42.0`) that are feature-complete and thoroughly tested.
- **Patch Releases** – Bugfix-only updates (e.g., `v0.42.1`) made after a stable release, based on a dedicated release branch.

Each type plays a distinct role in delivering reliable and tested updates while allowing ongoing development to continue smoothly.

## Release Candidates

Release candidates are Cozystack versions that introduce new features and are published before a stable release.
Their purpose is to help validate stability before finalizing a new feature release.
They allow for final rounds of testing and bug fixes without freezing development.

Release candidates are given numbers `vX.Y.0-rc.N`, for example, `v0.42.0-rc.1`.
They are created directly in the `main` branch.
An RC is typically tagged when all major features for the upcoming release have been merged into `main` and the release enters its testing phase.
However, new features and changes can still be added before the regular release `vX.Y.0`.
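
Cutting an RC is therefore just a matter of tagging `main` (a sketch, using the example version from this document):

```bash
# Sketch: tag and publish the first release candidate from main.
git checkout main
git pull
git tag v0.42.0-rc.1
git push origin v0.42.0-rc.1
```
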
|
||||
|
||||
Each RC contributes to a cumulative set of release notes that will be finalized when `vX.Y.0` is released.
|
||||
After testing, if no critical issues remain, the regular release (`vX.Y.0`) is tagged from the last RC or a later commit in main.
|
||||
This begins the regular release process, creates a dedicated `release-X.Y` branch, and opens the way for patch releases.
|
||||
|
||||
## Regular Releases

When making a regular release, we tag the latest RC or a subsequent minimal-change commit as `vX.Y.0`.
In this explanation, we'll use version `v0.42.0` as an example:

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
```

A regular release sequence starts in the following way:

1. Maintainer tags a commit in `main` with `v0.42.0` and pushes it to GitHub.
2. CI workflow triggers on the tag push:
    1. Creates a draft page for release `v0.42.0`, if it wasn't created before.
    2. Takes code from tag `v0.42.0`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.0` with updated digests, pushes it to the new branch `release-0.42.0`, and opens a PR to `main`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.0` and uploads them to the release draft page.
3. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3" tag: "v0.42.0"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Pull Request"
```

When testing and editing are completed, the sequence goes on.

4. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.0`.
5. CI workflow triggers on the merge:
    1. Moves the tag `v0.42.0` to the newly created merge commit by force-pushing the tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
6. The maintainer can now announce the release to the community.

```mermaid
gitGraph
    commit id: "feature"
    commit id: "feature 2"
    commit id: "feature 3"
    branch release-0.42.0
    checkout release-0.42.0
    commit id: "Prepare release v0.42.0"
    checkout main
    merge release-0.42.0 id: "Release v0.42.0" tag: "v0.42.0"
```

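As a sketch of step 5.1, re-pointing the release tag at the merge commit is a forced tag update; `<merge-commit-sha>` below is a placeholder for the commit created by merging the PR:

```bash
# Move the existing tag onto the merge commit and force-push it,
# so the published release refers to the commit with updated digests.
git tag -f v0.42.0 <merge-commit-sha>
git push -f origin v0.42.0
```
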
## Patch Releases

Making a patch release has a lot in common with a regular release, with a couple of differences:

* A release branch is used instead of `main`.
* Patch commits are cherry-picked to the release branch.
* A pull request is opened against the release branch.

Let's assume that we've released `v0.42.0` and that development is ongoing.
We have introduced a couple of new features and some fixes to features that we released in `v0.42.0`.

Once problems are found and fixed, a patch release is due.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
```

1. The maintainer creates a release branch, `release-0.42`, and cherry-picks patch commits from `main` to `release-0.42`.
   These must be only patches to features that were present in version `v0.42.0`.

   Cherry-picking can be done as soon as each patch is merged into `main`, or directly before the release.
   A minimal command sketch of this step is shown after the diagram below.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2"
```

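A minimal sketch of the branching and cherry-picking above, with commit SHAs as placeholders:

```bash
# Create the release branch from the v0.42.0 tag,
# then pull in only the bugfix commits from main.
git checkout -b release-0.42 v0.42.0
git cherry-pick <sha-of-patch-1>
git cherry-pick <sha-of-patch-2>
git push origin release-0.42
```
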
When all relevant patch commits are cherry-picked, the branch is ready for release.

2. The maintainer tags the `HEAD` commit of branch `release-0.42` as `v0.42.1` and pushes it to GitHub.
3. CI workflow triggers on the tag push:
    1. Creates a draft page for release `v0.42.1`, if it wasn't created before.
    2. Takes code from tag `v0.42.1`, builds images, and pushes them to ghcr.io.
    3. Makes a new commit `Prepare release v0.42.1` with updated digests, pushes it to the new branch `release-0.42.1`, and opens a PR to `release-0.42`.
    4. Builds Cozystack release assets from the new commit `Prepare release v0.42.1` and uploads them to the release draft page.
4. Maintainer reviews the PR, tests build artifacts, and edits changelogs on the release draft page.

```mermaid
gitGraph
    commit id: "Release v0.42.0" tag: "v0.42.0"
    branch release-0.42
    checkout main
    commit id: "feature 4"
    commit id: "patch 1"
    commit id: "feature 5"
    commit id: "patch 2"
    checkout release-0.42
    cherry-pick id: "patch 1"
    cherry-pick id: "patch 2" tag: "v0.42.1"
    branch release-0.42.1
    commit id: "Prepare release v0.42.1"
    checkout release-0.42
    merge release-0.42.1 id: "Pull request"
```

Finally, when the release is confirmed, the release sequence goes on.

5. Maintainer merges the PR. GitHub removes the merged branch `release-0.42.1`.
6. CI workflow triggers on the merge:
    1. Moves the tag `v0.42.1` to the newly created merge commit by force-pushing the tag to GitHub.
    2. Publishes the release page (`draft` → `latest`).
7. The maintainer can now announce the release to the community.

15 go.mod
@@ -1,6 +1,6 @@
// This is a generated file. Do not edit directly.

module github.com/aenix-io/cozystack
module github.com/cozystack/cozystack

go 1.23.0

@@ -37,6 +37,7 @@ require (
	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/fluxcd/pkg/apis/kustomize v1.6.1 // indirect
@@ -91,14 +92,14 @@ require (
	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
	go.uber.org/multierr v1.11.0 // indirect
	go.uber.org/zap v1.27.0 // indirect
	golang.org/x/crypto v0.28.0 // indirect
	golang.org/x/crypto v0.31.0 // indirect
	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
	golang.org/x/net v0.30.0 // indirect
	golang.org/x/net v0.33.0 // indirect
	golang.org/x/oauth2 v0.23.0 // indirect
	golang.org/x/sync v0.8.0 // indirect
	golang.org/x/sys v0.26.0 // indirect
	golang.org/x/term v0.25.0 // indirect
	golang.org/x/text v0.19.0 // indirect
	golang.org/x/sync v0.10.0 // indirect
	golang.org/x/sys v0.28.0 // indirect
	golang.org/x/term v0.27.0 // indirect
	golang.org/x/text v0.21.0 // indirect
	golang.org/x/time v0.7.0 // indirect
	golang.org/x/tools v0.26.0 // indirect
	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect

28 go.sum
@@ -26,8 +26,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -212,8 +212,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -222,26 +222,26 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

117 hack/cozytest.sh Executable file
@@ -0,0 +1,117 @@
#!/bin/sh
###############################################################################
# cozytest.sh - Bats-compatible test runner with live trace and enhanced     #
# output, written in pure shell                                              #
###############################################################################
set -eu

TEST_FILE=${1:?Usage: ./cozytest.sh <file.bats> [pattern]}
PATTERN=${2:-*}
LINE='----------------------------------------------------------------'

cols() { stty size 2>/dev/null | awk '{print $2}' || echo 80; }
MAXW=$(( $(cols) - 12 )); [ "$MAXW" -lt 40 ] && MAXW=70
BEGIN=$(date +%s)
timestamp() { s=$(( $(date +%s) - BEGIN )); printf '[%02d:%02d]' $((s/60)) $((s%60)); }

###############################################################################
# run_one <fn> <title>                                                        #
###############################################################################
run_one() {
  fn=$1 title=$2
  tmp=$(mktemp -d) || { echo "Failed to create temp directory" >&2; exit 1; }
  log="$tmp/log"

  echo "╭ » Run test: $title"
  START=$(date +%s)
  skip_next="+ $fn" # skip the first trace line, which holds the function name

  {
    (
      PS4='+ '   # prefix for set -x
      set -eu -x # strict + trace
      "$fn"
    )
    printf '__RC__%s\n' "$?"
  } 2>&1 | tee "$log" | while IFS= read -r line; do
    case "$line" in
      '__RC__'*) : ;;
      '+ '*) cmd=${line#'+ '}
        [ "$cmd" = "${skip_next#+ }" ] && continue
        case "$cmd" in
          'set -e'|'set -x'|'set -u'|'return 0') continue ;;
        esac
        out=$cmd ;;
      *) out=$line ;;
    esac
    now=$(( $(date +%s) - START ))
    [ ${#out} -gt "$MAXW" ] && out="$(printf '%.*s…' "$MAXW" "$out")"
    printf '┊[%02d:%02d] %s\n' $((now/60)) $((now%60)) "$out"
  done

  rc=$(awk '/^__RC__/ {print substr($0,7)}' "$log" | tail -n1)
  [ -z "$rc" ] && rc=1
  now=$(( $(date +%s) - START ))

  if [ "$rc" -eq 0 ]; then
    printf '╰[%02d:%02d] ✅ Test OK: %s\n' $((now/60)) $((now%60)) "$title"
  else
    printf '╰[%02d:%02d] ❌ Test failed: %s (exit %s)\n' \
      $((now/60)) $((now%60)) "$title" "$rc"
    echo "----- captured output -----------------------------------------"
    grep -v '^__RC__' "$log"
    echo "$LINE"
    exit "$rc"
  fi

  rm -rf "$tmp"
}

###############################################################################
# convert .bats -> shell-functions                                            #
###############################################################################
TMP_SH=$(mktemp) || { echo "Failed to create temp file" >&2; exit 1; }
trap 'rm -f "$TMP_SH"' EXIT
awk '
/^@test[[:space:]]+"/ {
  line = substr($0, index($0, "\"") + 1)
  title = substr(line, 1, index(line, "\"") - 1)
  fname = "test_"
  for (i = 1; i <= length(title); i++) {
    c = substr(title, i, 1)
    fname = fname (c ~ /[A-Za-z0-9]/ ? c : "_")
  }
  printf("### %s\n", title)
  printf("%s() {\n", fname)
  print " set -e" # an error makes the test fail
  next
}
/^}$/ {
  print " return 0" # if the author did not call exit 1, the test passes
  print "}"
  next
}
{ print }
' "$TEST_FILE" > "$TMP_SH"

[ -f "$TMP_SH" ] || { echo "Failed to generate test functions" >&2; exit 1; }
# shellcheck disable=SC1090
. "$TMP_SH"

###############################################################################
# run selected tests                                                          #
###############################################################################
awk -v pat="$PATTERN" '
/^### / {
  title = substr($0, 5)
  name = "test_"
  for (i = 1; i <= length(title); i++) {
    c = substr(title, i, 1)
    name = name (c ~ /[A-Za-z0-9]/ ? c : "_")
  }
  if (pat == "*" || index(title, pat) > 0)
    printf("%s %s\n", name, title)
}
' "$TMP_SH" | while IFS=' ' read -r fn title; do
  run_one "$fn" "$title"
done

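For context, the runner takes a `.bats` file and an optional substring pattern, matching the pattern against `@test` titles via the `awk` filter above. A hypothetical invocation against one of the suites added below:

```bash
# Run a single test from a bats file; the second argument is matched
# as a substring against @test titles.
hack/cozytest.sh hack/e2e-apps/redis.bats "Create Redis"
```
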
42 hack/e2e-apps/clickhouse.bats Normal file
@@ -0,0 +1,42 @@
#!/usr/bin/env bats

@test "Create DB ClickHouse" {
  name='test'
  kubectl -n tenant-test get clickhouses.apps.cozystack.io $name ||
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: ClickHouse
metadata:
  name: $name
  namespace: tenant-test
spec:
  size: 10Gi
  logStorageSize: 2Gi
  shards: 1
  replicas: 2
  storageClass: ""
  logTTL: 15
  users:
    testuser:
      password: xai7Wepo
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/clickhouse-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr clickhouse-$name --timeout=20s --for=condition=ready
  timeout 180 sh -ec "until kubectl -n tenant-test get svc chendpoint-clickhouse-$name -o jsonpath='{.spec.ports[*].port}' | grep -q '8123 9000'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-0 --timeout=120s --for=jsonpath='{.status.replicas}'=1
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 100 sh -ec "until kubectl -n tenant-test get svc chi-clickhouse-$name-clickhouse-0-0 -o jsonpath='{.spec.ports[*].port}' | grep -q '9000 8123 9009'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get sts chi-clickhouse-$name-clickhouse-0-1 ; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/chi-clickhouse-$name-clickhouse-0-1 --timeout=140s --for=jsonpath='{.status.replicas}'=1
}

51 hack/e2e-apps/kafka.bats Normal file
@@ -0,0 +1,51 @@
#!/usr/bin/env bats

@test "Create Kafka" {
  name='test'
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kafka
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  kafka:
    size: 10Gi
    replicas: 2
    storageClass: ""
    resources: {}
    resourcesPreset: "nano"
  zookeeper:
    size: 5Gi
    replicas: 2
    storageClass: ""
    resources:
    resourcesPreset: "nano"
  topics:
    - name: testResults
      partitions: 1
      replicas: 2
      config:
        min.insync.replicas: 2
    - name: testOrders
      config:
        cleanup.policy: compact
        segment.ms: 3600000
        max.compaction.lag.ms: 5400000
        min.insync.replicas: 2
      partitions: 1
      replicas: 2
EOF
  sleep 5
  kubectl -n tenant-test wait hr kafka-$name --timeout=30s --for=condition=ready
  kubectl wait kafkas -n tenant-test test --timeout=60s --for=condition=ready
  timeout 60 sh -ec "until kubectl -n tenant-test get pvc data-kafka-$name-zookeeper-0; do sleep 10; done"
  kubectl -n tenant-test wait pvc data-kafka-$name-zookeeper-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-client -o jsonpath='{.spec.ports[0].port}' | grep -q '2181'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc kafka-$name-zookeeper-nodes -o jsonpath='{.spec.ports[*].port}' | grep -q '2181 2888 3888'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints kafka-$name-zookeeper-nodes -o jsonpath='{.subsets[*].addresses[0].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete kafka.apps.cozystack.io $name
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-0
  kubectl -n tenant-test delete pvc data-kafka-$name-zookeeper-1
}

73 hack/e2e-apps/kubernetes.bats Normal file
@@ -0,0 +1,73 @@
#!/usr/bin/env bats

@test "Create a tenant Kubernetes control plane" {
  kubectl -n tenant-test get kuberneteses.apps.cozystack.io test ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Kubernetes
metadata:
  name: test
  namespace: tenant-test
spec:
  addons:
    certManager:
      enabled: false
      valuesOverride: {}
    cilium:
      valuesOverride: {}
    fluxcd:
      enabled: false
      valuesOverride: {}
    gatewayAPI:
      enabled: false
    gpuOperator:
      enabled: false
      valuesOverride: {}
    ingressNginx:
      enabled: true
      hosts: []
      valuesOverride: {}
    monitoringAgents:
      enabled: false
      valuesOverride: {}
    verticalPodAutoscaler:
      valuesOverride: {}
  controlPlane:
    apiServer:
      resources: {}
      resourcesPreset: small
    controllerManager:
      resources: {}
      resourcesPreset: micro
    konnectivity:
      server:
        resources: {}
        resourcesPreset: micro
    replicas: 2
    scheduler:
      resources: {}
      resourcesPreset: micro
  host: ""
  nodeGroups:
    md0:
      ephemeralStorage: 20Gi
      gpus: []
      instanceType: u1.medium
      maxReplicas: 10
      minReplicas: 0
      resources:
        cpu: ""
        memory: ""
      roles:
        - ingress-nginx
      storageClass: replicated
EOF
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
  timeout 10 sh -ec 'until kubectl get kamajicontrolplane -n tenant-test kubernetes-test; do sleep 1; done'
  kubectl wait --for=condition=TenantControlPlaneCreated kamajicontrolplane -n tenant-test kubernetes-test --timeout=4m
  kubectl wait tcp -n tenant-test kubernetes-test --timeout=2m --for=jsonpath='{.status.kubernetesResources.version.status}'=Ready
  kubectl wait deploy --timeout=4m --for=condition=available -n tenant-test kubernetes-test kubernetes-test-cluster-autoscaler kubernetes-test-kccm kubernetes-test-kcsi-controller
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=1m --for=jsonpath='{.status.replicas}'=2
  kubectl wait machinedeployment kubernetes-test-md0 -n tenant-test --timeout=10m --for=jsonpath='{.status.v1beta2.readyReplicas}'=2
  kubectl -n tenant-test delete kuberneteses.apps.cozystack.io test
}

47 hack/e2e-apps/mysql.bats Normal file
@@ -0,0 +1,47 @@
#!/usr/bin/env bats

@test "Create DB MySQL" {
  name='test'
  kubectl -n tenant-test get mysqls.apps.cozystack.io $name ||
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: MySQL
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  users:
    testuser:
      maxUserConnections: 1000
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr mysql-$name --timeout=30s --for=condition=ready
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name -o jsonpath='{.spec.ports[0].port}' | grep -q '3306'; do sleep 10; done"
  timeout 80 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait statefulset.apps/mysql-$name --timeout=110s --for=jsonpath='{.status.replicas}'=2
  timeout 80 sh -ec "until kubectl -n tenant-test get svc mysql-$name-metrics -o jsonpath='{.spec.ports[0].port}' | grep -q '9104'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get endpoints mysql-$name-metrics -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test wait deployment.apps/mysql-$name-metrics --timeout=90s --for=jsonpath='{.status.replicas}'=1
  kubectl -n tenant-test delete mysqls.apps.cozystack.io $name
}

55 hack/e2e-apps/postgres.bats Normal file
@@ -0,0 +1,55 @@
#!/usr/bin/env bats

@test "Create DB PostgreSQL" {
  name='test'
  kubectl -n tenant-test get postgreses.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Postgres
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 10Gi
  replicas: 2
  storageClass: ""
  postgresql:
    parameters:
      max_connections: 100
  quorum:
    minSyncReplicas: 0
    maxSyncReplicas: 0
  users:
    testuser:
      password: xai7Wepo
  databases:
    testdb:
      roles:
        admin:
          - testuser
  backup:
    enabled: false
    s3Region: us-east-1
    s3Bucket: s3.example.org/postgres-backups
    schedule: "0 2 * * *"
    cleanupStrategy: "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
    s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
    s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
    resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr postgres-$name --timeout=100s --for=condition=ready
  kubectl -n tenant-test wait job.batch postgres-$name-init-job --timeout=50s --for=condition=Complete
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-r -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-ro -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 40 sh -ec "until kubectl -n tenant-test get svc postgres-$name-rw -o jsonpath='{.spec.ports[0].port}' | grep -q '5432'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-r -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  # for some reason it takes longer for the read-only endpoint to be ready
  #timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-ro -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  timeout 120 sh -ec "until kubectl -n tenant-test get endpoints postgres-$name-rw -o jsonpath='{.subsets[*].addresses[*].ip}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete postgreses.apps.cozystack.io $name
  kubectl -n tenant-test delete job.batch/postgres-$name-init-job
}

26 hack/e2e-apps/redis.bats Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bats

@test "Create Redis" {
  name='test'
  kubectl create -f- <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Redis
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  size: 1Gi
  replicas: 2
  storageClass: ""
  authEnabled: true
  resources: {}
  resourcesPreset: "nano"
EOF
  sleep 5
  kubectl -n tenant-test wait hr redis-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test wait pvc redisfailover-persistent-data-rfr-redis-$name-0 --timeout=50s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait deploy rfs-redis-$name --timeout=90s --for=condition=available
  kubectl -n tenant-test wait sts rfr-redis-$name --timeout=90s --for=jsonpath='{.status.replicas}'=2
  kubectl -n tenant-test delete redis.apps.cozystack.io $name
}

48 hack/e2e-apps/virtualmachine.bats Normal file
@@ -0,0 +1,48 @@
#!/usr/bin/env bats

@test "Create a Virtual Machine" {
  name='test'
  kubectl -n tenant-test get virtualmachines.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VirtualMachine
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  systemDisk:
    image: ubuntu
    storage: 5Gi
    storageClass: replicated
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  kubectl -n tenant-test wait hr virtual-machine-$name --timeout=10s --for=condition=ready
  kubectl -n tenant-test wait dv virtual-machine-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc virtual-machine-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
  kubectl -n tenant-test wait vm virtual-machine-$name --timeout=100s --for=condition=ready
  timeout 120 sh -ec "until kubectl -n tenant-test get vmi virtual-machine-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 10; done"
  kubectl -n tenant-test delete virtualmachines.apps.cozystack.io $name
}

70 hack/e2e-apps/vminstance.bats Normal file
@@ -0,0 +1,70 @@
#!/usr/bin/env bats

@test "Create a VM Disk" {
  name='test'
  kubectl -n tenant-test get vmdisks.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMDisk
metadata:
  name: $name
  namespace: tenant-test
spec:
  source:
    http:
      url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
  optical: false
  storage: 5Gi
  storageClass: replicated
EOF
  sleep 5
  kubectl -n tenant-test wait hr vm-disk-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait dv vm-disk-$name --timeout=150s --for=condition=ready
  kubectl -n tenant-test wait pvc vm-disk-$name --timeout=100s --for=jsonpath='{.status.phase}'=Bound
}

@test "Create a VM Instance" {
  diskName='test'
  name='test'
  kubectl -n tenant-test get vminstances.apps.cozystack.io $name ||
  kubectl create -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: VMInstance
metadata:
  name: $name
  namespace: tenant-test
spec:
  external: false
  externalMethod: PortList
  externalPorts:
    - 22
  running: true
  instanceType: "u1.medium"
  instanceProfile: ubuntu
  disks:
    - name: $diskName
  gpus: []
  resources:
    cpu: ""
    memory: ""
  sshKeys:
    - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInit: |
    #cloud-config
    users:
      - name: test
        shell: /bin/bash
        sudo: ['ALL=(ALL) NOPASSWD: ALL']
        groups: sudo
        ssh_authorized_keys:
          - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPht0dPk5qQ+54g1hSX7A6AUxXJW5T6n/3d7Ga2F8gTF test@test
  cloudInitSeed: ""
EOF
  sleep 5
  timeout 20 sh -ec "until kubectl -n tenant-test get vmi vm-instance-$name -o jsonpath='{.status.interfaces[0].ipAddress}' | grep -q '[0-9]'; do sleep 5; done"
  kubectl -n tenant-test wait hr vm-instance-$name --timeout=5s --for=condition=ready
  kubectl -n tenant-test wait vm vm-instance-$name --timeout=20s --for=condition=ready
  kubectl -n tenant-test delete vminstances.apps.cozystack.io $name
  kubectl -n tenant-test delete vmdisks.apps.cozystack.io $diskName
}

182 hack/e2e-install-cozystack.bats Normal file
@@ -0,0 +1,182 @@
#!/usr/bin/env bats

@test "Install Cozystack" {
  # Create namespace & configmap required by installer
  kubectl create namespace cozy-system --dry-run=client -o yaml | kubectl apply -f -
  kubectl create configmap cozystack -n cozy-system \
    --from-literal=bundle-name=paas-full \
    --from-literal=ipv4-pod-cidr=10.244.0.0/16 \
    --from-literal=ipv4-pod-gateway=10.244.0.1 \
    --from-literal=ipv4-svc-cidr=10.96.0.0/16 \
    --from-literal=ipv4-join-cidr=100.64.0.0/16 \
    --from-literal=root-host=example.org \
    --from-literal=api-server-endpoint=https://192.168.123.10:6443 \
    --dry-run=client -o yaml | kubectl apply -f -

  # Apply installer manifests from file
  kubectl apply -f _out/assets/cozystack-installer.yaml

  # Wait for the installer deployment to become available
  kubectl wait deployment/cozystack -n cozy-system --timeout=1m --for=condition=Available

  # Wait until HelmReleases appear & reconcile them
  timeout 60 sh -ec 'until kubectl get hr -A -l cozystack.io/system-app=true | grep -q cozys; do sleep 1; done'
  sleep 5
  kubectl get hr -A -l cozystack.io/system-app=true | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n "$1" hr/"$2" &"} END {print "wait"}' | sh -ex

  # Fail the test if any HelmRelease is not Ready
  if kubectl get hr -A | grep -v " True " | grep -v NAME; then
    kubectl get hr -A
    fail "Some HelmReleases failed to reconcile"
  fi
}

@test "Wait for Cluster-API provider deployments" {
  # Wait for Cluster-API provider deployments
  timeout 60 sh -ec 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deployment/capi-controller-manager deployment/capi-kamaji-controller-manager deployment/capi-kubeadm-bootstrap-controller-manager deployment/capi-operator-cluster-api-operator deployment/capk-controller-manager -n cozy-cluster-api --timeout=1m --for=condition=available
}

@test "Wait for LINSTOR and configure storage" {
  # Linstor controller and nodes
  kubectl wait deployment/linstor-controller -n cozy-linstor --timeout=5m --for=condition=available
  timeout 60 sh -ec 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) -eq 3 ]; do sleep 1; done'

  created_pools=$(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor sp l -s data --pastable | awk '$2 == "data" {printf " " $4} END{printf " "}')
  for node in srv1 srv2 srv3; do
    case $created_pools in
      *" $node "*) echo "Storage pool 'data' already exists on node $node"; continue;;
    esac
    kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs ${node} /dev/vdc --pool-name data --storage-pool data
  done

  # Storage classes
  kubectl apply -f - <<'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/layerList: "storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: replicated
provisioner: linstor.csi.linbit.com
parameters:
  linstor.csi.linbit.com/storagePool: "data"
  linstor.csi.linbit.com/autoPlace: "3"
  linstor.csi.linbit.com/layerList: "drbd storage"
  linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
  property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
  property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
  property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
volumeBindingMode: Immediate
allowVolumeExpansion: true
EOF
}

@test "Wait for MetalLB and configure address pool" {
  # MetalLB address pool
  kubectl apply -f - <<'EOF'
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  ipAddressPools: [cozystack]
---
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: cozystack
  namespace: cozy-metallb
spec:
  addresses: [192.168.123.200-192.168.123.250]
  autoAssign: true
  avoidBuggyIPs: false
EOF
}

@test "Check Cozystack API service" {
  kubectl wait --for=condition=Available apiservices/v1alpha1.apps.cozystack.io --timeout=2m
}

@test "Configure Tenant and wait for applications" {
  # Patch root tenant and wait for its releases
  kubectl patch tenants/root -n tenant-root --type merge -p '{"spec":{"host":"example.org","ingress":true,"monitoring":true,"etcd":true,"isolated":true}}'

  timeout 60 sh -ec 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/etcd hr/ingress hr/tenant-root -n tenant-root --timeout=2m --for=condition=ready

  if ! kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready; then
    flux reconcile hr monitoring -n tenant-root --force
    kubectl wait hr/monitoring -n tenant-root --timeout=2m --for=condition=ready
  fi

  # Expose Cozystack services through ingress
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"expose-services":"api,dashboard,cdi-uploadproxy,vm-exportproxy,keycloak"}}'

  # NGINX ingress controller
  timeout 60 sh -ec 'until kubectl get deploy root-ingress-controller -n tenant-root >/dev/null 2>&1; do sleep 1; done'
  kubectl wait deploy/root-ingress-controller -n tenant-root --timeout=5m --for=condition=available

  # etcd statefulset
  kubectl wait sts/etcd -n tenant-root --for=jsonpath='{.status.readyReplicas}'=3 --timeout=5m

  # VictoriaMetrics components
  kubectl wait vmalert/vmalert-shortterm vmalertmanager/alertmanager -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vlogs/generic -n tenant-root --for=jsonpath='{.status.updateStatus}'=operational --timeout=5m
  kubectl wait vmcluster/shortterm vmcluster/longterm -n tenant-root --for=jsonpath='{.status.clusterStatus}'=operational --timeout=5m

  # Grafana
  kubectl wait clusters.postgresql.cnpg.io/grafana-db -n tenant-root --for=condition=ready --timeout=5m
  kubectl wait deploy/grafana-deployment -n tenant-root --for=condition=available --timeout=5m

  # Verify Grafana via ingress
  ingress_ip=$(kubectl get svc root-ingress-controller -n tenant-root -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
  if ! curl -sS -k "https://${ingress_ip}" -H 'Host: grafana.example.org' --max-time 30 | grep -q Found; then
    echo "Failed to access Grafana via ingress at ${ingress_ip}" >&2
    exit 1
  fi
}

@test "Keycloak OIDC stack is healthy" {
  kubectl patch configmap/cozystack -n cozy-system --type merge -p '{"data":{"oidc-enabled":"true"}}'

  timeout 120 sh -ec 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator >/dev/null 2>&1; do sleep 1; done'
  kubectl wait hr/keycloak hr/keycloak-configure hr/keycloak-operator -n cozy-keycloak --timeout=10m --for=condition=ready
}

@test "Create tenant with isolated mode enabled" {
  kubectl -n tenant-root get tenants.apps.cozystack.io test ||
  kubectl apply -f - <<EOF
apiVersion: apps.cozystack.io/v1alpha1
kind: Tenant
metadata:
  name: test
  namespace: tenant-root
spec:
  etcd: false
  host: ""
  ingress: false
  isolated: true
  monitoring: false
  resourceQuotas: {}
  seaweedfs: false
EOF
  kubectl wait hr/tenant-test -n tenant-root --timeout=1m --for=condition=ready
  kubectl wait namespace tenant-test --timeout=20s --for=jsonpath='{.status.phase}'=Active
}

235 hack/e2e-prepare-cluster.bats Normal file
@@ -0,0 +1,235 @@
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# Cozystack end-to-end provisioning test (Bats)
# -----------------------------------------------------------------------------

@test "Required installer assets exist" {
  if [ ! -f _out/assets/cozystack-installer.yaml ]; then
    echo "Missing: _out/assets/cozystack-installer.yaml" >&2
    exit 1
  fi

  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing: _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi
}

@test "IPv4 forwarding is enabled" {
  if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
    echo "IPv4 forwarding is disabled!" >&2
    echo >&2
    echo "Enable it with:" >&2
    echo "  echo 1 > /proc/sys/net/ipv4/ip_forward" >&2
    exit 1
  fi
}

@test "Clean previous VMs" {
  kill $(cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid 2>/dev/null) 2>/dev/null || true
  rm -rf srv1 srv2 srv3
}

@test "Prepare networking and masquerading" {
  ip link del cozy-br0 2>/dev/null || true
  ip link add cozy-br0 type bridge
  ip link set cozy-br0 up
  ip address add 192.168.123.1/24 dev cozy-br0

  # Masquerading rule – idempotent (delete first, then add)
  iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
  iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
}

@test "Prepare cloud-init drive for VMs" {
  mkdir -p srv1 srv2 srv3

  # Generate cloud-init ISOs
  for i in 1 2 3; do
    echo "hostname: srv${i}" > "srv${i}/meta-data"

    cat > "srv${i}/user-data" <<'EOF'
#cloud-config
EOF

    cat > "srv${i}/network-config" <<EOF
version: 2
ethernets:
  eth0:
    dhcp4: false
    addresses:
      - "192.168.123.1${i}/26"
    gateway4: "192.168.123.1"
    nameservers:
      search: [cluster.local]
      addresses: [8.8.8.8]
EOF

    ( cd "srv${i}" && genisoimage \
        -output seed.img \
        -volid cidata -rational-rock -joliet \
        user-data meta-data network-config )
  done
}

@test "Use Talos NoCloud image from assets" {
  if [ ! -f _out/assets/nocloud-amd64.raw.xz ]; then
    echo "Missing _out/assets/nocloud-amd64.raw.xz" >&2
    exit 1
  fi

  rm -f nocloud-amd64.raw
  cp _out/assets/nocloud-amd64.raw.xz .
  xz --decompress nocloud-amd64.raw.xz
}

@test "Prepare VM disks" {
  for i in 1 2 3; do
    cp nocloud-amd64.raw srv${i}/system.img
    qemu-img resize srv${i}/system.img 50G
    qemu-img create srv${i}/data.img 100G
  done
}

@test "Create tap devices" {
  for i in 1 2 3; do
    ip link del cozy-srv${i} 2>/dev/null || true
    ip tuntap add dev cozy-srv${i} mode tap
    ip link set cozy-srv${i} up
    ip link set cozy-srv${i} master cozy-br0
  done
}

@test "Boot QEMU VMs" {
  for i in 1 2 3; do
    qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 8 -m 24576 \
      -device virtio-net,netdev=net0,mac=52:54:00:12:34:5${i} \
      -netdev tap,id=net0,ifname=cozy-srv${i},script=no,downscript=no \
      -drive file=srv${i}/system.img,if=virtio,format=raw \
      -drive file=srv${i}/seed.img,if=virtio,format=raw \
      -drive file=srv${i}/data.img,if=virtio,format=raw \
      -display none -daemonize -pidfile srv${i}/qemu.pid
  done

  # Give qemu a few seconds to start up networking
  sleep 5
}

@test "Wait until Talos API port 50000 is reachable on all machines" {
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Generate Talos cluster configuration" {
  # Cluster-wide patches
  cat > patch.yaml <<'EOF'
machine:
  kubelet:
    nodeIP:
      validSubnets:
        - 192.168.123.0/24
    extraConfig:
      maxPods: 512
  kernel:
    modules:
      - name: openvswitch
      - name: drbd
        parameters:
          - usermode_helper=disabled
      - name: zfs
      - name: spl
  registries:
    mirrors:
      docker.io:
        endpoints:
          - https://mirror.gcr.io
  files:
    - content: |
        [plugins]
          [plugins."io.containerd.cri.v1.runtime"]
            device_ownership_from_security_context = true
      path: /etc/cri/conf.d/20-customization.part
      op: create

cluster:
  apiServer:
    extraArgs:
      oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
      oidc-client-id: "kubernetes"
      oidc-username-claim: "preferred_username"
      oidc-groups-claim: "groups"
  network:
    cni:
      name: none
    dnsDomain: cozy.local
    podSubnets:
      - 10.244.0.0/16
    serviceSubnets:
      - 10.96.0.0/16
EOF

  # Control-plane-only patches
  cat > patch-controlplane.yaml <<'EOF'
machine:
  nodeLabels:
    node.kubernetes.io/exclude-from-external-load-balancers:
      $patch: delete
  network:
    interfaces:
      - interface: eth0
        vip:
          ip: 192.168.123.10
cluster:
  allowSchedulingOnControlPlanes: true
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
  scheduler:
    extraArgs:
      bind-address: 0.0.0.0
  apiServer:
    certSANs:
      - 127.0.0.1
  proxy:
    disabled: true
  discovery:
    enabled: false
  etcd:
    advertisedSubnets:
      - 192.168.123.0/24
EOF

  # Generate secrets once
  if [ ! -f secrets.yaml ]; then
    talosctl gen secrets
  fi

  rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
  talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 \
    --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
}

@test "Apply Talos configuration to the node" {
  # Apply the configuration to all three nodes
  for node in 11 12 13; do
    talosctl apply -f controlplane.yaml -n 192.168.123.${node} -e 192.168.123.${node} -i
  done

  # Wait for Talos services to come up again
  timeout 60 sh -ec 'until nc -nz 192.168.123.11 50000 && nc -nz 192.168.123.12 50000 && nc -nz 192.168.123.13 50000; do sleep 1; done'
}

@test "Bootstrap Talos cluster" {
  # Bootstrap etcd on the first node
  timeout 10 sh -ec 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'

  # Wait until etcd is healthy
  timeout 180 sh -ec 'until talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 >/dev/null 2>&1; do sleep 1; done'
  timeout 60 sh -ec 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep -q "rpc error"; do sleep 1; done'

  # Retrieve kubeconfig
  rm -f kubeconfig
  talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10

  # Wait until all three nodes register in Kubernetes
  timeout 60 sh -ec 'until [ $(kubectl get node --no-headers | wc -l) -eq 3 ]; do sleep 1; done'
}

@@ -1,165 +0,0 @@
#!/bin/bash

RED='\033[0;31m'
GREEN='\033[0;32m'
RESET='\033[0m'
YELLOW='\033[0;33m'


ROOT_NS="tenant-root"
TEST_TENANT="tenant-e2e"

values_base_path="/hack/testdata/"
checks_base_path="/hack/testdata/"

function delete_hr() {
  local release_name="$1"
  local namespace="$2"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ "$release_name" == "tenant-e2e" ]]; then
    echo -e "${YELLOW}Skipping deletion for release tenant-e2e.${RESET}"
    return 0
  fi

  kubectl delete helmrelease $release_name -n $namespace
}

function install_helmrelease() {
  local release_name="$1"
  local namespace="$2"
  local chart_path="$3"
  local repo_name="$4"
  local repo_ns="$5"
  local values_file="$6"

  if [[ -z "$release_name" ]]; then
    echo -e "${RED}Error: Release name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$namespace" ]]; then
    echo -e "${RED}Error: Namespace name is required.${RESET}"
    exit 1
  fi

  if [[ -z "$chart_path" ]]; then
    echo -e "${RED}Error: Chart path name is required.${RESET}"
    exit 1
  fi

  if [[ -n "$values_file" && -f "$values_file" ]]; then
    local values_section
    values_section=$(echo "  values:" && sed 's/^/    /' "$values_file")
  fi

  local helmrelease_file=$(mktemp /tmp/HelmRelease.XXXXXX.yaml)
  {
    echo "apiVersion: helm.toolkit.fluxcd.io/v2"
    echo "kind: HelmRelease"
    echo "metadata:"
    echo "  labels:"
    echo "    cozystack.io/ui: \"true\""
    echo "  name: \"$release_name\""
    echo "  namespace: \"$namespace\""
    echo "spec:"
    echo "  chart:"
    echo "    spec:"
    echo "      chart: \"$chart_path\""
    echo "      reconcileStrategy: Revision"
    echo "      sourceRef:"
    echo "        kind: HelmRepository"
    echo "        name: \"$repo_name\""
    echo "        namespace: \"$repo_ns\""
    echo "      version: '*'"
    echo "  interval: 1m0s"
    echo "  timeout: 5m0s"
    [[ -n "$values_section" ]] && echo "$values_section"
  } > "$helmrelease_file"

  kubectl apply -f "$helmrelease_file"

  rm -f "$helmrelease_file"
}

function install_tenant (){
  local release_name="$1"
  local namespace="$2"
  local values_file="${values_base_path}tenant/values.yaml"
  local repo_name="cozystack-apps"
  local repo_ns="cozy-public"
  install_helmrelease "$release_name" "$namespace" "tenant" "$repo_name" "$repo_ns" "$values_file"
}

function make_extra_checks(){
  local checks_file="$1"
  echo "after exec make $checks_file"
  if [[ -n "$checks_file" && -f "$checks_file" ]]; then
    echo -e "${YELLOW}Start extra checks with file: ${checks_file}${RESET}"

  fi
}

function check_helmrelease_status() {
  local release_name="$1"
  local namespace="$2"
  local checks_file="$3"
  local timeout=300 # Timeout in seconds
  local interval=5  # Interval between checks in seconds
  local elapsed=0


  while [[ $elapsed -lt $timeout ]]; do
    local status_output
    status_output=$(kubectl get helmrelease "$release_name" -n "$namespace" -o json | jq -r '.status.conditions[-1].reason')

    if [[ "$status_output" == "InstallSucceeded" || "$status_output" == "UpgradeSucceeded" ]]; then
      echo -e "${GREEN}Helm release '$release_name' is ready.${RESET}"
      make_extra_checks "$checks_file"
      delete_hr $release_name $namespace
      return 0
    elif [[ "$status_output" == "InstallFailed" ]]; then
      echo -e "${RED}Helm release '$release_name': InstallFailed${RESET}"
      exit 1
    else
      echo -e "${YELLOW}Helm release '$release_name' is not ready. Current status: $status_output${RESET}"
    fi

    sleep "$interval"
    elapsed=$((elapsed + interval))
  done

  echo -e "${RED}Timeout reached. Helm release '$release_name' is still not ready after $timeout seconds.${RESET}"
  exit 1
}

chart_name="$1"

if [ -z "$chart_name" ]; then
  echo -e "${RED}No chart name provided. Exiting...${RESET}"
  exit 1
fi


checks_file="${checks_base_path}${chart_name}/check.sh"
repo_name="cozystack-apps"
repo_ns="cozy-public"
release_name="$chart_name-e2e"
values_file="${values_base_path}${chart_name}/values.yaml"

install_tenant $TEST_TENANT $ROOT_NS
check_helmrelease_status $TEST_TENANT $ROOT_NS "${checks_base_path}tenant/check.sh"

echo -e "${YELLOW}Running tests for chart: $chart_name${RESET}"

install_helmrelease $release_name $TEST_TENANT $chart_name $repo_name $repo_ns $values_file
check_helmrelease_status $release_name $TEST_TENANT $checks_file

351
hack/e2e.sh
351
hack/e2e.sh
@@ -1,351 +0,0 @@
|
||||
#!/bin/bash
|
||||
if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
|
||||
echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
|
||||
echo 'please set it with following command:' >&2
|
||||
echo >&2
|
||||
echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
|
||||
echo >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "$(cat /proc/sys/net/ipv4/ip_forward)" != 1 ]; then
|
||||
echo "IPv4 forwarding is not enabled!" >&2
|
||||
echo 'please enable forwarding with the following command:' >&2
|
||||
echo >&2
|
||||
echo 'echo 1 > /proc/sys/net/ipv4/ip_forward' >&2
|
||||
echo >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -x
|
||||
set -e
|
||||
|
||||
kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
|
||||
|
||||
ip link del cozy-br0 || true
|
||||
ip link add cozy-br0 type bridge
|
||||
ip link set cozy-br0 up
|
||||
ip addr add 192.168.123.1/24 dev cozy-br0
|
||||
|
||||
# Enable masquerading
|
||||
iptables -t nat -D POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE 2>/dev/null || true
|
||||
iptables -t nat -A POSTROUTING -s 192.168.123.0/24 ! -d 192.168.123.0/24 -j MASQUERADE
|
||||
|
||||
rm -rf srv1 srv2 srv3
|
||||
mkdir -p srv1 srv2 srv3
|
||||
|
||||
# Prepare cloud-init
|
||||
for i in 1 2 3; do
|
||||
echo "hostname: srv$i" > "srv$i/meta-data"
|
||||
echo '#cloud-config' > "srv$i/user-data"
|
||||
cat > "srv$i/network-config" <<EOT
|
||||
version: 2
|
||||
ethernets:
|
||||
eth0:
|
||||
dhcp4: false
|
||||
addresses:
|
||||
- "192.168.123.1$i/26"
|
||||
gateway4: "192.168.123.1"
|
||||
nameservers:
|
||||
search: [cluster.local]
|
||||
addresses: [8.8.8.8]
|
||||
EOT
|
||||
|
||||
( cd srv$i && genisoimage \
|
||||
-output seed.img \
|
||||
-volid cidata -rational-rock -joliet \
|
||||
user-data meta-data network-config
|
||||
)
|
||||
done
|
||||
|
||||
# Prepare system drive
|
||||
if [ ! -f nocloud-amd64.raw ]; then
|
||||
wget https://github.com/aenix-io/cozystack/releases/latest/download/nocloud-amd64.raw.xz -O nocloud-amd64.raw.xz
|
||||
rm -f nocloud-amd64.raw
|
||||
xz --decompress nocloud-amd64.raw.xz
|
||||
fi
|
||||
for i in 1 2 3; do
|
||||
cp nocloud-amd64.raw srv$i/system.img
|
||||
qemu-img resize srv$i/system.img 20G
|
||||
done
|
||||
|
||||
# Prepare data drives
|
||||
for i in 1 2 3; do
|
||||
qemu-img create srv$i/data.img 100G
|
||||
done
|
||||
|
||||
# Prepare networking
|
||||
for i in 1 2 3; do
|
||||
ip link del cozy-srv$i || true
|
||||
ip tuntap add dev cozy-srv$i mode tap
|
||||
ip link set cozy-srv$i up
|
||||
ip link set cozy-srv$i master cozy-br0
|
||||
done
|
||||
|
||||
# Start VMs
|
||||
for i in 1 2 3; do
|
||||
qemu-system-x86_64 -machine type=pc,accel=kvm -cpu host -smp 4 -m 8192 \
|
||||
-device virtio-net,netdev=net0,mac=52:54:00:12:34:5$i -netdev tap,id=net0,ifname=cozy-srv$i,script=no,downscript=no \
|
||||
-drive file=srv$i/system.img,if=virtio,format=raw \
|
||||
-drive file=srv$i/seed.img,if=virtio,format=raw \
|
||||
-drive file=srv$i/data.img,if=virtio,format=raw \
|
||||
-display none -daemonize -pidfile srv$i/qemu.pid
|
||||
done
|
||||
|
||||
sleep 5
|
||||
|
||||
# Wait for VMs to start up
|
||||
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
|
||||
|
||||
cat > patch.yaml <<\EOT
|
||||
machine:
|
||||
kubelet:
|
||||
nodeIP:
|
||||
validSubnets:
|
||||
- 192.168.123.0/24
|
||||
extraConfig:
|
||||
maxPods: 512
|
||||
kernel:
|
||||
modules:
|
||||
- name: openvswitch
|
||||
- name: drbd
|
||||
parameters:
|
||||
- usermode_helper=disabled
|
||||
- name: zfs
|
||||
- name: spl
|
||||
files:
|
||||
- content: |
|
||||
[plugins]
|
||||
[plugins."io.containerd.grpc.v1.cri"]
|
||||
device_ownership_from_security_context = true
|
||||
path: /etc/cri/conf.d/20-customization.part
|
||||
op: create
|
||||
|
||||
cluster:
|
||||
apiServer:
|
||||
extraArgs:
|
||||
oidc-issuer-url: "https://keycloak.example.org/realms/cozy"
|
||||
oidc-client-id: "kubernetes"
|
||||
oidc-username-claim: "preferred_username"
|
||||
oidc-groups-claim: "groups"
|
||||
network:
|
||||
cni:
|
||||
name: none
|
||||
dnsDomain: cozy.local
|
||||
podSubnets:
|
||||
- 10.244.0.0/16
|
||||
serviceSubnets:
|
||||
- 10.96.0.0/16
|
||||
EOT
|
||||
|
||||
cat > patch-controlplane.yaml <<\EOT
|
||||
machine:
|
||||
nodeLabels:
|
||||
node.kubernetes.io/exclude-from-external-load-balancers:
|
||||
$patch: delete
|
||||
network:
|
||||
interfaces:
|
||||
- interface: eth0
|
||||
vip:
|
||||
ip: 192.168.123.10
|
||||
cluster:
|
||||
allowSchedulingOnControlPlanes: true
|
||||
controllerManager:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
scheduler:
|
||||
extraArgs:
|
||||
bind-address: 0.0.0.0
|
||||
apiServer:
|
||||
certSANs:
|
||||
- 127.0.0.1
|
||||
proxy:
|
||||
disabled: true
|
||||
discovery:
|
||||
enabled: false
|
||||
etcd:
|
||||
advertisedSubnets:
|
||||
- 192.168.123.0/24
|
||||
EOT
|
||||
|
||||
# Gen configuration
|
||||
if [ ! -f secrets.yaml ]; then
|
||||
talosctl gen secrets
|
||||
fi
|
||||
|
||||
rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
|
||||
talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
|
||||
export TALOSCONFIG=$PWD/talosconfig
|
||||
|
||||
# Apply configuration
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
|
||||
talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
|
||||
|
||||
# Wait for VMs to be configured
|
||||
timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
|
||||
|
||||
# Bootstrap
|
||||
timeout 10 sh -c 'until talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11; do sleep 1; done'
|
||||
|
||||
# Wait for etcd
|
||||
timeout 180 sh -c 'until timeout -s 9 2 talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1; do sleep 1; done'
|
||||
timeout 60 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
|
||||
|
||||
rm -f kubeconfig
|
||||
talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
|
||||
export KUBECONFIG=$PWD/kubeconfig
|
||||
|
||||
# Wait for Kubernetes nodes to appear
|
||||
timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
|
||||
kubectl create ns cozy-system -o yaml | kubectl apply -f -
|
||||
kubectl create -f - <<\EOT
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-system
|
||||
data:
|
||||
bundle-name: "paas-full"
|
||||
ipv4-pod-cidr: "10.244.0.0/16"
|
||||
ipv4-pod-gateway: "10.244.0.1"
|
||||
ipv4-svc-cidr: "10.96.0.0/16"
|
||||
ipv4-join-cidr: "100.64.0.0/16"
|
||||
root-host: example.org
|
||||
api-server-endpoint: https://192.168.123.10:6443
|
||||
EOT
|
||||
|
||||
#
|
||||
echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
|
||||
|
||||
# wait for cozystack pod to start
|
||||
kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
|
||||
|
||||
# wait for helmreleases to appear
|
||||
timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
|
||||
|
||||
sleep 5
|
||||
|
||||
kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
|
||||
|
||||
# Wait for Cluster-API providers
|
||||
timeout 30 sh -c 'until kubectl get deploy -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager; do sleep 1; done'
|
||||
kubectl wait deploy --timeout=30s --for=condition=available -n cozy-cluster-api capi-controller-manager capi-kamaji-controller-manager capi-kubeadm-bootstrap-controller-manager capi-operator-cluster-api-operator capk-controller-manager
|
||||
|
||||
# Wait for linstor controller
|
||||
kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
|
||||
|
||||
# Wait for all linstor nodes to become Online
|
||||
timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
|
||||
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
|
||||
kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
|
||||
|
||||
kubectl create -f- <<EOT
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/layerList: "storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "false"
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: replicated
|
||||
provisioner: linstor.csi.linbit.com
|
||||
parameters:
|
||||
linstor.csi.linbit.com/storagePool: "data"
|
||||
linstor.csi.linbit.com/autoPlace: "3"
|
||||
linstor.csi.linbit.com/layerList: "drbd storage"
|
||||
linstor.csi.linbit.com/allowRemoteVolumeAccess: "true"
|
||||
property.linstor.csi.linbit.com/DrbdOptions/auto-quorum: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-no-data-accessible: suspend-io
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Resource/on-suspended-primary-outdated: force-secondary
|
||||
property.linstor.csi.linbit.com/DrbdOptions/Net/rr-conflict: retry-connect
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
allowVolumeExpansion: true
|
||||
EOT
|
||||
kubectl create -f- <<EOT
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: L2Advertisement
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
ipAddressPools:
|
||||
- cozystack
|
||||
---
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-metallb
|
||||
spec:
|
||||
addresses:
|
||||
- 192.168.123.200-192.168.123.250
|
||||
autoAssign: true
|
||||
avoidBuggyIPs: false
|
||||
EOT
|
||||
|
||||
# Wait for cozystack-api
|
||||
kubectl wait --for=condition=Available apiservices v1alpha1.apps.cozystack.io --timeout=2m
|
||||
|
||||
kubectl patch -n tenant-root tenants.apps.cozystack.io root --type=merge -p '{"spec":{
|
||||
"host": "example.org",
|
||||
"ingress": true,
|
||||
"monitoring": true,
|
||||
"etcd": true,
|
||||
"isolated": true
|
||||
}}'
|
||||
|
||||
# Wait for HelmReleases to be created
|
||||
timeout 60 sh -c 'until kubectl get hr -n tenant-root etcd ingress monitoring tenant-root; do sleep 1; done'
|
||||
|
||||
# Wait for HelmReleases to be installed
|
||||
kubectl wait --timeout=2m --for=condition=ready -n tenant-root hr etcd ingress monitoring tenant-root
|
||||
|
||||
kubectl patch -n tenant-root ingresses.apps.cozystack.io ingress --type=merge -p '{"spec":{
|
||||
"dashboard": true
|
||||
}}'
|
||||
|
||||
# Wait for nginx-ingress-controller
|
||||
timeout 60 sh -c 'until kubectl get deploy -n tenant-root root-ingress-controller; do sleep 1; done'
|
||||
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy root-ingress-controller
|
||||
|
||||
# Wait for etcd
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.readyReplicas=3 -n tenant-root sts etcd
|
||||
|
||||
# Wait for Victoria metrics
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.updateStatus=operational -n tenant-root vmalert/vmalert-shortterm vmalertmanager/alertmanager
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.status=operational -n tenant-root vlogs/generic
|
||||
kubectl wait --timeout=5m --for=jsonpath=.status.clusterStatus=operational -n tenant-root vmcluster/shortterm vmcluster/longterm
|
||||
|
||||
# Wait for grafana
|
||||
kubectl wait --timeout=5m --for=condition=ready -n tenant-root clusters.postgresql.cnpg.io grafana-db
|
||||
kubectl wait --timeout=5m --for=condition=available -n tenant-root deploy grafana-deployment
|
||||
|
||||
# Get IP of nginx-ingress
|
||||
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
|
||||
|
||||
# Check Grafana
|
||||
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
|
||||
|
||||
|
||||
# Test OIDC
|
||||
kubectl patch -n cozy-system cm/cozystack --type=merge -p '{"data":{
|
||||
"oidc-enabled": "true"
|
||||
}}'
|
||||
|
||||
timeout 60 sh -c 'until kubectl get hr -n cozy-keycloak keycloak keycloak-configure keycloak-operator; do sleep 1; done'
|
||||
kubectl wait --timeout=10m --for=condition=ready -n cozy-keycloak hr keycloak keycloak-configure keycloak-operator
|
||||
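If the bootstrap script gets this far, a rough manual smoke test, assuming the generated kubeconfig is still in place, is to re-run a few of the same checks it performs:

```
export KUBECONFIG=$PWD/kubeconfig
kubectl get hr -A    # every HelmRelease should report Ready=True by now
ip=$(kubectl get svc -n tenant-root root-ingress-controller -o jsonpath='{.status.loadBalancer.ingress..ip}')
curl -sS -k "https://$ip" -H 'Host: grafana.example.org' | grep Found
```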
@@ -1,12 +1,13 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
file=versions_map
|
||||
|
||||
charts=$(find . -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")')
|
||||
|
||||
# <chart> <version> <commit>
|
||||
new_map=$(
|
||||
for chart in $charts; do
|
||||
awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' $chart/Chart.yaml
|
||||
awk '/^name:/ {chart=$2} /^version:/ {version=$2} END{printf "%s %s %s\n", chart, version, "HEAD"}' "$chart/Chart.yaml"
|
||||
done
|
||||
)
|
||||
|
||||
@@ -15,48 +16,49 @@ if [ ! -f "$file" ] || [ ! -s "$file" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
miss_map=$(echo "$new_map" | awk 'NR==FNR { new_map[$1 " " $2] = $3; next } { if (!($1 " " $2 in new_map)) print $1, $2, $3}' - $file)
|
||||
miss_map=$(mktemp)
|
||||
trap 'rm -f "$miss_map"' EXIT
|
||||
echo -n "$new_map" | awk 'NR==FNR { nm[$1 " " $2] = $3; next } { if (!($1 " " $2 in nm)) print $1, $2, $3}' - "$file" > $miss_map
|
||||
|
||||
# search across all tags, sorted by version
|
||||
search_commits=$(git ls-remote --tags origin | awk -F/ '$3 ~ /v[0-9]+.[0-9]+.[0-9]+/ {print}' | sort -k2,2 -rV | awk '{print $1}')
|
||||
|
||||
resolved_miss_map=$(
|
||||
echo "$miss_map" | while read chart version commit; do
|
||||
if [ "$commit" = HEAD ]; then
|
||||
line=$(awk '/^version:/ {print NR; exit}' "./$chart/Chart.yaml")
|
||||
change_commit=$(git --no-pager blame -L"$line",+1 -- "$chart/Chart.yaml" | awk '{print $1}')
|
||||
|
||||
if [ "$change_commit" = "00000000" ]; then
|
||||
# Not committed yet, use previous commit
|
||||
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
|
||||
commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
|
||||
if [ $(echo $commit | cut -c1) = "^" ]; then
|
||||
# Previous commit does not exist
|
||||
commit=$(echo $commit | cut -c2-)
|
||||
fi
|
||||
else
|
||||
# Committed, but versions_map wasn't updated
|
||||
line=$(git show HEAD:"./$chart/Chart.yaml" | awk '/^version:/ {print NR; exit}')
|
||||
change_commit=$(git --no-pager blame -L"$line",+1 HEAD -- "$chart/Chart.yaml" | awk '{print $1}')
|
||||
if [ $(echo $change_commit | cut -c1) = "^" ]; then
|
||||
# Previous commit does not exist
|
||||
commit=$(echo $change_commit | cut -c2-)
|
||||
else
|
||||
commit=$(git describe --always "$change_commit~1")
|
||||
fi
|
||||
while read -r chart version commit; do
|
||||
# if version is found in HEAD, it's HEAD
|
||||
if [ "$(awk '$1 == "version:" {print $2}' ./${chart}/Chart.yaml)" = "${version}" ]; then
|
||||
echo "$chart $version HEAD"
|
||||
continue
|
||||
fi
|
||||
|
||||
# if commit is not HEAD, check if it's valid
|
||||
if [ "$commit" != "HEAD" ]; then
|
||||
if [ "$(git show "${commit}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" != "${version}" ]; then
|
||||
echo "Commit $commit for $chart $version is not valid" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Check if the commit belongs to the main branch
|
||||
if ! git merge-base --is-ancestor "$commit" main; then
|
||||
# Find the closest parent commit that belongs to main
|
||||
commit_in_main=$(git log --pretty=format:"%h" main -- "$chart" | head -n 1)
|
||||
if [ -n "$commit_in_main" ]; then
|
||||
commit="$commit_in_main"
|
||||
else
|
||||
# No valid commit found in main branch for $chart, skipping..."
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
commit=$(git rev-parse --short "$commit")
|
||||
echo "$chart $version $commit"
|
||||
continue
|
||||
fi
|
||||
echo "$chart $version $commit"
|
||||
done
|
||||
|
||||
# if commit is HEAD, but version is not found in HEAD, check all tags
|
||||
found_tag=""
|
||||
for tag in $search_commits; do
|
||||
if [ "$(git show "${tag}:./${chart}/Chart.yaml" | awk '$1 == "version:" {print $2}')" = "${version}" ]; then
|
||||
found_tag=$(git rev-parse --short "${tag}")
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$found_tag" ]; then
|
||||
echo "Can't find $chart $version in any version tag, removing it" >&2
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "$chart $version $found_tag"
|
||||
done < "$miss_map"
|
||||
)
|
||||
|
||||
printf "%s\n" "$new_map" "$resolved_miss_map" | sort -k1,1 -k2,2 -V | awk '$1' > "$file"
|
||||
|
||||
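For context, `versions_map` itself is a plain three-column file, one line per published chart version: chart name, chart version, and the short commit that introduced it, with `HEAD` standing in for the version still under development. An illustrative excerpt (the values below are made up):

```
# <chart> <version> <commit>   -- illustrative, not real history
clickhouse 0.6.2  b1a2c3d
clickhouse 0.10.1 HEAD
nats       0.3.0  9f8e7d6
```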
65
hack/package_chart.sh
Executable file
@@ -0,0 +1,65 @@
|
||||
#!/bin/sh
|
||||
|
||||
set -e
|
||||
|
||||
usage() {
|
||||
printf "%s\n" "Usage:" >&2 ;
|
||||
printf -- "%s\n" '---' >&2 ;
|
||||
printf "%s %s\n" "$0" "INPUT_DIR OUTPUT_DIR TMP_DIR [DEPENDENCY_DIR]" >&2 ;
|
||||
printf -- "%s\n" '---' >&2 ;
|
||||
printf "%s\n" "Takes a helm repository from INPUT_DIR, with an optional library repository in" >&2 ;
|
||||
printf "%s\n" "DEPENDENCY_DIR, prepares a view of the git archive at select points in history" >&2 ;
|
||||
printf "%s\n" "in TMP_DIR and packages helm charts, outputting the tarballs to OUTPUT_DIR" >&2 ;
|
||||
}
|
||||
|
||||
if [ "x$(basename $PWD)" != "xpackages" ]
|
||||
then
|
||||
echo "Error: This script must run from the ./packages/ directory" >&2
|
||||
echo >&2
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "x$#" != "x3" ] && [ "x$#" != "x4" ]
|
||||
then
|
||||
echo "Error: This script takes 3 or 4 arguments" >&2
|
||||
echo "Got $# arguments:" "$@" >&2
|
||||
echo >&2
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
input_dir=$1
|
||||
output_dir=$2
|
||||
tmp_dir=$3
|
||||
|
||||
if [ "x$#" = "x4" ]
|
||||
then
|
||||
dependency_dir=$4
|
||||
fi
|
||||
|
||||
rm -rf "${output_dir:?}"
|
||||
mkdir -p "${output_dir}"
|
||||
while read package _ commit
|
||||
do
|
||||
# this lets devs build the packages from a dirty repo for quick local testing
|
||||
if [ "x$commit" = "xHEAD" ]
|
||||
then
|
||||
helm package "${input_dir}/${package}" -d "${output_dir}"
|
||||
continue
|
||||
fi
|
||||
git archive --format tar "${commit}" "${input_dir}/${package}" | tar -xf- -C "${tmp_dir}/"
|
||||
|
||||
# the library chart is not present in older commits and git archive doesn't fail gracefully if the path is not found
|
||||
if [ "x${dependency_dir}" != "x" ] && git ls-tree --name-only "${commit}" "${dependency_dir}" | grep -qx "${dependency_dir}"
|
||||
then
|
||||
git archive --format tar "${commit}" "${dependency_dir}" | tar -xf- -C "${tmp_dir}/"
|
||||
fi
|
||||
helm package "${tmp_dir}/${input_dir}/${package}" -d "${output_dir}"
|
||||
rm -rf "${tmp_dir:?}/${input_dir:?}/${package:?}"
|
||||
if [ "x${dependency_dir}" != "x" ]
|
||||
then
|
||||
rm -rf "${tmp_dir:?}/${dependency_dir:?}"
|
||||
fi
|
||||
done < "${input_dir}/versions_map"
|
||||
helm repo index "${output_dir}"
|
||||
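Together with the Makefile change later in this diff, a typical invocation looks like the following sketch, run from the `packages/` directory with a throwaway TMP_DIR:

```
cd packages
../hack/package_chart.sh apps ../_out/repos/apps "$(mktemp -d)" library
```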
1
hack/testdata/http-cache/check.sh
vendored
@@ -1 +0,0 @@
|
||||
return 0
|
||||
2
hack/testdata/http-cache/values.yaml
vendored
@@ -1,2 +0,0 @@
|
||||
endpoints:
|
||||
- 8.8.8.8:443
|
||||
1
hack/testdata/kubernetes/check.sh
vendored
@@ -1 +0,0 @@
|
||||
return 0
|
||||
62
hack/testdata/kubernetes/values.yaml
vendored
@@ -1,62 +0,0 @@
|
||||
## @section Common parameters
|
||||
|
||||
## @param host The hostname used to access the Kubernetes cluster externally (defaults to using the cluster name as a subdomain for the tenant host).
|
||||
## @param controlPlane.replicas Number of replicas for Kubernetes control-plane components
|
||||
## @param storageClass StorageClass used to store user data
|
||||
##
|
||||
host: ""
|
||||
controlPlane:
|
||||
replicas: 2
|
||||
storageClass: replicated
|
||||
|
||||
## @param nodeGroups [object] nodeGroups configuration
|
||||
##
|
||||
nodeGroups:
|
||||
md0:
|
||||
minReplicas: 0
|
||||
maxReplicas: 10
|
||||
instanceType: "u1.medium"
|
||||
ephemeralStorage: 20Gi
|
||||
roles:
|
||||
- ingress-nginx
|
||||
|
||||
resources:
|
||||
cpu: ""
|
||||
memory: ""
|
||||
|
||||
## @section Cluster Addons
|
||||
##
|
||||
addons:
|
||||
|
||||
## Cert-manager: automatically creates and manages SSL/TLS certificate
|
||||
##
|
||||
certManager:
|
||||
## @param addons.certManager.enabled Enables the cert-manager
|
||||
## @param addons.certManager.valuesOverride Custom values to override
|
||||
enabled: true
|
||||
valuesOverride: {}
|
||||
|
||||
## Ingress-NGINX Controller
|
||||
##
|
||||
ingressNginx:
|
||||
## @param addons.ingressNginx.enabled Enable Ingress-NGINX controller (expect nodes with 'ingress-nginx' role)
|
||||
## @param addons.ingressNginx.valuesOverride Custom values to override
|
||||
##
|
||||
enabled: true
|
||||
## @param addons.ingressNginx.hosts List of domain names that should be passed through to the cluster by the upper cluster
|
||||
## e.g:
|
||||
## hosts:
|
||||
## - example.org
|
||||
## - foo.example.net
|
||||
##
|
||||
hosts: []
|
||||
valuesOverride: {}
|
||||
|
||||
## Flux CD
|
||||
##
|
||||
fluxcd:
|
||||
## @param addons.fluxcd.enabled Enables Flux CD
|
||||
## @param addons.fluxcd.valuesOverride Custom values to override
|
||||
##
|
||||
enabled: true
|
||||
valuesOverride: {}
|
||||
1
hack/testdata/nats/check.sh
vendored
@@ -1 +0,0 @@
|
||||
return 0
|
||||
10
hack/testdata/nats/values.yaml
vendored
@@ -1,10 +0,0 @@
|
||||
|
||||
## @section Common parameters
|
||||
|
||||
## @param external Enable external access from outside the cluster
|
||||
## @param replicas Number of NATS replicas
|
||||
## @param storageClass StorageClass used to store the data
|
||||
##
|
||||
external: false
|
||||
replicas: 2
|
||||
storageClass: ""
|
||||
1
hack/testdata/tenant/check.sh
vendored
@@ -1 +0,0 @@
|
||||
return 0
|
||||
6
hack/testdata/tenant/values.yaml
vendored
@@ -1,6 +0,0 @@
|
||||
host: ""
|
||||
etcd: false
|
||||
monitoring: false
|
||||
ingress: false
|
||||
seaweedfs: false
|
||||
isolated: true
|
||||
11
hack/upload-assets.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -xe
|
||||
|
||||
version=${VERSION:-$(git describe --tags)}
|
||||
|
||||
gh release upload --clobber $version _out/assets/cozystack-installer.yaml
|
||||
gh release upload --clobber $version _out/assets/metal-amd64.iso
|
||||
gh release upload --clobber $version _out/assets/metal-amd64.raw.xz
|
||||
gh release upload --clobber $version _out/assets/nocloud-amd64.raw.xz
|
||||
gh release upload --clobber $version _out/assets/kernel-amd64
|
||||
gh release upload --clobber $version _out/assets/initramfs-metal-amd64.xz
|
||||
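The release name defaults to `git describe --tags`, so uploading assets for a specific tag is just a matter of setting `VERSION`, assuming the `gh` CLI is already authenticated for the repository:

```
VERSION=v0.26.1 ./hack/upload-assets.sh   # tag value is illustrative
```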
@@ -33,7 +33,7 @@ import (
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
|
||||
cozystackiov1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
|
||||
cozystackiov1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
|
||||
139
internal/controller/system_helm_reconciler.go
Normal file
@@ -0,0 +1,139 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
helmv2 "github.com/fluxcd/helm-controller/api/v2"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
)
|
||||
|
||||
type CozystackConfigReconciler struct {
|
||||
client.Client
|
||||
Scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
var configMapNames = []string{"cozystack", "cozystack-branding", "cozystack-scheduling"}
|
||||
|
||||
const configMapNamespace = "cozy-system"
|
||||
const digestAnnotation = "cozystack.io/cozy-config-digest"
|
||||
const forceReconcileKey = "reconcile.fluxcd.io/forceAt"
|
||||
const requestedAt = "reconcile.fluxcd.io/requestedAt"
|
||||
|
||||
func (r *CozystackConfigReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
|
||||
log := log.FromContext(ctx)
|
||||
|
||||
digest, err := r.computeDigest(ctx)
|
||||
if err != nil {
|
||||
log.Error(err, "failed to compute config digest")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
var helmList helmv2.HelmReleaseList
|
||||
if err := r.List(ctx, &helmList); err != nil {
|
||||
return ctrl.Result{}, fmt.Errorf("failed to list HelmReleases: %w", err)
|
||||
}
|
||||
|
||||
now := time.Now().Format(time.RFC3339Nano)
|
||||
updated := 0
|
||||
|
||||
for _, hr := range helmList.Items {
|
||||
isSystemApp := hr.Labels["cozystack.io/system-app"] == "true"
|
||||
isTenantRoot := hr.Namespace == "tenant-root" && hr.Name == "tenant-root"
|
||||
if !isSystemApp && !isTenantRoot {
|
||||
continue
|
||||
}
|
||||
|
||||
if hr.Annotations == nil {
|
||||
hr.Annotations = map[string]string{}
|
||||
}
|
||||
|
||||
if hr.Annotations[digestAnnotation] == digest {
|
||||
continue
|
||||
}
|
||||
|
||||
patch := client.MergeFrom(hr.DeepCopy())
|
||||
hr.Annotations[digestAnnotation] = digest
|
||||
hr.Annotations[forceReconcileKey] = now
|
||||
hr.Annotations[requestedAt] = now
|
||||
|
||||
if err := r.Patch(ctx, &hr, patch); err != nil {
|
||||
log.Error(err, "failed to patch HelmRelease", "name", hr.Name, "namespace", hr.Namespace)
|
||||
continue
|
||||
}
|
||||
updated++
|
||||
log.Info("patched HelmRelease with new config digest", "name", hr.Name, "namespace", hr.Namespace)
|
||||
}
|
||||
|
||||
log.Info("finished reconciliation", "updatedHelmReleases", updated)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *CozystackConfigReconciler) computeDigest(ctx context.Context) (string, error) {
|
||||
hash := sha256.New()
|
||||
|
||||
for _, name := range configMapNames {
|
||||
var cm corev1.ConfigMap
|
||||
err := r.Get(ctx, client.ObjectKey{Namespace: configMapNamespace, Name: name}, &cm)
|
||||
if err != nil {
|
||||
if kerrors.IsNotFound(err) {
|
||||
continue // ignore missing
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Sort keys for consistent hashing
|
||||
var keys []string
|
||||
for k := range cm.Data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
v := cm.Data[k]
|
||||
fmt.Fprintf(hash, "%s:%s=%s\n", name, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
return hex.EncodeToString(hash.Sum(nil)), nil
|
||||
}
|
||||
|
||||
func (r *CozystackConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
WithEventFilter(predicate.Funcs{
|
||||
UpdateFunc: func(e event.UpdateEvent) bool {
|
||||
cm, ok := e.ObjectNew.(*corev1.ConfigMap)
|
||||
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
|
||||
},
|
||||
CreateFunc: func(e event.CreateEvent) bool {
|
||||
cm, ok := e.Object.(*corev1.ConfigMap)
|
||||
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
|
||||
},
|
||||
DeleteFunc: func(e event.DeleteEvent) bool {
|
||||
cm, ok := e.Object.(*corev1.ConfigMap)
|
||||
return ok && cm.Namespace == configMapNamespace && contains(configMapNames, cm.Name)
|
||||
},
|
||||
}).
|
||||
For(&corev1.ConfigMap{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func contains(slice []string, val string) bool {
|
||||
for _, s := range slice {
|
||||
if s == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
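One way to observe this reconciler in action, sketched under the assumption of a running cluster, is to edit one of the watched ConfigMaps and then look for the digest annotation it stamps onto system HelmReleases:

```
kubectl -n cozy-system edit configmap cozystack    # change any data key
kubectl get hr -A -l cozystack.io/system-app=true \
  -o custom-columns='NAME:.metadata.name,DIGEST:.metadata.annotations.cozystack\.io/cozy-config-digest'
```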
158
internal/controller/tenant_helm_reconciler.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
e "errors"
|
||||
|
||||
helmv2 "github.com/fluxcd/helm-controller/api/v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
)
|
||||
|
||||
type TenantHelmReconciler struct {
|
||||
client.Client
|
||||
Scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
func (r *TenantHelmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
hr := &helmv2.HelmRelease{}
|
||||
if err := r.Get(ctx, req.NamespacedName, hr); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
logger.Error(err, "unable to fetch HelmRelease")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(hr.Name, "tenant-") {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if len(hr.Status.Conditions) == 0 || hr.Status.Conditions[0].Type != "Ready" {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if len(hr.Status.History) == 0 {
|
||||
logger.Info("no history in HelmRelease status", "name", hr.Name)
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if hr.Status.History[0].Status != "deployed" {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
newDigest := hr.Status.History[0].Digest
|
||||
var hrList helmv2.HelmReleaseList
|
||||
childNamespace := getChildNamespace(hr.Namespace, hr.Name)
|
||||
if childNamespace == "tenant-root" && hr.Name == "tenant-root" {
|
||||
if hr.Spec.Values == nil {
|
||||
logger.Error(e.New("hr.Spec.Values is nil"), "cant annotate tenant-root ns")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
err := annotateTenantRootNs(*hr.Spec.Values, r.Client)
|
||||
if err != nil {
|
||||
logger.Error(err, "cant annotate tenant-root ns")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
logger.Info("namespace 'tenant-root' annotated")
|
||||
}
|
||||
|
||||
if err := r.List(ctx, &hrList, client.InNamespace(childNamespace)); err != nil {
|
||||
logger.Error(err, "unable to list HelmReleases in namespace", "namespace", hr.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
for _, item := range hrList.Items {
|
||||
if item.Name == hr.Name {
|
||||
continue
|
||||
}
|
||||
oldDigest := item.GetAnnotations()["cozystack.io/tenant-config-digest"]
|
||||
if oldDigest == newDigest {
|
||||
continue
|
||||
}
|
||||
patchTarget := item.DeepCopy()
|
||||
|
||||
if patchTarget.Annotations == nil {
|
||||
patchTarget.Annotations = map[string]string{}
|
||||
}
|
||||
ts := time.Now().Format(time.RFC3339Nano)
|
||||
|
||||
patchTarget.Annotations["cozystack.io/tenant-config-digest"] = newDigest
|
||||
patchTarget.Annotations["reconcile.fluxcd.io/forceAt"] = ts
|
||||
patchTarget.Annotations["reconcile.fluxcd.io/requestedAt"] = ts
|
||||
|
||||
patch := client.MergeFrom(item.DeepCopy())
|
||||
if err := r.Patch(ctx, patchTarget, patch); err != nil {
|
||||
logger.Error(err, "failed to patch HelmRelease", "name", patchTarget.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Info("patched HelmRelease with new digest", "name", patchTarget.Name, "digest", newDigest, "version", hr.Status.History[0].Version)
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *TenantHelmReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&helmv2.HelmRelease{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func getChildNamespace(currentNamespace, hrName string) string {
|
||||
tenantName := strings.TrimPrefix(hrName, "tenant-")
|
||||
|
||||
switch {
|
||||
case currentNamespace == "tenant-root" && hrName == "tenant-root":
|
||||
// 1) root tenant inside root namespace
|
||||
return "tenant-root"
|
||||
|
||||
case currentNamespace == "tenant-root":
|
||||
// 2) any other tenant in root namespace
|
||||
return fmt.Sprintf("tenant-%s", tenantName)
|
||||
|
||||
default:
|
||||
// 3) tenant in a dedicated namespace
|
||||
return fmt.Sprintf("%s-%s", currentNamespace, tenantName)
|
||||
}
|
||||
}
|
||||
|
||||
func annotateTenantRootNs(values apiextensionsv1.JSON, c client.Client) error {
|
||||
var data map[string]interface{}
|
||||
if err := yaml.Unmarshal(values.Raw, &data); err != nil {
|
||||
return fmt.Errorf("failed to parse HelmRelease values: %w", err)
|
||||
}
|
||||
|
||||
host, ok := data["host"].(string)
|
||||
if !ok || host == "" {
|
||||
return fmt.Errorf("host field not found or not a string")
|
||||
}
|
||||
|
||||
var ns corev1.Namespace
|
||||
if err := c.Get(context.TODO(), client.ObjectKey{Name: "tenant-root"}, &ns); err != nil {
|
||||
return fmt.Errorf("failed to get namespace tenant-root: %w", err)
|
||||
}
|
||||
|
||||
if ns.Annotations == nil {
|
||||
ns.Annotations = map[string]string{}
|
||||
}
|
||||
ns.Annotations["namespace.cozystack.io/host"] = host
|
||||
|
||||
if err := c.Update(context.TODO(), &ns); err != nil {
|
||||
return fmt.Errorf("failed to update namespace: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
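After the `tenant-root` HelmRelease reconciles, its `host` value should appear on the namespace; a quick check (the dots inside the annotation key are escaped for jsonpath):

```
kubectl get namespace tenant-root \
  -o jsonpath='{.metadata.annotations.namespace\.cozystack\.io/host}'
```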
99
internal/controller/workload_controller.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
)
|
||||
|
||||
// WorkloadReconciler reconciles a Workload object
|
||||
type WorkloadReconciler struct {
|
||||
client.Client
|
||||
Scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
func (r *WorkloadReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
logger := log.FromContext(ctx)
|
||||
w := &cozyv1alpha1.Workload{}
|
||||
err := r.Get(ctx, req.NamespacedName, w)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
logger.Error(err, "Unable to fetch Workload")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// it's being deleted, nothing to handle
|
||||
if w.DeletionTimestamp != nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
t := getMonitoredObject(w)
|
||||
|
||||
if t == nil {
|
||||
err = r.Delete(ctx, w)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to delete workload")
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
err = r.Get(ctx, types.NamespacedName{Name: t.GetName(), Namespace: t.GetNamespace()}, t)
|
||||
|
||||
// found object, nothing to do
|
||||
if err == nil {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// error getting object but not 404 -- requeue
|
||||
if !apierrors.IsNotFound(err) {
|
||||
logger.Error(err, "failed to get dependent object", "kind", t.GetObjectKind(), "dependent-object-name", t.GetName())
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
err = r.Delete(ctx, w)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to delete workload")
|
||||
}
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// SetupWithManager registers our controller with the Manager and sets up watches.
|
||||
func (r *WorkloadReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
// Watch Workload objects
|
||||
For(&cozyv1alpha1.Workload{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func getMonitoredObject(w *cozyv1alpha1.Workload) client.Object {
|
||||
switch {
|
||||
case strings.HasPrefix(w.Name, "pvc-"):
|
||||
obj := &corev1.PersistentVolumeClaim{}
|
||||
obj.Name = strings.TrimPrefix(w.Name, "pvc-")
|
||||
obj.Namespace = w.Namespace
|
||||
return obj
|
||||
case strings.HasPrefix(w.Name, "svc-"):
|
||||
obj := &corev1.Service{}
|
||||
obj.Name = strings.TrimPrefix(w.Name, "svc-")
|
||||
obj.Namespace = w.Namespace
|
||||
return obj
|
||||
case strings.HasPrefix(w.Name, "pod-"):
|
||||
obj := &corev1.Pod{}
|
||||
obj.Name = strings.TrimPrefix(w.Name, "pod-")
|
||||
obj.Namespace = w.Namespace
|
||||
return obj
|
||||
}
|
||||
return nil
|
||||
}
|
||||
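The controller derives the monitored object purely from the Workload's name prefix, so a stale `pvc-`/`svc-`/`pod-` Workload is deleted once its backing object is gone. A sketch of the mapping with made-up names:

```
# Workload "pvc-data-0" -> PersistentVolumeClaim "data-0" in the same namespace
# Workload "svc-nats"   -> Service "nats"
# Workload "pod-web-0"  -> Pod "web-0"
kubectl get workloads.cozystack.io -A   # plural/group taken from the RBAC markers elsewhere in this diff
```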
26
internal/controller/workload_controller_test.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestUnprefixedMonitoredObjectReturnsNil(t *testing.T) {
|
||||
w := &cozyv1alpha1.Workload{}
|
||||
w.Name = "unprefixed-name"
|
||||
obj := getMonitoredObject(w)
|
||||
if obj != nil {
|
||||
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want nil`, w.Name, obj)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodMonitoredObject(t *testing.T) {
|
||||
w := &cozyv1alpha1.Workload{}
|
||||
w.Name = "pod-mypod"
|
||||
obj := getMonitoredObject(w)
|
||||
if pod, ok := obj.(*corev1.Pod); !ok || pod.Name != "mypod" {
|
||||
t.Errorf(`getMonitoredObject(&Workload{Name: "%s"}) == %v, want &Pod{Name: "mypod"}`, w.Name, obj)
|
||||
}
|
||||
}
|
||||
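Both tests exercise only the pure name-mapping helper, so they run without a cluster:

```
go test ./internal/controller/ -run 'TestUnprefixedMonitoredObjectReturnsNil|TestPodMonitoredObject' -v
```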
@@ -3,6 +3,7 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@@ -19,7 +20,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
)
|
||||
|
||||
// WorkloadMonitorReconciler reconciles a WorkloadMonitor object
|
||||
@@ -33,6 +34,17 @@ type WorkloadMonitorReconciler struct {
|
||||
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=cozystack.io,resources=workloads/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
|
||||
// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch
|
||||
|
||||
// isServiceReady checks if the service has an external IP bound
|
||||
func (r *WorkloadMonitorReconciler) isServiceReady(svc *corev1.Service) bool {
|
||||
return len(svc.Status.LoadBalancer.Ingress) > 0
|
||||
}
|
||||
|
||||
// isPVCReady checks if the PVC is bound
|
||||
func (r *WorkloadMonitorReconciler) isPVCReady(pvc *corev1.PersistentVolumeClaim) bool {
|
||||
return pvc.Status.Phase == corev1.ClaimBound
|
||||
}
|
||||
|
||||
// isPodReady checks if the Pod is in the Ready condition.
|
||||
func (r *WorkloadMonitorReconciler) isPodReady(pod *corev1.Pod) bool {
|
||||
@@ -88,6 +100,110 @@ func updateOwnerReferences(obj metav1.Object, monitor client.Object) {
|
||||
obj.SetOwnerReferences(owners)
|
||||
}
|
||||
|
||||
// reconcileServiceForMonitor creates or updates a Workload object for the given Service and WorkloadMonitor.
|
||||
func (r *WorkloadMonitorReconciler) reconcileServiceForMonitor(
|
||||
ctx context.Context,
|
||||
monitor *cozyv1alpha1.WorkloadMonitor,
|
||||
svc corev1.Service,
|
||||
) error {
|
||||
logger := log.FromContext(ctx)
|
||||
workload := &cozyv1alpha1.Workload{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("svc-%s", svc.Name),
|
||||
Namespace: svc.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
resources := make(map[string]resource.Quantity)
|
||||
|
||||
quantity := resource.MustParse("0")
|
||||
|
||||
for _, ing := range svc.Status.LoadBalancer.Ingress {
|
||||
if ing.IP != "" {
|
||||
quantity.Add(resource.MustParse("1"))
|
||||
}
|
||||
}
|
||||
|
||||
resourceLabel := "default"
|
||||
if svc.Annotations != nil {
|
||||
var ok bool
|
||||
resourceLabel, ok = svc.Annotations["metallb.universe.tf/ip-allocated-from-pool"]
|
||||
if !ok {
|
||||
resourceLabel = "default"
|
||||
}
|
||||
}
|
||||
resourceLabel = fmt.Sprintf("%s.ipaddresspool.metallb.io/requests.ipaddresses", resourceLabel)
|
||||
resources[resourceLabel] = quantity
|
||||
|
||||
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
|
||||
// Update owner references with the new monitor
|
||||
updateOwnerReferences(workload.GetObjectMeta(), monitor)
|
||||
|
||||
workload.Labels = svc.Labels
|
||||
|
||||
// Fill Workload status fields:
|
||||
workload.Status.Kind = monitor.Spec.Kind
|
||||
workload.Status.Type = monitor.Spec.Type
|
||||
workload.Status.Resources = resources
|
||||
workload.Status.Operational = r.isServiceReady(&svc)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reconcilePVCForMonitor creates or updates a Workload object for the given PVC and WorkloadMonitor.
|
||||
func (r *WorkloadMonitorReconciler) reconcilePVCForMonitor(
|
||||
ctx context.Context,
|
||||
monitor *cozyv1alpha1.WorkloadMonitor,
|
||||
pvc corev1.PersistentVolumeClaim,
|
||||
) error {
|
||||
logger := log.FromContext(ctx)
|
||||
workload := &cozyv1alpha1.Workload{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("pvc-%s", pvc.Name),
|
||||
Namespace: pvc.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
resources := make(map[string]resource.Quantity)
|
||||
|
||||
for resourceName, resourceQuantity := range pvc.Status.Capacity {
|
||||
storageClass := "default"
|
||||
if pvc.Spec.StorageClassName != nil || *pvc.Spec.StorageClassName == "" {
|
||||
storageClass = *pvc.Spec.StorageClassName
|
||||
}
|
||||
resourceLabel := fmt.Sprintf("%s.storageclass.storage.k8s.io/requests.%s", storageClass, resourceName.String())
|
||||
resources[resourceLabel] = resourceQuantity
|
||||
}
|
||||
|
||||
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
|
||||
// Update owner references with the new monitor
|
||||
updateOwnerReferences(workload.GetObjectMeta(), monitor)
|
||||
|
||||
workload.Labels = pvc.Labels
|
||||
|
||||
// Fill Workload status fields:
|
||||
workload.Status.Kind = monitor.Spec.Kind
|
||||
workload.Status.Type = monitor.Spec.Type
|
||||
workload.Status.Resources = resources
|
||||
workload.Status.Operational = r.isPVCReady(&pvc)
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to CreateOrUpdate Workload", "workload", workload.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reconcilePodForMonitor creates or updates a Workload object for the given Pod and WorkloadMonitor.
|
||||
func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
|
||||
ctx context.Context,
|
||||
@@ -96,15 +212,12 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
|
||||
) error {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
// Combine both init containers and normal containers to sum resources properly
|
||||
combinedContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
|
||||
|
||||
// totalResources will store the sum of all container resource limits
|
||||
// totalResources will store the sum of all container resource requests
|
||||
totalResources := make(map[string]resource.Quantity)
|
||||
|
||||
// Iterate over all containers to aggregate their Limits
|
||||
for _, container := range combinedContainers {
|
||||
for name, qty := range container.Resources.Limits {
|
||||
// Iterate over all containers to aggregate their requests
|
||||
for _, container := range pod.Spec.Containers {
|
||||
for name, qty := range container.Resources.Requests {
|
||||
if existing, exists := totalResources[name.String()]; exists {
|
||||
existing.Add(qty)
|
||||
totalResources[name.String()] = existing
|
||||
@@ -133,17 +246,26 @@ func (r *WorkloadMonitorReconciler) reconcilePodForMonitor(
|
||||
|
||||
workload := &cozyv1alpha1.Workload{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Name: fmt.Sprintf("pod-%s", pod.Name),
|
||||
Namespace: pod.Namespace,
|
||||
Labels: map[string]string{},
|
||||
},
|
||||
}
|
||||
|
||||
metaLabels := r.getWorkloadMetadata(&pod)
|
||||
_, err := ctrl.CreateOrUpdate(ctx, r.Client, workload, func() error {
|
||||
// Update owner references with the new monitor
|
||||
updateOwnerReferences(workload.GetObjectMeta(), monitor)
|
||||
|
||||
// Copy labels from the Pod if needed
|
||||
workload.Labels = pod.Labels
|
||||
for k, v := range pod.Labels {
|
||||
workload.Labels[k] = v
|
||||
}
|
||||
|
||||
// Add workload meta to labels
|
||||
for k, v := range metaLabels {
|
||||
workload.Labels[k] = v
|
||||
}
|
||||
|
||||
// Fill Workload status fields:
|
||||
workload.Status.Kind = monitor.Spec.Kind
|
||||
@@ -205,6 +327,45 @@ func (r *WorkloadMonitorReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
}
|
||||
}
|
||||
|
||||
pvcList := &corev1.PersistentVolumeClaimList{}
|
||||
if err := r.List(
|
||||
ctx,
|
||||
pvcList,
|
||||
client.InNamespace(monitor.Namespace),
|
||||
client.MatchingLabels(monitor.Spec.Selector),
|
||||
); err != nil {
|
||||
logger.Error(err, "Unable to list PVCs for WorkloadMonitor", "monitor", monitor.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
for _, pvc := range pvcList.Items {
|
||||
if err := r.reconcilePVCForMonitor(ctx, monitor, pvc); err != nil {
|
||||
logger.Error(err, "Failed to reconcile Workload for PVC", "PVC", pvc.Name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
svcList := &corev1.ServiceList{}
|
||||
if err := r.List(
|
||||
ctx,
|
||||
svcList,
|
||||
client.InNamespace(monitor.Namespace),
|
||||
client.MatchingLabels(monitor.Spec.Selector),
|
||||
); err != nil {
|
||||
logger.Error(err, "Unable to list Services for WorkloadMonitor", "monitor", monitor.Name)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
for _, svc := range svcList.Items {
|
||||
if svc.Spec.Type != corev1.ServiceTypeLoadBalancer {
|
||||
continue
|
||||
}
|
||||
if err := r.reconcileServiceForMonitor(ctx, monitor, svc); err != nil {
|
||||
logger.Error(err, "Failed to reconcile Workload for Service", "Service", svc.Name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Update WorkloadMonitor status based on observed pods
|
||||
monitor.Status.ObservedReplicas = observedReplicas
|
||||
monitor.Status.AvailableReplicas = availableReplicas
|
||||
@@ -233,41 +394,60 @@ func (r *WorkloadMonitorReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
// Also watch Pod objects and map them back to WorkloadMonitor if labels match
|
||||
Watches(
|
||||
&corev1.Pod{},
|
||||
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
pod, ok := obj.(*corev1.Pod)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var monitorList cozyv1alpha1.WorkloadMonitorList
|
||||
// List all WorkloadMonitors in the same namespace
|
||||
if err := r.List(ctx, &monitorList, client.InNamespace(pod.Namespace)); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match each monitor's selector with the Pod's labels
|
||||
var requests []reconcile.Request
|
||||
for _, m := range monitorList.Items {
|
||||
matches := true
|
||||
for k, v := range m.Spec.Selector {
|
||||
if podVal, exists := pod.Labels[k]; !exists || podVal != v {
|
||||
matches = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if matches {
|
||||
requests = append(requests, reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: m.Namespace,
|
||||
Name: m.Name,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
return requests
|
||||
}),
|
||||
handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.Pod{}, r.Client)),
|
||||
).
|
||||
// Watch PVCs as well
|
||||
Watches(
|
||||
&corev1.PersistentVolumeClaim{},
|
||||
handler.EnqueueRequestsFromMapFunc(mapObjectToMonitor(&corev1.PersistentVolumeClaim{}, r.Client)),
|
||||
).
|
||||
// Watch for changes to Workload objects we create (owned by WorkloadMonitor)
|
||||
Owns(&cozyv1alpha1.Workload{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func mapObjectToMonitor[T client.Object](_ T, c client.Client) func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
return func(ctx context.Context, obj client.Object) []reconcile.Request {
|
||||
concrete, ok := obj.(T)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var monitorList cozyv1alpha1.WorkloadMonitorList
|
||||
// List all WorkloadMonitors in the same namespace
|
||||
if err := c.List(ctx, &monitorList, client.InNamespace(concrete.GetNamespace())); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
labels := concrete.GetLabels()
|
||||
// Match each monitor's selector with the Pod's labels
|
||||
var requests []reconcile.Request
|
||||
for _, m := range monitorList.Items {
|
||||
matches := true
|
||||
for k, v := range m.Spec.Selector {
|
||||
if labelVal, exists := labels[k]; !exists || labelVal != v {
|
||||
matches = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if matches {
|
||||
requests = append(requests, reconcile.Request{
|
||||
NamespacedName: types.NamespacedName{
|
||||
Namespace: m.Namespace,
|
||||
Name: m.Name,
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
return requests
|
||||
}
|
||||
}
|
||||
|
||||
func (r *WorkloadMonitorReconciler) getWorkloadMetadata(obj client.Object) map[string]string {
|
||||
labels := make(map[string]string)
|
||||
annotations := obj.GetAnnotations()
|
||||
if instanceType, ok := annotations["kubevirt.io/cluster-instancetype-name"]; ok {
|
||||
labels["workloads.cozystack.io/kubevirt-vmi-instance-type"] = instanceType
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
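Since enqueueing is now label-driven for Pods and PVCs alike, any object whose labels satisfy a monitor's selector re-queues that monitor; a hypothetical walk-through:

```
# assume a WorkloadMonitor in tenant-x with selector {app: nats}
kubectl label pvc data-0 -n tenant-x app=nats   # PVC name and label are illustrative
# the monitor is re-queued and a Workload "pvc-data-0" is created or updated
```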
@@ -16,7 +16,7 @@ import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
|
||||
cozyv1alpha1 "github.com/aenix-io/cozystack/api/v1alpha1"
|
||||
cozyv1alpha1 "github.com/cozystack/cozystack/api/v1alpha1"
|
||||
)
|
||||
|
||||
// Collector handles telemetry data collection and sending
|
||||
|
||||
@@ -1,104 +0,0 @@
|
||||
---
|
||||
# Source: cozy-installer/templates/cozystack.yaml
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: cozy-system
|
||||
labels:
|
||||
pod-security.kubernetes.io/enforce: privileged
|
||||
---
|
||||
# Source: cozy-installer/templates/cozystack.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-system
|
||||
---
|
||||
# Source: cozy-installer/templates/cozystack.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: cozystack
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cozystack
|
||||
namespace: cozy-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
# Source: cozy-installer/templates/cozystack.yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-system
|
||||
spec:
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
targetPort: 8123
|
||||
selector:
|
||||
app: cozystack
|
||||
type: ClusterIP
|
||||
---
|
||||
# Source: cozy-installer/templates/cozystack.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: cozystack
|
||||
namespace: cozy-system
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: cozystack
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxSurge: 0
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: cozystack
|
||||
spec:
|
||||
hostNetwork: true
|
||||
serviceAccountName: cozystack
|
||||
containers:
|
||||
- name: cozystack
|
||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.26.1"
|
||||
env:
|
||||
- name: KUBERNETES_SERVICE_HOST
|
||||
value: localhost
|
||||
- name: KUBERNETES_SERVICE_PORT
|
||||
value: "7445"
|
||||
- name: K8S_AWAIT_ELECTION_ENABLED
|
||||
value: "1"
|
||||
- name: K8S_AWAIT_ELECTION_NAME
|
||||
value: cozystack
|
||||
- name: K8S_AWAIT_ELECTION_LOCK_NAME
|
||||
value: cozystack
|
||||
- name: K8S_AWAIT_ELECTION_LOCK_NAMESPACE
|
||||
value: cozy-system
|
||||
- name: K8S_AWAIT_ELECTION_IDENTITY
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: assets
|
||||
image: "ghcr.io/aenix-io/cozystack/cozystack:v0.26.1"
|
||||
command:
|
||||
- /usr/bin/cozystack-assets-server
|
||||
- "-dir=/cozystack/assets"
|
||||
- "-address=:8123"
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 8123
|
||||
tolerations:
|
||||
- key: "node.kubernetes.io/not-ready"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
- key: "node.cilium.io/agent-not-ready"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
@@ -1,14 +1,8 @@
|
||||
OUT=../../_out/repos/apps
|
||||
TMP=../../_out/repos/apps/historical
|
||||
OUT=../_out/repos/apps
|
||||
TMP := $(shell mktemp -d)
|
||||
|
||||
repo:
|
||||
rm -rf "$(OUT)"
|
||||
mkdir -p "$(OUT)"
|
||||
awk '$$3 != "HEAD" {print "mkdir -p $(TMP)/" $$1 "-" $$2}' versions_map | sh -ex
|
||||
awk '$$3 != "HEAD" {print "git archive " $$3 " " $$1 " | tar -xf- --strip-components=1 -C $(TMP)/" $$1 "-" $$2 }' versions_map | sh -ex
|
||||
helm package -d "$(OUT)" $$(find . $(TMP) -mindepth 2 -maxdepth 2 -name Chart.yaml | awk 'sub("/Chart.yaml", "")' | sort -V)
|
||||
cd "$(OUT)" && helm repo index . --url http://cozystack.cozy-system.svc/repos/apps
|
||||
rm -rf "$(TMP)"
|
||||
cd .. && ../hack/package_chart.sh apps $(OUT) $(TMP) library
|
||||
|
||||
fix-chartnames:
|
||||
find . -maxdepth 2 -name Chart.yaml | awk -F/ '{print $$2}' | while read i; do sed -i "s/^name: .*/name: $$i/" "$$i/Chart.yaml"; done
|
||||
|
||||
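With the target delegating to `package_chart.sh`, rebuilding the apps repository locally reduces to the following, run inside `packages/apps/`:

```
make fix-chartnames   # keep each Chart.yaml name in sync with its directory
make repo
```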
@@ -16,10 +16,10 @@ type: application
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.0
|
||||
version: 0.2.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
appVersion: "0.1.0"
|
||||
appVersion: "0.2.0"
|
||||
|
||||
3
packages/apps/bucket/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# S3 bucket
|
||||
|
||||
## Parameters
|
||||
1
packages/apps/bucket/charts/cozy-lib
Symbolic link
@@ -0,0 +1 @@
|
||||
../../../library/cozy-lib
|
||||
@@ -18,3 +18,14 @@ rules:
|
||||
resourceNames:
|
||||
- {{ .Release.Name }}-ui
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
subjects:
|
||||
{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ .Release.Name }}-dashboard-resources
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
@@ -11,7 +11,7 @@ spec:
|
||||
kind: HelmRepository
|
||||
name: cozystack-system
|
||||
namespace: cozy-system
|
||||
version: '*'
|
||||
version: '>= 0.0.0-0'
|
||||
interval: 1m0s
|
||||
timeout: 5m0s
|
||||
values:
|
||||
|
||||
5
packages/apps/bucket/values.schema.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"title": "Chart Values",
|
||||
"type": "object",
|
||||
"properties": {}
|
||||
}
|
||||
1
packages/apps/bucket/values.yaml
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.6.2
+version: 0.10.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -1,4 +1,4 @@
-CLICKHOUSE_BACKUP_TAG = $(shell awk '$$1 == "version:" {print $$2}' Chart.yaml)
+CLICKHOUSE_BACKUP_TAG = $(shell awk '$$0 ~ /^version:/ {print $$2}' Chart.yaml)

 include ../../../scripts/common-envs.mk
 include ../../../scripts/package.mk
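The switch from a field comparison to an anchored regex is subtle but meaningful: `$1 == "version:"` also matches indented `version:` keys (for example under `dependencies:` in a Chart.yaml), while `/^version:/` only matches the top-level key. A sketch of the difference against a hypothetical Chart.yaml:

```
cat > /tmp/Chart.yaml <<'EOF'
version: 0.10.1
dependencies:
  - name: cozy-lib
    version: 1.0.0
EOF

awk '$1 == "version:" {print $2}' /tmp/Chart.yaml   # prints 0.10.1 and 1.0.0
awk '$0 ~ /^version:/ {print $2}' /tmp/Chart.yaml   # prints only 0.10.1
```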
@@ -7,13 +7,16 @@ generate:
 	readme-generator -v values.yaml -s values.schema.json -r README.md

 image:
-	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/clickhouse-backup \
+	docker buildx build images/clickhouse-backup \
+		--provenance false \
+		--builder=$(BUILDER) \
+		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/clickhouse-backup:latest \
 		--cache-to type=inline \
 		--metadata-file images/clickhouse-backup.json \
 		--push=$(PUSH) \
 		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/clickhouse-backup:$(call settag,$(CLICKHOUSE_BACKUP_TAG))@$$(yq e '."containerimage.digest"' images/clickhouse-backup.json -o json -r)" \
 		> images/clickhouse-backup.tag
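The rewritten recipe assumes a named buildx builder and a platform list coming from `scripts/common-envs.mk` (`BUILDER`, `PLATFORM`, plus the `PUSH`/`LOAD` toggles). A hedged sketch of preparing such a builder locally, with placeholder names:

```
# Hypothetical builder name; in the repo BUILDER and PLATFORM come from scripts/common-envs.mk
docker buildx create --name cozystack-builder --driver docker-container
docker buildx ls   # verify the builder exists and lists its supported platforms
```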
@@ -1,32 +1,35 @@
 # Managed Clickhouse Service

 ClickHouse is an open source high-performance and column-oriented SQL database management system (DBMS).
 It is used for online analytical processing (OLAP).
 Cozystack platform uses Altinity operator to provide ClickHouse.

 ### How to restore backup:

-find snapshot:
-```
-restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
-```
+1. Find a snapshot:
+```
+restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
+```

-restore:
-```
-restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
-```
+2. Restore it:
+```
+restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/
+```

-more details:
-- https://itnext.io/restic-effective-backup-from-stdin-4bc1e8f083c1
+For more details, read [Restic: Effective Backup from Stdin](https://blog.aenix.io/restic-effective-backup-from-stdin-4bc1e8f083c1).
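Before restoring over anything important, it can help to confirm what the latest snapshot actually contains. A small sketch reusing the hypothetical repository URL from the steps above:

```
# List snapshots, inspect the files in the latest one, then restore to a scratch dir
restic -r s3:s3.example.org/clickhouse-backups/table_name snapshots
restic -r s3:s3.example.org/clickhouse-backups/table_name ls latest
restic -r s3:s3.example.org/clickhouse-backups/table_name restore latest --target /tmp/restore-check
```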
 ## Parameters

 ### Common parameters

-| Name             | Description                         | Value  |
-| ---------------- | ----------------------------------- | ------ |
-| `size`           | Persistent Volume size              | `10Gi` |
-| `logStorageSize` | Persistent Volume for logs size     | `2Gi`  |
-| `shards`         | Number of Clickhouse replicas       | `1`    |
-| `replicas`       | Number of Clickhouse shards         | `2`    |
-| `storageClass`   | StorageClass used to store the data | `""`   |
-| `logTTL`         | for query_log and query_thread_log  | `15`   |
+| Name             | Description                                               | Value  |
+| ---------------- | --------------------------------------------------------- | ------ |
+| `size`           | Size of Persistent Volume for data                        | `10Gi` |
+| `logStorageSize` | Size of Persistent Volume for logs                        | `2Gi`  |
+| `shards`         | Number of Clickhouse shards                               | `1`    |
+| `replicas`       | Number of Clickhouse replicas                             | `2`    |
+| `storageClass`   | StorageClass used to store the data                       | `""`   |
+| `logTTL`         | TTL (expiration time) for query_log and query_thread_log  | `15`   |

 ### Configuration parameters
@@ -36,13 +39,32 @@ more details:

 ### Backup parameters

-| Name                     | Description                                    | Value                                                  |
-| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
-| `backup.enabled`         | Enable pereiodic backups                       | `false`                                                |
-| `backup.s3Region`        | The AWS S3 region where backups are stored     | `us-east-1`                                            |
-| `backup.s3Bucket`        | The S3 bucket used for storing backups         | `s3.example.org/clickhouse-backups`                    |
-| `backup.schedule`        | Cron schedule for automated backups            | `0 2 * * *`                                            |
-| `backup.cleanupStrategy` | The strategy for cleaning up old backups       | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
-| `backup.s3AccessKey`     | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
-| `backup.s3SecretKey`     | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
-| `backup.resticPassword`  | The password for Restic backup encryption      | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
+| Name                     | Description                                                                  | Value                                                  |
+| ------------------------ | ---------------------------------------------------------------------------- | ------------------------------------------------------ |
+| `backup.enabled`         | Enable periodic backups                                                      | `false`                                                |
+| `backup.s3Region`        | AWS S3 region where backups are stored                                       | `us-east-1`                                            |
+| `backup.s3Bucket`        | S3 bucket used for storing backups                                           | `s3.example.org/clickhouse-backups`                    |
+| `backup.schedule`        | Cron schedule for automated backups                                          | `0 2 * * *`                                            |
+| `backup.cleanupStrategy` | Retention strategy for cleaning up old backups                               | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
+| `backup.s3AccessKey`     | Access key for S3, used for authentication                                   | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
+| `backup.s3SecretKey`     | Secret key for S3, used for authentication                                   | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
+| `backup.resticPassword`  | Password for Restic backup encryption                                        | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
+| `resources`              | Explicit CPU/memory resource requests and limits for the Clickhouse service | `{}`                                                   |
+| `resourcesPreset`        | Use a common resources preset when `resources` is not set explicitly.       | `small`                                                |
+
+In production environments, it's recommended to set `resources` explicitly.
+Example of `resources`:
+
+```yaml
+resources:
+  limits:
+    cpu: 4000m
+    memory: 4Gi
+  requests:
+    cpu: 100m
+    memory: 512Mi
+```
+
+Allowed values for `resourcesPreset` are `none`, `nano`, `micro`, `small`, `medium`, `large`, `xlarge`, `2xlarge`.
+This value is ignored if `resources` value is set.
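The `backup.cleanupStrategy` value is a set of restic retention flags, so the default corresponds to a `forget` invocation like the sketch below (repository URL follows the examples above):

```
restic -r s3:s3.example.org/clickhouse-backups/table_name forget --prune \
  --keep-last=3 --keep-daily=3 --keep-within-weekly=1m
```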
packages/apps/clickhouse/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
+../../../library/cozy-lib
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/clickhouse-backup:0.6.1@sha256:7a99cabdfd541f863aa5d1b2f7b49afd39838fb94c8448986634a1dc9050751c
+ghcr.io/cozystack/cozystack/clickhouse-backup:0.10.1@sha256:3faf7a4cebf390b9053763107482de175aa0fdb88c1e77424fd81100b1c3a205
packages/apps/clickhouse/templates/_resources.tpl (new file, 49 lines)
@@ -0,0 +1,49 @@
+{{/*
+Copyright Broadcom, Inc. All Rights Reserved.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a resource request/limit object based on a given preset.
+These presets are for basic testing and not meant to be used in production
+{{ include "resources.preset" (dict "type" "nano") -}}
+*/}}
+{{- define "resources.preset" -}}
+{{- $presets := dict
+  "nano" (dict
+      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+   )
+  "micro" (dict
+      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+   )
+  "small" (dict
+      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+   )
+  "medium" (dict
+      "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+   )
+  "large" (dict
+      "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+   )
+  "xlarge" (dict
+      "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+   )
+  "2xlarge" (dict
+      "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+   )
+ }}
+{{- if hasKey $presets .type -}}
+{{- index $presets .type | toYaml -}}
+{{- else -}}
+{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
+{{- end -}}
+{{- end -}}
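Judging by the Broadcom copyright header, this helper follows the Bitnami commons `resources.preset` pattern: a fixed dictionary of presets rendered to YAML, with a hard `fail` on unknown keys. A sketch of checking what a preset expands to, assuming you run it from the chart directory with a placeholder release name:

```
# Render the chart and inspect the resources block produced by a preset
helm template test-release . --set resourcesPreset=medium | grep -A 7 'resources:'
# An unknown preset should abort rendering with the helper's ERROR message
helm template test-release . --set resourcesPreset=bogus
```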
@@ -1,3 +1,5 @@
+{{- $cozyConfig := lookup "v1" "ConfigMap" "cozy-system" "cozystack" }}
+{{- $clusterDomain := (index $cozyConfig.data "cluster-domain") | default "cozy.local" }}
 {{- $existingSecret := lookup "v1" "Secret" .Release.Namespace (printf "%s-credentials" .Release.Name) }}
 {{- $passwords := dict }}
 {{- $users := .Values.users }}
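The new lookups read the cluster domain from the `cozystack` ConfigMap in `cozy-system`, falling back to `cozy.local` when the key is absent. You can check what a live cluster would supply with:

```
kubectl get configmap cozystack -n cozy-system -o jsonpath='{.data.cluster-domain}'
```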
@@ -32,7 +34,7 @@ kind: "ClickHouseInstallation"
 metadata:
   name: "{{ .Release.Name }}"
 spec:
-  namespaceDomainPattern: "%s.svc.cozy.local"
+  namespaceDomainPattern: "%s.svc.{{ $clusterDomain }}"
   defaults:
     templates:
       dataVolumeClaimTemplate: data-volume-template
@@ -92,6 +94,9 @@ spec:
     templates:
       volumeClaimTemplates:
         - name: data-volume-template
+          metadata:
+            labels:
+              app.kubernetes.io/instance: {{ .Release.Name }}
           spec:
             accessModes:
               - ReadWriteOnce
@@ -99,6 +104,9 @@ spec:
               requests:
                 storage: {{ .Values.size }}
         - name: log-volume-template
+          metadata:
+            labels:
+              app.kubernetes.io/instance: {{ .Release.Name }}
           spec:
             accessModes:
               - ReadWriteOnce
@@ -107,6 +115,9 @@ spec:
                 storage: {{ .Values.logStorageSize }}
     podTemplates:
       - name: clickhouse-per-host
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
        spec:
          affinity:
            podAntiAffinity:
@@ -121,6 +132,11 @@ spec:
         containers:
           - name: clickhouse
             image: clickhouse/clickhouse-server:24.9.2.42
+            {{- if .Values.resources }}
+            resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 16 }}
+            {{- else if ne .Values.resourcesPreset "none" }}
+            resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 16 }}
+            {{- end }}
             volumeMounts:
               - name: data-volume-template
                 mountPath: /var/lib/clickhouse
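The conditional gives explicit `resources` precedence over the preset, and `resourcesPreset: none` suppresses the block entirely. A sketch of exercising both paths from the CLI (chart path and release name are placeholders; `--set-json` needs Helm 3.10+):

```
# Preset path: resources come from the named preset
helm template test . --set resourcesPreset=small
# Explicit path: once resources is set, the preset is ignored
helm template test . --set-json 'resources={"limits":{"cpu":"2","memory":"4Gi"}}'
```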
@@ -128,6 +144,9 @@ spec:
                 mountPath: /var/log/clickhouse-server
     serviceTemplates:
       - name: svc-template
+        metadata:
+          labels:
+            app.kubernetes.io/instance: {{ .Release.Name }}
         generateName: chendpoint-{chi}
         spec:
           ports:
@@ -24,3 +24,14 @@ rules:
   resourceNames:
   - {{ .Release.Name }}
  verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -9,5 +9,5 @@ spec:
   kind: clickhouse
   type: clickhouse
   selector:
-    clickhouse.altinity.com/chi: {{ $.Release.Name }}
+    app.kubernetes.io/instance: {{ $.Release.Name }}
   version: {{ $.Chart.Version }}
@@ -4,22 +4,22 @@
   "properties": {
     "size": {
       "type": "string",
-      "description": "Persistent Volume size",
+      "description": "Size of Persistent Volume for data",
       "default": "10Gi"
     },
     "logStorageSize": {
       "type": "string",
-      "description": "Persistent Volume for logs size",
+      "description": "Size of Persistent Volume for logs",
       "default": "2Gi"
     },
     "shards": {
       "type": "number",
-      "description": "Number of Clickhouse replicas",
+      "description": "Number of Clickhouse shards",
       "default": 1
     },
     "replicas": {
       "type": "number",
-      "description": "Number of Clickhouse shards",
+      "description": "Number of Clickhouse replicas",
       "default": 2
     },
     "storageClass": {
@@ -29,7 +29,7 @@
     },
     "logTTL": {
       "type": "number",
-      "description": "for query_log and query_thread_log",
+      "description": "TTL (expiration time) for query_log and query_thread_log",
       "default": 15
     },
     "backup": {
@@ -37,17 +37,17 @@
       "properties": {
         "enabled": {
           "type": "boolean",
-          "description": "Enable pereiodic backups",
+          "description": "Enable periodic backups",
           "default": false
         },
         "s3Region": {
           "type": "string",
-          "description": "The AWS S3 region where backups are stored",
+          "description": "AWS S3 region where backups are stored",
           "default": "us-east-1"
         },
         "s3Bucket": {
           "type": "string",
-          "description": "The S3 bucket used for storing backups",
+          "description": "S3 bucket used for storing backups",
           "default": "s3.example.org/clickhouse-backups"
         },
         "schedule": {
@@ -57,25 +57,35 @@
         },
         "cleanupStrategy": {
           "type": "string",
-          "description": "The strategy for cleaning up old backups",
+          "description": "Retention strategy for cleaning up old backups",
           "default": "--keep-last=3 --keep-daily=3 --keep-within-weekly=1m"
         },
         "s3AccessKey": {
           "type": "string",
-          "description": "The access key for S3, used for authentication",
+          "description": "Access key for S3, used for authentication",
           "default": "oobaiRus9pah8PhohL1ThaeTa4UVa7gu"
         },
         "s3SecretKey": {
           "type": "string",
-          "description": "The secret key for S3, used for authentication",
+          "description": "Secret key for S3, used for authentication",
           "default": "ju3eum4dekeich9ahM1te8waeGai0oog"
         },
         "resticPassword": {
           "type": "string",
-          "description": "The password for Restic backup encryption",
+          "description": "Password for Restic backup encryption",
           "default": "ChaXoveekoh6eigh4siesheeda2quai0"
         }
       }
     },
+    "resources": {
+      "type": "object",
+      "description": "Explicit CPU/memory resource requests and limits for the Clickhouse service",
+      "default": {}
+    },
+    "resourcesPreset": {
+      "type": "string",
+      "description": "Use a common resources preset when `resources` is not set explicitly.",
+      "default": "small"
+    }
   }
 }
@@ -1,11 +1,11 @@
 ## @section Common parameters

-## @param size Persistent Volume size
-## @param logStorageSize Persistent Volume for logs size
-## @param shards Number of Clickhouse replicas
-## @param replicas Number of Clickhouse shards
+## @param size Size of Persistent Volume for data
+## @param logStorageSize Size of Persistent Volume for logs
+## @param shards Number of Clickhouse shards
+## @param replicas Number of Clickhouse replicas
 ## @param storageClass StorageClass used to store the data
-## @param logTTL for query_log and query_thread_log
+## @param logTTL TTL (expiration time) for query_log and query_thread_log
 ##
 size: 10Gi
 logStorageSize: 2Gi
@@ -29,14 +29,14 @@ users: {}

 ## @section Backup parameters

-## @param backup.enabled Enable pereiodic backups
-## @param backup.s3Region The AWS S3 region where backups are stored
-## @param backup.s3Bucket The S3 bucket used for storing backups
+## @param backup.enabled Enable periodic backups
+## @param backup.s3Region AWS S3 region where backups are stored
+## @param backup.s3Bucket S3 bucket used for storing backups
 ## @param backup.schedule Cron schedule for automated backups
-## @param backup.cleanupStrategy The strategy for cleaning up old backups
-## @param backup.s3AccessKey The access key for S3, used for authentication
-## @param backup.s3SecretKey The secret key for S3, used for authentication
-## @param backup.resticPassword The password for Restic backup encryption
+## @param backup.cleanupStrategy Retention strategy for cleaning up old backups
+## @param backup.s3AccessKey Access key for S3, used for authentication
+## @param backup.s3SecretKey Secret key for S3, used for authentication
+## @param backup.resticPassword Password for Restic backup encryption
 backup:
   enabled: false
   s3Region: us-east-1
@@ -46,3 +46,12 @@ backup:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+
+## @param resources Explicit CPU/memory resource requests and limits for the Clickhouse service
+resources: {}
+#  resources:
+#    cpu: 4000m
+#    memory: 4Gi
+
+## @param resourcesPreset Use a common resources preset when `resources` is not set explicitly.
+resourcesPreset: "small"
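These `## @param` annotations, together with `values.schema.json`, are what the chart's `generate` target feeds to `readme-generator` to rebuild the README tables, which is why the same description strings change in values.yaml, the schema, and the README at once:

```
# From the chart directory, mirroring the Makefile's generate target
readme-generator -v values.yaml -s values.schema.json -r README.md
```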
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.4.2
+version: 0.7.1

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -21,15 +21,17 @@

 ### Backup parameters

-| Name                     | Description                                    | Value                                                  |
-| ------------------------ | ---------------------------------------------- | ------------------------------------------------------ |
-| `backup.enabled`         | Enable pereiodic backups                       | `false`                                                |
-| `backup.s3Region`        | The AWS S3 region where backups are stored     | `us-east-1`                                            |
-| `backup.s3Bucket`        | The S3 bucket used for storing backups         | `s3.example.org/postgres-backups`                      |
-| `backup.schedule`        | Cron schedule for automated backups            | `0 2 * * *`                                            |
-| `backup.cleanupStrategy` | The strategy for cleaning up old backups       | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
-| `backup.s3AccessKey`     | The access key for S3, used for authentication | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
-| `backup.s3SecretKey`     | The secret key for S3, used for authentication | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
-| `backup.resticPassword`  | The password for Restic backup encryption      | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
+| Name                     | Description                                                                                                                                        | Value                                                  |
+| ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
+| `backup.enabled`         | Enable pereiodic backups                                                                                                                            | `false`                                                |
+| `backup.s3Region`        | The AWS S3 region where backups are stored                                                                                                          | `us-east-1`                                            |
+| `backup.s3Bucket`        | The S3 bucket used for storing backups                                                                                                              | `s3.example.org/postgres-backups`                      |
+| `backup.schedule`        | Cron schedule for automated backups                                                                                                                 | `0 2 * * *`                                            |
+| `backup.cleanupStrategy` | The strategy for cleaning up old backups                                                                                                            | `--keep-last=3 --keep-daily=3 --keep-within-weekly=1m` |
+| `backup.s3AccessKey`     | The access key for S3, used for authentication                                                                                                      | `oobaiRus9pah8PhohL1ThaeTa4UVa7gu`                     |
+| `backup.s3SecretKey`     | The secret key for S3, used for authentication                                                                                                      | `ju3eum4dekeich9ahM1te8waeGai0oog`                     |
+| `backup.resticPassword`  | The password for Restic backup encryption                                                                                                           | `ChaXoveekoh6eigh4siesheeda2quai0`                     |
+| `resources`              | Resources                                                                                                                                           | `{}`                                                   |
+| `resourcesPreset`        | Use a common resources preset when `resources` is not set explicitly. (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge)   | `nano`                                                 |
packages/apps/ferretdb/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
+../../../library/cozy-lib
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/postgres-backup:0.8.0@sha256:d1f7692b6761f46f24687d885ec335330280346ae4a9ff28b3179681b36106b7
+ghcr.io/cozystack/cozystack/postgres-backup:0.14.0@sha256:10179ed56457460d95cd5708db2a00130901255fa30c4dd76c65d2ef5622b61f
packages/apps/ferretdb/templates/_resources.tpl (new file, 49 lines)
@@ -0,0 +1,49 @@
+{{/*
+Copyright Broadcom, Inc. All Rights Reserved.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a resource request/limit object based on a given preset.
+These presets are for basic testing and not meant to be used in production
+{{ include "resources.preset" (dict "type" "nano") -}}
+*/}}
+{{- define "resources.preset" -}}
+{{- $presets := dict
+  "nano" (dict
+      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+   )
+  "micro" (dict
+      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+   )
+  "small" (dict
+      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+   )
+  "medium" (dict
+      "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+   )
+  "large" (dict
+      "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+   )
+  "xlarge" (dict
+      "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+   )
+  "2xlarge" (dict
+      "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+   )
+ }}
+{{- if hasKey $presets .type -}}
+{{- index $presets .type | toYaml -}}
+{{- else -}}
+{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
+{{- end -}}
+{{- end -}}
@@ -24,3 +24,14 @@ rules:
   resourceNames:
   - {{ .Release.Name }}
  verbs: ["get", "list", "watch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Release.Name }}-dashboard-resources
+subjects:
+{{ include "cozy-lib.rbac.subjectsForTenantAndAccessLevel" (list "use" .Release.Namespace) }}
+roleRef:
+  kind: Role
+  name: {{ .Release.Name }}-dashboard-resources
+  apiGroup: rbac.authorization.k8s.io
@@ -2,6 +2,8 @@ apiVersion: v1
 kind: Service
 metadata:
   name: {{ .Release.Name }}
+  labels:
+    app.kubernetes.io/instance: {{ .Release.Name }}
 spec:
   type: {{ ternary "LoadBalancer" "ClusterIP" .Values.external }}
   {{- if .Values.external }}
@@ -12,6 +12,7 @@ spec:
     metadata:
       labels:
         app: {{ .Release.Name }}
+        app.kubernetes.io/instance: {{ .Release.Name }}
     spec:
       containers:
         - name: ferretdb
@@ -11,11 +11,18 @@ spec:
   {{- $rawConstraints := get $configMap.data "globalAppTopologySpreadConstraints" }}
   {{- if $rawConstraints }}
   {{- $rawConstraints | fromYaml | toYaml | nindent 2 }}
+    labelSelector:
+      matchLabels:
+        cnpg.io/cluster: {{ .Release.Name }}-postgres
   {{- end }}
   {{- end }}
   minSyncReplicas: {{ .Values.quorum.minSyncReplicas }}
   maxSyncReplicas: {{ .Values.quorum.maxSyncReplicas }}

+  {{- if .Values.resources }}
+  resources: {{- include "cozy-lib.resources.sanitize" (list .Values.resources $) | nindent 4 }}
+  {{- else if ne .Values.resourcesPreset "none" }}
+  resources: {{- include "cozy-lib.resources.preset" (list .Values.resourcesPreset $) | nindent 4 }}
+  {{- end }}
   monitoring:
     enablePodMonitor: true
@@ -28,6 +35,7 @@ spec:
   inheritedMetadata:
     labels:
       policy.cozystack.io/allow-to-apiserver: "true"
+      app.kubernetes.io/instance: {{ .Release.Name }}

 {{- if .Values.users }}
   managed:
@@ -9,5 +9,5 @@ spec:
   kind: ferretdb
   type: ferretdb
   selector:
-    app: {{ $.Release.Name }}
+    app.kubernetes.io/instance: {{ $.Release.Name }}
   version: {{ $.Chart.Version }}
@@ -81,6 +81,16 @@
           "default": "ChaXoveekoh6eigh4siesheeda2quai0"
         }
       }
     },
+    "resources": {
+      "type": "object",
+      "description": "Resources",
+      "default": {}
+    },
+    "resourcesPreset": {
+      "type": "string",
+      "description": "Use a common resources preset when `resources` is not set explicitly. (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge)",
+      "default": "nano"
+    }
   }
 }
@@ -48,3 +48,12 @@ backup:
   s3AccessKey: oobaiRus9pah8PhohL1ThaeTa4UVa7gu
   s3SecretKey: ju3eum4dekeich9ahM1te8waeGai0oog
   resticPassword: ChaXoveekoh6eigh4siesheeda2quai0
+
+## @param resources Resources
+resources: {}
+#  resources:
+#    cpu: 4000m
+#    memory: 4Gi
+
+## @param resourcesPreset Use a common resources preset when `resources` is not set explicitly. (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge)
+resourcesPreset: "nano"
@@ -16,7 +16,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.1
+version: 0.5.2

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -6,13 +6,16 @@ include ../../../scripts/package.mk

 image: image-nginx

 image-nginx:
-	docker buildx build --platform linux/amd64 --build-arg ARCH=amd64 images/nginx-cache \
+	docker buildx build images/nginx-cache \
+		--provenance false \
+		--builder=$(BUILDER) \
+		--platform=$(PLATFORM) \
 		--tag $(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG)) \
 		--cache-from type=registry,ref=$(REGISTRY)/nginx-cache:latest \
 		--cache-to type=inline \
 		--metadata-file images/nginx-cache.json \
 		--push=$(PUSH) \
 		--label "org.opencontainers.image.source=https://github.com/cozystack/cozystack" \
 		--load=$(LOAD)
 	echo "$(REGISTRY)/nginx-cache:$(call settag,$(NGINX_CACHE_TAG))@$$(yq e '."containerimage.digest"' images/nginx-cache.json -o json -r)" \
 		> images/nginx-cache.tag
@@ -60,13 +60,17 @@ VTS module shows wrong upstream resonse time

 ### Common parameters

-| Name               | Description                                      | Value   |
-| ------------------ | ------------------------------------------------ | ------- |
-| `external`         | Enable external access from outside the cluster  | `false` |
-| `size`             | Persistent Volume size                           | `10Gi`  |
-| `storageClass`     | StorageClass used to store the data              | `""`    |
-| `haproxy.replicas` | Number of HAProxy replicas                       | `2`     |
-| `nginx.replicas`   | Number of Nginx replicas                         | `2`     |
+| Name                      | Description                                                                                                                                       | Value   |
+| ------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
+| `external`                | Enable external access from outside the cluster                                                                                                     | `false` |
+| `size`                    | Persistent Volume size                                                                                                                              | `10Gi`  |
+| `storageClass`            | StorageClass used to store the data                                                                                                                 | `""`    |
+| `haproxy.replicas`        | Number of HAProxy replicas                                                                                                                          | `2`     |
+| `nginx.replicas`          | Number of Nginx replicas                                                                                                                            | `2`     |
+| `haproxy.resources`       |                                                                                                                                                      | `{}`    |
+| `haproxy.resourcesPreset` | Use a common resources preset when `resources` is not set explicitly. (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge)    | `nano`  |
+| `nginx.resources`         | Resources                                                                                                                                           | `{}`    |
+| `nginx.resourcesPreset`   | Use a common resources preset when `resources` is not set explicitly. (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge)    | `nano`  |

 ### Configuration parameters
packages/apps/http-cache/charts/cozy-lib (new symbolic link)
@@ -0,0 +1 @@
+../../../library/cozy-lib
@@ -1 +1 @@
-ghcr.io/aenix-io/cozystack/nginx-cache:0.3.1@sha256:854b3908114de1876038eb9902577595cce93553ce89bf75ac956d22f1e8b8cc
+ghcr.io/cozystack/cozystack/nginx-cache:0.5.2@sha256:e0a07082bb6fc6aeaae2315f335386f1705a646c72f9e0af512aebbca5cb2b15
@@ -1,4 +1,4 @@
-FROM ubuntu:22.04 as stage
+FROM ubuntu:22.04 AS stage

 ARG NGINX_VERSION=1.25.3
 ARG IP2LOCATION_C_VERSION=8.6.1
@@ -9,11 +9,15 @@ ARG FIFTYONEDEGREES_NGINX_VERSION=3.2.21.1
 ARG NGINX_CACHE_PURGE_VERSION=2.5.3
 ARG NGINX_VTS_VERSION=0.2.2

+ARG TARGETOS
+ARG TARGETARCH
+
 # Install required packages for development
-RUN apt-get update -q \
-    && apt-get install -yq \
+RUN apt update -q \
+    && apt install -yq --no-install-recommends \
     ca-certificates \
     unzip \
     autoconf \
     automake \
     build-essential \
     libtool \
     libpcre3 \
@@ -68,7 +72,7 @@ RUN checkinstall \
     --default \
     --pkgname=ip2location-c \
     --pkgversion=${IP2LOCATION_C_VERSION} \
-    --pkgarch=amd64 \
+    --pkgarch=${TARGETARCH} \
     --pkggroup=lib \
     --pkgsource="https://github.com/chrislim2888/IP2Location-C-Library" \
     --maintainer="Eduard Generalov <eduard@generalov.net>" \
@@ -97,7 +101,7 @@ RUN checkinstall \
     --default \
     --pkgname=ip2proxy-c \
     --pkgversion=${IP2PROXY_C_VERSION} \
-    --pkgarch=amd64 \
+    --pkgarch=${TARGETARCH} \
     --pkggroup=lib \
     --pkgsource="https://github.com/ip2location/ip2proxy-c" \
     --maintainer="Eduard Generalov <eduard@generalov.net>" \
@@ -144,7 +148,7 @@ RUN checkinstall \
     --default \
     --pkgname=nginx \
     --pkgversion=$VERS \
-    --pkgarch=amd64 \
+    --pkgarch=${TARGETARCH} \
     --pkggroup=web \
     --provides=nginx \
     --requires=ip2location-c,ip2proxy-c,libssl3,libc-bin,libc6,libzstd1,libpcre++0v5,libpcre16-3,libpcre2-8-0,libpcre3,libpcre32-3,libpcrecpp0v5,libmaxminddb0 \
@@ -165,10 +169,9 @@ COPY nginx-reloader.sh /usr/bin/nginx-reloader.sh
 RUN set -x \
     && groupadd --system --gid 101 nginx \
    && useradd --system --gid nginx --no-create-home --home /nonexistent --comment "nginx user" --shell /bin/false --uid 101 nginx \
-    && apt update \
-    && apt-get install --no-install-recommends --no-install-suggests -y gnupg1 ca-certificates inotify-tools \
-    && apt -y install /packages/*.deb \
-    && apt-get clean \
+    && apt update -q \
+    && apt install -yq --no-install-recommends --no-install-suggests gnupg1 ca-certificates inotify-tools \
+    && apt install -y /packages/*.deb \
     && rm -rf /var/lib/apt/lists/* \
     && mkdir -p /var/lib/nginx /var/log/nginx \
     && ln -sf /dev/stdout /var/log/nginx/access.log \
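`TARGETOS` and `TARGETARCH` are build arguments that BuildKit populates automatically for each entry in `--platform`, which is what lets `--pkgarch=${TARGETARCH}` replace the hard-coded `amd64`. A sketch of how the values land per platform:

```
# BuildKit sets TARGETARCH per platform; no --build-arg is needed
docker buildx build --platform linux/amd64,linux/arm64 images/nginx-cache
# TARGETARCH is "amd64" during the first platform's build and "arm64" during the second
```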
packages/apps/http-cache/templates/_resources.tpl (new file, 49 lines)
@@ -0,0 +1,49 @@
+{{/*
+Copyright Broadcom, Inc. All Rights Reserved.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{/* vim: set filetype=mustache: */}}
+
+{{/*
+Return a resource request/limit object based on a given preset.
+These presets are for basic testing and not meant to be used in production
+{{ include "resources.preset" (dict "type" "nano") -}}
+*/}}
+{{- define "resources.preset" -}}
+{{- $presets := dict
+  "nano" (dict
+      "requests" (dict "cpu" "100m" "memory" "128Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "128Mi" "ephemeral-storage" "2Gi")
+   )
+  "micro" (dict
+      "requests" (dict "cpu" "250m" "memory" "256Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "256Mi" "ephemeral-storage" "2Gi")
+   )
+  "small" (dict
+      "requests" (dict "cpu" "500m" "memory" "512Mi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "512Mi" "ephemeral-storage" "2Gi")
+   )
+  "medium" (dict
+      "requests" (dict "cpu" "500m" "memory" "1Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "1Gi" "ephemeral-storage" "2Gi")
+   )
+  "large" (dict
+      "requests" (dict "cpu" "1" "memory" "2Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "2Gi" "ephemeral-storage" "2Gi")
+   )
+  "xlarge" (dict
+      "requests" (dict "cpu" "2" "memory" "4Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "4Gi" "ephemeral-storage" "2Gi")
+   )
+  "2xlarge" (dict
+      "requests" (dict "cpu" "4" "memory" "8Gi" "ephemeral-storage" "50Mi")
+      "limits" (dict "memory" "8Gi" "ephemeral-storage" "2Gi")
+   )
+ }}
+{{- if hasKey $presets .type -}}
+{{- index $presets .type | toYaml -}}
+{{- else -}}
+{{- printf "ERROR: Preset key '%s' invalid. Allowed values are %s" .type (join "," (keys $presets)) | fail -}}
+{{- end -}}
+{{- end -}}
@@ -33,6 +33,11 @@ spec:
       containers:
         - image: haproxy:latest
           name: haproxy
+          {{- if .Values.haproxy.resources }}
+          resources: {{- include "cozy-lib.resources.sanitize" (list .Values.haproxy.resources $) | nindent 10 }}
+          {{- else if ne .Values.haproxy.resourcesPreset "none" }}
+          resources: {{- include "cozy-lib.resources.preset" (list .Values.haproxy.resourcesPreset $) | nindent 10 }}
+          {{- end }}
          ports:
            - containerPort: 8080
              name: http
@@ -52,6 +52,11 @@ spec:
       shareProcessNamespace: true
       containers:
         - name: nginx
+          {{- if $.Values.nginx.resources }}
+          resources: {{- include "cozy-lib.resources.sanitize" (list $.Values.nginx.resources $) | nindent 10 }}
+          {{- else if ne $.Values.nginx.resourcesPreset "none" }}
+          resources: {{- include "cozy-lib.resources.preset" (list $.Values.nginx.resourcesPreset $) | nindent 10 }}
+          {{- end }}
          image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
          readinessProbe:
            httpGet:
@@ -83,6 +88,13 @@ spec:
         - name: reloader
           image: "{{ $.Files.Get "images/nginx-cache.tag" | trim }}"
           command: ["/usr/bin/nginx-reloader.sh"]
+          resources:
+            limits:
+              cpu: 50m
+              memory: 50Mi
+            requests:
+              cpu: 50m
+              memory: 50Mi
          #command: ["sleep", "infinity"]
          volumeMounts:
            - mountPath: /etc/nginx/nginx.conf
packages/apps/http-cache/templates/workloadmonitor.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}-haproxy
+spec:
+  replicas: {{ .Values.haproxy.replicas }}
+  minReplicas: 1
+  kind: http-cache
+  type: http-cache
+  selector:
+    app: {{ $.Release.Name }}-haproxy
+  version: {{ $.Chart.Version }}
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}-nginx
+spec:
+  replicas: {{ .Values.nginx.replicas }}
+  minReplicas: 1
+  kind: http-cache
+  type: http-cache
+  selector:
+    app: {{ $.Release.Name }}-nginx-cache
+  version: {{ $.Chart.Version }}
+---
+apiVersion: cozystack.io/v1alpha1
+kind: WorkloadMonitor
+metadata:
+  name: {{ $.Release.Name }}
+spec:
+  replicas: {{ .Values.replicas }}
+  minReplicas: 1
+  kind: http-cache
+  type: http-cache
+  selector:
+    app.kubernetes.io/instance: {{ $.Release.Name }}
+  version: {{ $.Chart.Version }}
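Each WorkloadMonitor tracks one replica set of the release so the platform can surface availability. A sketch of inspecting them on a live cluster; the plural resource name `workloadmonitors` and the release/namespace names are assumptions:

```
kubectl get workloadmonitors.cozystack.io -n tenant-example
kubectl get workloadmonitors.cozystack.io mycache-haproxy -n tenant-example -o yaml
```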
Some files were not shown because too many files have changed in this diff.