Compare commits

293 Commits
develop...php
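
This comparison can be approximated locally — a sketch, assuming both branches are fetched from the same remote:
git log --oneline develop..php    # the commits listed below (those on php but not develop)
git diff develop...php            # the combined file changes shown after the commit list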

Author SHA1 Message Date
renovate[bot]
6d36bf01a9 Update zoeyvid/nginx-quic Docker tag to v656 2025-12-05 13:53:49 +01:00
renovate[bot]
b75ee29178 Update zoeyvid/nginx-quic Docker tag to v655 2025-12-04 22:00:30 +01:00
renovate[bot]
d308ee709c Update zoeyvid/valkey-static Docker tag to v72 2025-12-04 15:49:21 +01:00
renovate[bot]
b19558b12b Update alpine Docker tag to v3.23.0 2025-12-04 15:48:49 +01:00
renovate[bot]
fee0b5bce5 Update zoeyvid/nginx-quic Docker tag to v643 2025-12-04 15:48:21 +01:00
renovate[bot]
39349e4ae0 Update zoeyvid/nginx-quic Docker tag to v638 2025-12-01 17:08:00 +01:00
renovate[bot]
88ff769818 Update zoeyvid/nginx-quic Docker tag to v637 2025-11-25 14:40:21 +01:00
renovate[bot]
22e9ac2584 Update dependency phpmailer/phpmailer to v7.0.1 2025-11-25 10:53:02 +01:00
renovate[bot]
76827a8c20 Update zoeyvid/nginx-quic Docker tag to v628 2025-11-23 08:08:39 +01:00
renovate[bot]
8e208c292d chore(deps): update zoeyvid/valkey-static docker tag to v70 2025-11-21 07:01:44 +01:00
renovate[bot]
768fee9ac6 chore(deps): update zoeyvid/nginx-quic docker tag to v627 2025-11-20 23:01:36 +01:00
renovate[bot]
2ab79ded30 Update actions/checkout action to v6 2025-11-20 17:35:58 +01:00
GitHub
69044c24b2 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-11-16 14:12:17 +00:00
renovate[bot]
a19b1d5143 Lock file maintenance 2025-11-16 15:11:50 +01:00
renovate[bot]
5526ecd5c6 Update zoeyvid/nginx-quic Docker tag to v625 2025-10-30 11:55:53 +01:00
renovate[bot]
613683de76 chore(deps): update zoeyvid/nginx-quic docker tag to v623 2025-10-29 08:50:38 +01:00
renovate[bot]
3129cf1c71 chore(deps): update zoeyvid/valkey-static docker tag to v68 2025-10-26 10:32:49 +01:00
GitHub
1e73f31a8a tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-10-25 23:43:02 +02:00
renovate[bot]
c40e2aec6b Update zoeyvid/nginx-quic Docker tag to v621 2025-10-25 23:43:02 +02:00
GitHub
df7c2c0148 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-10-22 00:40:03 +02:00
renovate[bot]
0ecdf43080 Update zoeyvid/valkey-static Docker tag to v66 2025-10-22 00:40:03 +02:00
renovate[bot]
fe83a18831 chore(deps): update zoeyvid/nginx-quic docker tag to v619 2025-10-17 17:11:29 +02:00
renovate[bot]
1ed08dd30e Update zoeyvid/nginx-quic Docker tag to v616 2025-10-17 07:38:41 +02:00
Zoey
1e8157df97 Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2025-10-16 22:47:07 +02:00
renovate[bot]
a46475cbf7 Update dependency phpmailer/phpmailer to v7 2025-10-15 22:49:33 +02:00
renovate[bot]
ad99992e2c Update zoeyvid/nginx-quic Docker tag to v605 2025-10-13 17:48:01 +02:00
renovate[bot]
3320386985 chore(deps): update zoeyvid/valkey-static docker tag to v65 2025-10-10 08:24:00 +02:00
renovate[bot]
f91dac7657 chore(deps): update zoeyvid/nginx-quic docker tag to v604 2025-10-10 08:19:42 +02:00
renovate[bot]
7e846f2fa3 chore(deps): update zoeyvid/valkey-static docker tag to v64 2025-10-09 12:43:47 +02:00
renovate[bot]
f814972b72 chore(deps): update alpine docker tag to v3.22.2 2025-10-09 10:09:35 +02:00
renovate[bot]
20c73fd059 chore(deps): update zoeyvid/nginx-quic docker tag to v594 2025-10-09 10:06:48 +02:00
renovate[bot]
71ca6d88d8 chore(deps): update zoeyvid/nginx-quic docker tag to v592 2025-10-08 10:55:44 +02:00
renovate[bot]
c26aa75a62 chore(deps): update zoeyvid/nginx-quic docker tag to v588 2025-10-08 08:55:18 +02:00
renovate[bot]
a09718d20b chore(deps): update zoeyvid/nginx-quic docker tag to v587 2025-10-07 21:31:54 +02:00
renovate[bot]
7b363b2a11 chore(deps): update zoeyvid/nginx-quic docker tag to v580 2025-10-05 16:36:19 +02:00
renovate[bot]
a73cc2ddf6 chore(deps): update zoeyvid/valkey-static docker tag to v62 2025-10-04 11:02:31 +02:00
renovate[bot]
84f1ecdf89 Update zoeyvid/nginx-quic Docker tag to v555 2025-10-03 08:16:21 +02:00
renovate[bot]
b077b4a269 Update zoeyvid/nginx-quic Docker tag to v554 2025-10-02 23:52:38 +02:00
GitHub
9e08e88bbd tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-10-02 22:41:02 +02:00
renovate[bot]
02038ddcec Update zoeyvid/nginx-quic Docker tag to v552 2025-10-02 22:41:02 +02:00
renovate[bot]
3ff9bb32d3 fix(deps): update dependency phpmailer/phpmailer to v6.11.1 2025-09-30 16:29:05 +02:00
renovate[bot]
c74803910a fix(deps): update dependency phpmailer/phpmailer to v6.11.0 2025-09-29 20:14:26 +02:00
renovate[bot]
0028245d4f chore(deps): update zoeyvid/nginx-quic docker tag to v550 2025-09-26 22:23:19 +02:00
renovate[bot]
fa6a497bdd chore(deps): update zoeyvid/valkey-static docker tag to v60 2025-09-26 22:23:12 +02:00
renovate[bot]
30f3d937f9 chore(deps): update zoeyvid/nginx-quic docker tag to v536 2025-09-24 10:24:42 +02:00
GitHub
f8481e1474 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-09-07 12:04:49 +02:00
renovate[bot]
f6f439c053 chore(deps): update zoeyvid/nginx-quic docker tag to v535 2025-09-07 12:04:49 +02:00
renovate[bot]
bc2b3b3bb6 chore(deps): update caddy docker tag to v2.10.2 2025-08-27 16:18:17 +02:00
renovate[bot]
a86cb5bb35 chore(deps): update zoeyvid/nginx-quic docker tag to v533 2025-08-16 16:24:11 +02:00
GitHub
3255c70be8 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-08-15 21:25:11 +02:00
renovate[bot]
01de11c96a chore(deps): update zoeyvid/nginx-quic docker tag to v531 2025-08-15 21:25:11 +02:00
renovate[bot]
bb5c583ba1 chore(deps): update zoeyvid/nginx-quic docker tag to v529 2025-08-11 21:34:26 +02:00
renovate[bot]
3363e9667d chore(deps): update zoeyvid/valkey-static docker tag to v55 2025-08-11 16:39:24 +02:00
renovate[bot]
68f2bba72c chore(deps): update actions/checkout action to v5 2025-08-11 15:25:27 +02:00
renovate[bot]
6911b9f6cf chore(deps): update zoeyvid/nginx-quic docker tag to v527 2025-08-08 07:24:20 +02:00
renovate[bot]
0ee266bb53 chore(deps): update zoeyvid/nginx-quic docker tag to v525 2025-08-07 17:33:12 +02:00
renovate[bot]
3ee16c5597 Update zoeyvid/nginx-quic Docker tag to v523 2025-07-29 09:13:08 +02:00
renovate[bot]
3d328ee29a chore(deps): update zoeyvid/valkey-static docker tag to v52 2025-07-16 18:26:42 +02:00
renovate[bot]
1852cf5559 Update zoeyvid/nginx-quic Docker tag to v522 2025-07-16 18:25:17 +02:00
renovate[bot]
74e0ee0bc0 Update alpine Docker tag to v3.22.1 2025-07-16 09:15:54 +02:00
renovate[bot]
c9ef60c697 chore(deps): update zoeyvid/nginx-quic docker tag to v520 2025-07-12 15:30:07 +02:00
renovate[bot]
ac6bc89935 Update zoeyvid/nginx-quic Docker tag to v519 2025-07-11 07:33:13 +02:00
renovate[bot]
ffc7b3bf50 Update zoeyvid/valkey-static Docker tag to v49 2025-07-07 17:21:22 +02:00
renovate[bot]
0a53a469bd Update zoeyvid/valkey-static Docker tag to v48 2025-07-07 14:15:14 +02:00
renovate[bot]
434b7ec19f chore(deps): update zoeyvid/nginx-quic docker tag to v518 2025-07-04 15:20:53 +02:00
GitHub
3fa803d611 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-07-03 22:34:46 +02:00
renovate[bot]
512ff8e6e4 Update zoeyvid/nginx-quic Docker tag to v517 2025-07-03 22:34:46 +02:00
renovate[bot]
b08dac4b01 chore(deps): update zoeyvid/nginx-quic docker tag to v515 2025-06-24 21:49:22 +02:00
renovate[bot]
beb5a460a9 chore(deps): update zoeyvid/nginx-quic docker tag to v514 2025-06-13 11:11:44 +02:00
renovate[bot]
45772a73d9 chore(deps): update zoeyvid/nginx-quic docker tag to v512 2025-06-13 00:56:21 +02:00
GitHub
97e24dfeb5 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-06-12 07:47:40 +02:00
renovate[bot]
7fa3819155 chore(deps): update zoeyvid/valkey-static docker tag to v47 2025-06-12 07:47:40 +02:00
renovate[bot]
4575eb1071 chore(deps): update zoeyvid/nginx-quic docker tag to v510 2025-06-11 15:49:09 +02:00
renovate[bot]
6612aaae9a chore(deps): update zoeyvid/nginx-quic docker tag to v507 2025-06-10 09:22:30 +02:00
renovate[bot]
d14c238ad5 chore(deps): update zoeyvid/nginx-quic docker tag to v506 2025-06-08 19:15:50 +02:00
renovate[bot]
526b4e2302 chore(deps): update zoeyvid/nginx-quic docker tag to v503 2025-06-04 23:25:42 +02:00
renovate[bot]
314063e880 chore(deps): update zoeyvid/nginx-quic docker tag to v501 2025-06-04 18:18:49 +02:00
renovate[bot]
8ffbeb0ecd chore(deps): update zoeyvid/nginx-quic docker tag to v498 2025-06-02 09:05:02 +02:00
renovate[bot]
81a2057a5e chore(deps): update alpine docker tag to v3.22.0 2025-06-02 01:47:43 +02:00
renovate[bot]
0c03016ddb chore(deps): update zoeyvid/valkey-static docker tag to v46 2025-05-31 04:19:49 +02:00
renovate[bot]
746cfc8da9 chore(deps): update zoeyvid/nginx-quic docker tag to v493 2025-05-29 21:22:38 +02:00
GitHub
c87b393c0a tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-05-28 22:46:41 +02:00
renovate[bot]
ea5b531bd6 Update zoeyvid/nginx-quic Docker tag to v492 2025-05-28 22:46:41 +02:00
renovate[bot]
2dd90b1d4f chore(deps): update zoeyvid/nginx-quic docker tag to v488 2025-05-22 13:24:34 +02:00
GitHub
1e629859e5 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-05-16 21:50:40 +02:00
renovate[bot]
2c84be2cdd chore(deps): update zoeyvid/nginx-quic docker tag to v485 2025-05-16 21:50:40 +02:00
GitHub
d78fddb001 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-05-06 08:07:06 +02:00
renovate[bot]
48ba636143 Update zoeyvid/nginx-quic Docker tag to v484 2025-05-06 08:07:06 +02:00
GitHub
24555abb14 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-04-24 22:00:03 +02:00
renovate[bot]
a488703004 fix(deps): update dependency phpmailer/phpmailer to v6.10.0 2025-04-24 22:00:03 +02:00
renovate[bot]
a851c2dbc5 chore(deps): update zoeyvid/nginx-quic docker tag to v483 2025-04-24 08:03:42 +02:00
renovate[bot]
bd3163aa16 chore(deps): update zoeyvid/valkey-static docker tag to v43 2025-04-24 08:03:31 +02:00
renovate[bot]
b74832920f chore(deps): update caddy docker tag to v2.10.0 2025-04-22 08:37:10 +02:00
renovate[bot]
4e89b5b0eb chore(deps): update zoeyvid/nginx-quic docker tag to v482 2025-04-21 15:00:56 +02:00
renovate[bot]
e93f637d6e chore(deps): update zoeyvid/nginx-quic docker tag to v481 2025-04-17 22:32:58 +02:00
GitHub
6ea4657391 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-04-16 17:55:19 +02:00
renovate[bot]
52a09fd174 chore(deps): update zoeyvid/nginx-quic docker tag to v480 2025-04-16 17:55:19 +02:00
renovate[bot]
41faae2adb chore(deps): update zoeyvid/nginx-quic docker tag to v477 2025-04-09 13:57:02 +01:00
GitHub
2ee7a12d19 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-04-08 14:44:28 +01:00
renovate[bot]
b39e3feeba chore(deps): update zoeyvid/nginx-quic docker tag to v475 2025-04-08 14:44:28 +01:00
GitHub
1aa89dda3c tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-04-02 09:59:03 +00:00
GitHub
b8f06ab902 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-04-02 11:58:49 +02:00
renovate[bot]
576f4c72f4 chore(deps): update zoeyvid/nginx-quic docker tag to v472 2025-04-02 11:58:49 +02:00
renovate[bot]
0b2355cada Update zoeyvid/valkey-static Docker tag to v42 2025-04-01 15:05:06 +02:00
GitHub
88916891c0 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-03-26 21:07:23 +01:00
renovate[bot]
678ce2c2ee Update zoeyvid/nginx-quic Docker tag to v471 2025-03-26 21:07:23 +01:00
GitHub
be75a2a6b0 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-03-25 20:31:23 +01:00
renovate[bot]
2a9fe58de0 Update zoeyvid/nginx-quic Docker tag to v470 2025-03-25 20:31:23 +01:00
renovate[bot]
56e79583fc Update zoeyvid/nginx-quic Docker tag to v469 2025-03-24 19:16:02 +01:00
GitHub
27b50c3275 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-03-24 14:07:59 +01:00
renovate[bot]
6a828bf1e1 Update zoeyvid/nginx-quic Docker tag to v460 2025-03-24 14:07:59 +01:00
renovate[bot]
dc1772b04c chore(deps): update zoeyvid/nginx-quic docker tag to v459 2025-03-20 15:12:17 +01:00
GitHub
4108cdd771 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-03-17 11:38:00 +01:00
renovate[bot]
f8cbe5bceb Update zoeyvid/nginx-quic Docker tag to v458 2025-03-17 11:38:00 +01:00
renovate[bot]
289e9baf18 chore(deps): update zoeyvid/nginx-quic docker tag to v456 2025-03-13 09:00:12 +01:00
renovate[bot]
5c0e5bbee1 chore(deps): update zoeyvid/nginx-quic docker tag to v455 2025-03-12 09:59:44 +01:00
GitHub
18b4ce5c07 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-03-11 19:03:13 +01:00
renovate[bot]
a80c7e4153 chore(deps): update zoeyvid/nginx-quic docker tag to v453 2025-03-11 19:03:13 +01:00
GitHub
7eac9fd51e tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-03-09 14:22:28 +00:00
renovate[bot]
8af3af68b5 Update zoeyvid/nginx-quic Docker tag to v450 2025-03-09 15:22:11 +01:00
renovate[bot]
be207c514a chore(deps): update zoeyvid/nginx-quic docker tag to v438 2025-02-27 07:31:03 +01:00
GitHub
9a0c3263b0 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-02-26 17:01:21 +01:00
renovate[bot]
4eec16ccaf Update zoeyvid/nginx-quic Docker tag to v437 2025-02-26 17:01:21 +01:00
Zoey
01342d8168 Update .prettierignore
Signed-off-by: Zoey <zoey@z0ey.de>
2025-02-21 08:32:23 +01:00
Zoey
5e3ec8d6f4 Update .prettierignore
Signed-off-by: Zoey <zoey@z0ey.de>
2025-02-21 08:30:35 +01:00
renovate[bot]
a29f9ab3ae Update zoeyvid/nginx-quic Docker tag to v436 2025-02-21 08:27:30 +01:00
GitHub
862b0434ea tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-02-20 19:44:04 +01:00
renovate[bot]
bc87345682 Update zoeyvid/nginx-quic Docker tag to v435 2025-02-20 19:44:04 +01:00
Zoey
7e0763b4df Update lint.yml
Signed-off-by: Zoey <zoey@z0ey.de>
2025-02-18 12:20:12 +01:00
renovate[bot]
6dd4948e73 Update zoeyvid/nginx-quic Docker tag to v431 2025-02-18 12:14:27 +01:00
renovate[bot]
7135d33d23 chore(deps): update zoeyvid/valkey-static docker tag to v41 2025-02-15 15:31:30 +01:00
renovate[bot]
33cf41d861 chore(deps): update alpine docker tag to v3.21.3 2025-02-15 11:13:55 +01:00
renovate[bot]
e3e79ebd16 chore(deps): update zoeyvid/nginx-quic docker tag to v423 2025-02-15 11:13:44 +01:00
renovate[bot]
89504d95b4 chore(deps): update zoeyvid/nginx-quic docker tag to v421 2025-02-12 13:21:13 +01:00
GitHub
b3dcd40de5 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-02-12 07:00:13 +01:00
renovate[bot]
9711b6aaeb Update zoeyvid/nginx-quic Docker tag to v420 2025-02-12 07:00:13 +01:00
renovate[bot]
c1c1bcf75f Update zoeyvid/nginx-quic Docker tag to v417 2025-02-07 09:17:51 +01:00
GitHub
cc16c9ce1e tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-02-07 00:52:30 +01:00
renovate[bot]
d93d8f7da7 chore(deps): update zoeyvid/nginx-quic docker tag to v414 2025-02-07 00:52:30 +01:00
GitHub
88c77c106d tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-02-06 11:03:15 +01:00
renovate[bot]
ce63e47992 chore(deps): update zoeyvid/nginx-quic docker tag to v411 2025-02-06 11:03:15 +01:00
renovate[bot]
91f1ff80b3 chore(deps): update zoeyvid/nginx-quic docker tag to v395 2025-01-29 08:10:30 +01:00
renovate[bot]
222a42a665 Update zoeyvid/nginx-quic Docker tag to v393 2025-01-28 06:32:58 +01:00
renovate[bot]
60e4970df8 Update zoeyvid/nginx-quic Docker tag to v392 2025-01-26 08:25:31 +01:00
renovate[bot]
85fbfeab0e Update zoeyvid/nginx-quic Docker tag to v390 2025-01-23 19:43:32 +01:00
GitHub
86bc5798d8 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2025-01-23 08:08:29 +00:00
Zoey
81a265a4e0 Update tailwindcss-update.yml
Signed-off-by: Zoey <zoey@z0ey.de>
2025-01-23 09:08:06 +01:00
Zoey
31ed77dccc Update .gitignore
Signed-off-by: Zoey <zoey@z0ey.de>
2025-01-23 09:06:50 +01:00
GitHub
63c39e36ca prettier
Signed-off-by: GitHub <noreply@github.com>
2025-01-23 08:04:34 +00:00
Zoey
d94b0cef12 update tailwind to v4 2025-01-23 09:04:15 +01:00
renovate[bot]
376fc77183 Update zoeyvid/nginx-quic Docker tag to v384 2025-01-17 10:37:52 +01:00
renovate[bot]
c4167f976e Update zoeyvid/nginx-quic Docker tag to v382 2025-01-15 07:09:41 +01:00
renovate[bot]
667b60097d Update zoeyvid/nginx-quic Docker tag to v376 2025-01-14 10:44:23 +01:00
renovate[bot]
d38633069a chore(deps): update zoeyvid/valkey-static docker tag to v38 2025-01-09 10:18:16 +01:00
renovate[bot]
c7619021d4 chore(deps): update zoeyvid/nginx-quic docker tag to v375 2025-01-09 10:18:05 +01:00
renovate[bot]
2580f79d02 chore(deps): update caddy docker tag to v2.9.1 2025-01-09 10:13:27 +01:00
renovate[bot]
d9bb0e0233 chore(deps): update alpine docker tag to v3.21.2 2025-01-09 00:35:55 +01:00
renovate[bot]
15e0ffb16a chore(deps): update zoeyvid/valkey-static docker tag to v35 2025-01-08 20:31:57 +01:00
renovate[bot]
5904cc7048 Update zoeyvid/nginx-quic Docker tag to v373 2025-01-08 14:55:58 +01:00
renovate[bot]
0a30808aaf Update alpine Docker tag to v3.21.1 2025-01-07 11:16:00 +01:00
renovate[bot]
c147cea4a9 Update zoeyvid/nginx-quic Docker tag to v372 2025-01-07 11:15:38 +01:00
renovate[bot]
e4cd6fbbc2 Update zoeyvid/nginx-quic Docker tag to v371 2025-01-01 17:00:44 +01:00
GitHub
4a48b2daa8 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-12-24 12:41:34 +01:00
renovate[bot]
1ae5b3d56f Update zoeyvid/nginx-quic Docker tag to v369 2024-12-24 12:41:34 +01:00
Zoey
85c3bc3075 Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2024-12-14 21:39:50 +01:00
renovate[bot]
b03eef67b0 chore(deps): update zoeyvid/nginx-quic docker tag to v368 2024-12-14 09:50:00 +01:00
renovate[bot]
4bed5d8aa6 chore(deps): update zoeyvid/curl-quic docker tag to v431 2024-12-11 10:53:56 +01:00
renovate[bot]
b1a0542728 chore(deps): update zoeyvid/nginx-quic docker tag to v362 2024-12-10 08:58:21 +01:00
renovate[bot]
d389af9158 chore(deps): update alpine docker tag to v3.21.0 2024-12-10 08:58:07 +01:00
renovate[bot]
f2ff6a6139 chore(deps): update zoeyvid/valkey-static docker tag to v34 2024-12-06 16:23:52 +01:00
renovate[bot]
4bdffc4007 chore(deps): update zoeyvid/curl-quic docker tag to v430 2024-12-06 16:23:02 +01:00
GitHub
618bec326c tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-12-05 15:08:18 +01:00
renovate[bot]
f3492983a7 chore(deps): update zoeyvid/nginx-quic docker tag to v359 2024-12-05 15:08:18 +01:00
renovate[bot]
ce5bd46e8c chore(deps): update zoeyvid/nginx-quic docker tag to v357 2024-12-02 15:23:09 +01:00
GitHub
e86081901e tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-11-25 18:37:01 +00:00
DavidCraftDev
83e8149b5c style(auth): enhance login page UI with improved layout and styling 2024-11-25 19:36:35 +01:00
renovate[bot]
ca2f18ca1f chore(deps): update zoeyvid/curl-quic docker tag to v427 2024-11-25 16:46:55 +01:00
renovate[bot]
2154bb3300 chore(deps): update zoeyvid/nginx-quic docker tag to v356 2024-11-25 16:46:33 +01:00
renovate[bot]
bf7d1a5610 fix(deps): update dependency phpmailer/phpmailer to v6.9.3 2024-11-24 21:01:13 +01:00
renovate[bot]
d7f59a7364 chore(deps): update zoeyvid/curl-quic docker tag to v426 2024-11-19 19:10:16 +01:00
GitHub
e8cbceaf08 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-11-16 20:26:24 +01:00
renovate[bot]
74da93fe30 chore(deps): update zoeyvid/nginx-quic docker tag to v352 2024-11-16 20:26:24 +01:00
renovate[bot]
10b83b33b7 chore(deps): update zoeyvid/curl-quic docker tag to v425 2024-11-06 17:27:00 +01:00
renovate[bot]
b37a3b0eeb chore(deps): update zoeyvid/nginx-quic docker tag to v351 2024-11-02 18:45:43 +01:00
renovate[bot]
cbef03d216 Update zoeyvid/curl-quic Docker tag to v423 2024-10-26 18:30:29 +02:00
renovate[bot]
15ee498ccd Update zoeyvid/nginx-quic Docker tag to v350 2024-10-22 11:05:59 +02:00
renovate[bot]
8bd3b813ab Update zoeyvid/nginx-quic Docker tag to v349 2024-10-20 19:51:59 +02:00
GitHub
ebe3d09d71 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-10-17 17:16:40 +02:00
renovate[bot]
2f0d646126 chore(deps): update zoeyvid/curl-quic docker tag to v420 2024-10-17 17:16:40 +02:00
renovate[bot]
ec574f33cb chore(deps): update zoeyvid/nginx-quic docker tag to v347 2024-10-10 13:02:50 +02:00
renovate[bot]
73210a740b fix(deps): update dependency phpmailer/phpmailer to v6.9.2 2024-10-09 14:48:10 +02:00
renovate[bot]
07720a24ad chore(deps): update zoeyvid/nginx-quic docker tag to v346 2024-10-09 14:33:24 +02:00
renovate[bot]
c8ac0ec599 chore(deps): update zoeyvid/nginx-quic docker tag to v345 2024-10-08 20:46:13 +02:00
renovate[bot]
71abcad9f5 chore(deps): update zoeyvid/curl-quic docker tag to v419 2024-10-06 20:22:33 +02:00
Zoey
792a02a4a9 Delete 20240921100301_regenerate_default_host.js 2024-10-03 00:19:43 +02:00
Zoey
73a9b79ad6 Merge branch 'develop' into php 2024-10-03 00:19:19 +02:00
Zoey
211f43c71b Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2024-10-03 00:12:33 +02:00
GitHub
8ded42654e tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-10-02 06:29:24 +00:00
Zoey
d761f8cd9b Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2024-10-02 08:29:02 +02:00
renovate[bot]
df254927e9 chore(deps): update zoeyvid/nginx-quic docker tag to v337 2024-09-18 22:14:20 +02:00
renovate[bot]
e97c39baff chore(deps): update zoeyvid/curl-quic docker tag to v416 2024-09-18 18:53:34 +02:00
Zoey
b8448242a1 Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2024-09-18 06:44:13 +02:00
GitHub
921719b025 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-09-18 06:44:13 +02:00
renovate[bot]
d5acc8c450 chore(deps): update zoeyvid/nginx-quic docker tag to v335 2024-09-18 06:44:13 +02:00
renovate[bot]
1ab9d59594 chore(deps): update zoeyvid/curl-quic docker tag to v415 2024-09-17 06:55:07 +02:00
GitHub
1f480724e9 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-09-16 06:20:50 +02:00
renovate[bot]
cb0f304ff0 chore(deps): update zoeyvid/valkey-static docker tag to v30 2024-09-16 06:20:50 +02:00
renovate[bot]
3c6624e82b chore(deps): update zoeyvid/curl-quic docker tag to v414 2024-09-11 16:49:40 +02:00
renovate[bot]
0ee10da787 chore(deps): update zoeyvid/valkey-static docker tag to v29 2024-09-08 21:54:06 +02:00
renovate[bot]
6c224247f2 chore(deps): update zoeyvid/nginx-quic docker tag to v314 2024-09-08 21:53:03 +02:00
renovate[bot]
c7e4cb2156 chore(deps): update zoeyvid/curl-quic docker tag to v413 2024-09-08 21:52:49 +02:00
renovate[bot]
615ba3f353 chore(deps): update alpine docker tag to v3.20.3 2024-09-07 17:57:20 +02:00
renovate[bot]
4ebd0a2d30 chore(deps): update zoeyvid/nginx-quic docker tag to v312 2024-09-03 21:57:59 +02:00
renovate[bot]
1ffbe65071 chore(deps): update peter-evans/create-pull-request action to v7 2024-09-03 18:09:59 +02:00
renovate[bot]
5ee1e8957e chore(deps): update zoeyvid/nginx-quic docker tag to v310 2024-08-31 00:14:41 +02:00
renovate[bot]
51ffd378dd chore(deps): update zoeyvid/curl-quic docker tag to v410 2024-08-24 11:55:16 +02:00
renovate[bot]
41b14e1208 Update zoeyvid/nginx-quic Docker tag to v307 2024-08-17 07:56:55 +02:00
GitHub
8e407d4bbc tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-08-15 16:16:46 +00:00
renovate[bot]
7369b02a44 chore(deps): update zoeyvid/nginx-quic docker tag to v306 2024-08-15 18:16:27 +02:00
GitHub
f869a69378 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-08-08 17:42:18 +02:00
renovate[bot]
312fa94afb chore(deps): update zoeyvid/nginx-quic docker tag to v305 2024-08-08 17:42:18 +02:00
GitHub
d659db7051 tailwindcss-update
Signed-off-by: GitHub <noreply@github.com>
2024-08-07 20:52:52 +02:00
renovate[bot]
3995d2bc71 chore(deps): update zoeyvid/curl-quic docker tag to v408 2024-08-07 20:52:52 +02:00
Zoey
80881b1cb3 make file executeable again
Signed-off-by: Zoey <zoey@z0ey.de>
2024-08-03 17:06:43 +02:00
Zoey
6d3cd5dd8e Merge branch 'develop' into php 2024-08-03 16:36:53 +02:00
renovate[bot]
c6220b8379 dep updates 2024-08-03 16:33:57 +02:00
Zoey
2983ec7b17 Delete 20240711144745_change_incoming_port_to_string.js 2024-07-11 16:13:30 +02:00
Zoey
6678d230cf Merge branch 'develop' into php 2024-07-11 16:12:45 +02:00
renovate[bot]
a2d7b02667 chore(deps): update zoeyvid/nginx-quic docker tag to v297 2024-07-09 20:26:11 +02:00
renovate[bot]
38f6999f8e chore(deps): update zoeyvid/curl-quic docker tag to v399 2024-07-09 08:49:54 +02:00
Zoey
ab33b30ce1 Merge branch 'develop' into php 2024-06-29 09:23:09 +02:00
Zoey
9a624764cc Merge branch 'develop' into php 2024-06-28 17:42:44 +02:00
Zoey
cb7077d409 Update zoeyvid/nginx-quic Docker tag to v296 2024-06-28 17:41:13 +02:00
Zoey
e34fcf5861 Merge branch 'develop' into php 2024-06-22 00:56:36 +02:00
Zoey
143456ca28 Merge branch 'develop' into php 2024-06-22 00:51:50 +02:00
Zoey
9cafbd1c53 dep updates/alpine 3.20.1
Signed-off-by: Zoey <zoey@z0ey.de>
2024-06-22 00:46:50 +02:00
Zoey
c9d7e5b37f Merge branch 'develop' into php 2024-06-21 00:57:35 +02:00
Zoey
586e64cd49 dep updates/alpine 3.20.1
Signed-off-by: Zoey <zoey@z0ey.de>
2024-06-21 00:55:49 +02:00
renovate[bot]
c400610ca1 update alpine to 3.20.1
Signed-off-by: Zoey <zoey@z0ey.de>
2024-06-21 00:53:11 +02:00
Zoey
57613b1b5c Merge branch 'develop' into php 2024-06-10 22:30:09 +02:00
DavidCraftDev
fd4acf21f7 Fix design on mobile and add ascending sorting arrow by @DavidCraftDev
Co-Authored-By: David <contact@davidcraft.de>
2024-06-10 22:28:48 +02:00
Zoey
383aa71ad5 Merge branch 'develop' into php 2024-06-09 15:50:52 +02:00
Zoey
5708c55c53 enable human readable sizes again 2024-06-09 15:50:18 +02:00
Zoey
26a761618f merge develop 2024-06-09 15:44:07 +02:00
Zoey
3c585bc0eb Merge branch 'develop' into php 2024-06-09 15:37:49 +02:00
Zoey
396596d3e9 new design for fancyindex by @DavidCraftDev
Signed-off-by: Zoey <zoey@z0ey.de>
Co-Authored-By: David <contact@davidcraft.de>
2024-06-09 15:27:38 +02:00
Zoey
22ab2dba6a Merge branch 'develop' into php 2024-06-04 23:13:53 +02:00
DavidCraftDev
d82787d01e add default/dead host pages by @DavidCraftDev
Signed-off-by: Zoey <zoey@z0ey.de>
Co-Authored-By: David <contact@davidcraft.de>
2024-06-04 23:13:36 +02:00
Zoey
e258b72e98 Merge branch 'develop' into php 2024-06-02 11:18:07 +02:00
Zoey
bf68860414 add bootstrap icons/tailwind/lint/prettier workflows/remove php qr code 2024-06-01 23:36:12 +02:00
Zoey
ef83fe9f55 Merge branch 'develop' into php 2024-06-01 00:44:11 +02:00
Zoey
ec5327fec2 move files 2024-06-01 00:42:48 +02:00
Zoey
6041cbf8cb add auth.php 2024-05-28 01:02:10 +02:00
Zoey
599bebbdfb Enable protected-mode
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-24 18:59:25 +02:00
renovate[bot]
e52f38ef04 Update zoeyvid/valkey-static Docker tag to v14 2024-05-24 18:56:13 +02:00
Zoey
6ee15142da use strong instead of b 2024-05-24 17:43:45 +02:00
renovate[bot]
906edd98b7 Update zoeyvid/valkey-static Docker tag to v13 2024-05-24 13:19:12 +02:00
Zoey
22212954cd improve login.php 2024-05-23 22:55:01 +02:00
Zoey
6aed6d6de8 add some todo notes
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-23 17:48:59 +02:00
Zoey
bffca9c450 Merge branch 'develop' into php 2024-05-23 17:04:37 +02:00
Zoey
2c4df9c441 Update start.sh 2024-05-23 16:32:22 +02:00
Zoey
8c18961fb7 Merge branch 'develop' into php 2024-05-23 16:30:08 +02:00
Zoey
56e7472605 remove PHP81 2024-05-23 16:12:40 +02:00
renovate[bot]
243e9fbf27 dep updates/small changes 2024-05-23 15:44:59 +02:00
Zoey
121d76156f add login page 2024-05-13 15:06:07 +02:00
Zoey
80000874c8 fix launch 2024-05-12 23:25:46 +02:00
Zoey
3e5f727fa6 change name of php session 2024-05-12 22:25:14 +02:00
Zoey
87501e9dfe Merge branch 'develop' into php 2024-05-12 22:20:44 +02:00
Zoey
4299e783f5 configure session with valkey
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-12 22:19:34 +02:00
Zoey
b31f6069f1 update nginx-quic
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-12 22:18:42 +02:00
Zoey
57b927d719 Merge branch 'develop' into php 2024-05-12 17:09:49 +02:00
Zoey
a6e0a3d45a merge upstream 2024-05-12 17:08:23 +02:00
Zoey
e09df0031e Merge branch 'develop' into php 2024-05-12 16:41:10 +02:00
Zoey
bdde83997c merge upstream 2024-05-12 16:39:36 +02:00
Zoey
3c312a44c8 add totp function
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-12 00:15:01 +02:00
Zoey
eef25d8554 Update composer.json
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-11 20:39:36 +02:00
Zoey
6c16829d81 Merge branch 'develop' into php 2024-05-03 13:14:36 +02:00
Zoey
b5f4e82435 Merge branch 'develop' into php 2024-05-03 00:12:26 +02:00
Zoey
9da7bfb755 use lts nodejs
Signed-off-by: Zoey <zoey@z0ey.de>
2024-05-02 15:02:00 +02:00
renovate[bot]
e789ccfa72 Update dependency @babel/core to v7.24.5 2024-04-29 20:49:29 +02:00
Zoey
a642a8d173 Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2024-04-29 19:52:09 +02:00
renovate[bot]
6230132c34 Update dependency liquidjs to v10.12.0 2024-04-28 22:02:47 +02:00
renovate[bot]
f7ca07adee Update dependency globals to v15.1.0 2024-04-28 15:53:10 +02:00
Zoey
6a4dd7eaf5 Merge branch 'develop' into php 2024-04-27 11:05:45 +02:00
Zoey
f3588679a9 Merge branch 'develop' into php
Signed-off-by: Zoey <zoey@z0ey.de>
2024-04-21 02:21:43 +02:00
Zoey
1e0777fb7c Update Dockerfile
Signed-off-by: Zoey <zoey@z0ey.de>
2024-04-21 02:15:59 +02:00
renovate[bot]
69a0967304 Update zoeyvid/nginx-quic Docker tag to v273 2024-04-21 02:15:59 +02:00
renovate[bot]
b6db58f58b Update zoeyvid/nginx-quic Docker tag to v273 2024-04-21 02:15:27 +02:00
Zoey
7a6a9b8d52 init PHP rewrite
Signed-off-by: Zoey <zoey@z0ey.de>
2024-04-21 00:33:52 +02:00
Zoey
70352021af remove global/frintend and backend folder
Signed-off-by: Zoey <zoey@z0ey.de>
2024-04-20 20:43:45 +02:00
Zoey
2db8ab1e2d Update renovate.json
Signed-off-by: Zoey <zoey@z0ey.de>
2024-04-20 18:37:37 +02:00
renovate[bot]
d411530753 Update dependency eslint to v9.1.0 2024-04-20 08:46:52 +02:00
renovate[bot]
340d1f3ee4 Update dependency @eslint/js to v9.1.1 2024-04-19 23:25:39 +02:00
876 changed files with 15,651 additions and 64,085 deletions

@@ -1 +0,0 @@
-*/node_modules

@@ -2,4 +2,4 @@ exclude:
   - main
   - stable
   - develop
-delete_closed_pr: true
+delete_closed_pr: true

.github/workflows/bootstrap-icons.yml (new file, 30 lines)

@@ -0,0 +1,30 @@
+name: bootstrap-icons
+on:
+  push:
+    paths:
+      - src/bootstrap-icons.json
+      - .github/workflows/bootstrap-icons.yml
+  schedule:
+    - cron: "0 */6 * * *"
+  workflow_dispatch:
+jobs:
+  bootstrap-icons:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v6
+      - name: update bootstrap-icons
+        run: |
+          curl -sL $(curl -sL https://api.github.com/repos/twbs/icons/releases/latest --header "authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" | jq -r .assets[].browser_download_url) -o bootstrap-icons.zip
+          unzip bootstrap-icons.zip
+          rm -rf src/public/bootstrap-icons bootstrap-icons.zip
+          mv bootstrap-icons-* bootstrap-icons
+          mkdir src/public/bootstrap-icons
+          for icon in $(jq -r .[] src/bootstrap-icons.json); do mv bootstrap-icons/"$icon".svg src/public/bootstrap-icons/"$icon".svg; done
+      - name: push changes
+        run: |
+          git add src/public/bootstrap-icons
+          git config user.name "GitHub"
+          git config user.email "noreply@github.com"
+          git diff-index --quiet HEAD || git commit -sm "bootstrap-icons"
+          git push
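
Note: the update step above assumes src/bootstrap-icons.json is a flat JSON array of icon names, so that jq -r .[] emits one name per line for the copy loop. A hypothetical example of that file:
["house", "gear", "person-circle"]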

@@ -1,29 +1,29 @@
-name: Format Caddyfile
+name: caddy-fmt
 on:
   push:
     branches:
       - develop
     paths:
       - .github/workflows/caddy-fmt.yml
-      - caddy/Dockerfile
-      - caddy/Caddyfile
+      - Caddy.Dockerfile
+      - Caddyfile
   workflow_dispatch:
 jobs:
   caddy-fmt:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
+        uses: actions/checkout@v6
       - name: Read version
         id: version
-        run: echo "version=$(cat caddy/Dockerfile | grep "^COPY --from=caddy:.*$" | head -1 | sed "s|COPY --from=caddy:\([0-9.]\+\).*|\1|g")" >> $GITHUB_OUTPUT
+        run: echo "version=$(cat Caddy.Dockerfile | grep "^COPY --from=caddy:.*$" | head -1 | sed "s|COPY --from=caddy:\([0-9.]\+\).*|\1|g")" >> $GITHUB_OUTPUT
       - name: caddy-fmt
         run: |
-          docker run --rm -v ${{ github.workspace }}/caddy/Caddyfile:/etc/caddy/Caddyfile caddy:${{ steps.version.outputs.version }} caddy fmt --overwrite /etc/caddy/Caddyfile
+          docker run --rm -v ${{ github.workspace }}/Caddyfile:/etc/caddy/Caddyfile caddy:${{ steps.version.outputs.version }} caddy fmt --overwrite /etc/caddy/Caddyfile
       - name: push changes
         run: |
-          git add caddy/Caddyfile
+          git add -A
           git config user.name "GitHub"
           git config user.email "noreply@github.com"
-          git diff-index --quiet HEAD || git commit -sm "caddy fmt"
+          git diff-index --quiet HEAD || git commit -sm "caddy-fmt"
           git push

.github/workflows/caddy-latest.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
+name: Docker push Caddy develop to latest
+on:
+  workflow_dispatch:
+jobs:
+  docker:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Login to DockerHub
+        if: github.event_name != 'pull_request'
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+      - name: Convert Username
+        id: un
+        run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
+      - name: Convert repository name
+        id: rn
+        run: echo "rn=$(echo "${{ github.event.repository.name }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ steps.un.outputs.un }}
+          password: ${{ github.token }}
+      - name: Push develop to latest
+        run: |
+          docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/nginx-proxy-manager:caddy ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+          docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/nginx-proxy-manager:caddy ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+          docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+          docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.run_number }} ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+          docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+          docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.run_number }} ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+      - name: Show Caddy version
+        run: |
+          docker run --rm --entrypoint caddy ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy version
+          docker run --rm --entrypoint caddy ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy version

@@ -5,42 +5,49 @@ on:
       - develop
     paths:
       - .github/workflows/caddy.yml
-      - caddy/Dockerfile
-      - caddy/Caddyfile
+      - Caddy.Dockerfile
+      - Caddyfile
   workflow_dispatch:
 jobs:
   build:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
+        uses: actions/checkout@v6
       - name: Set up QEMU
-        uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
+        uses: docker/setup-qemu-action@v3
         with:
-          platforms: all
+          platforms: arm64 #all
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
+        uses: docker/setup-buildx-action@v3
         with:
           driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
       - name: Login to DockerHub
-        uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
+        if: ${{ github.event_name != 'pull_request' }}
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
+      - name: Convert Username
+        id: un
+        run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
+      - name: Convert repository name
+        id: rn
+        run: echo "rn=$(echo "${{ github.event.repository.name }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
-          username: zoeyvid
+          username: ${{ steps.un.outputs.un }}
           password: ${{ github.token }}
       - name: Build
-        uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
+        uses: docker/build-push-action@v6
+        if: ${{ github.event_name != 'pull_request' }}
         with:
-          context: caddy
-          platforms: linux/amd64,linux/arm64
-          push: true
+          context: .
+          file: ./Caddy.Dockerfile
+          platforms: linux/amd64,linux/arm64 #,linux/amd64/v2,linux/amd64/v3,linux/amd64/v4 #,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
+          push: ${{ github.event_name != 'pull_request' }}
           tags: |
-            zoeyvid/npmplus:caddy
-            ghcr.io/zoeyvid/npmplus:caddy
-            zoeyvid/npmplus:caddy-${{ github.run_number }}
-            ghcr.io/zoeyvid/npmplus:caddy-${{ github.run_number }}
+            ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}
+            ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:caddy-${{ github.ref_name }}

@@ -4,560 +4,58 @@ on:
branches:
- develop
schedule:
- cron: "0 */3 * * *"
- cron: "0 */6 * * *"
workflow_dispatch:
jobs:
nginx-update:
cs-nginx-bouncer-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update nginx version
uses: actions/checkout@v6
- name: update cs-nginx-bouncer version
id: update
run: |
NGINX_VER="$(
git ls-remote --tags https://github.com/nginx/nginx \
CSNB_VER="$(
git ls-remote --tags https://github.com/crowdsecurity/cs-nginx-bouncer \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NGINX_VER=.*|ARG NGINX_VER=$NGINX_VER|" Dockerfile
echo "version=$NGINX_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update nginx version to ${{ steps.update.outputs.version }}
branch: update-nginx-version
title: update nginx version to ${{ steps.update.outputs.version }}
body: update nginx version to ${{ steps.update.outputs.version }}
dynamic_tls_records-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update dynamic_tls_records version
id: update
run: |
git clone --depth 1 https://github.com/nginx-modules/ngx_http_tls_dyn_size ngx_http_tls_dyn_size
DTR_VER="$(
ls ngx_http_tls_dyn_size/nginx__dynamic_tls_records_*.patch \
| sed "s|ngx_http_tls_dyn_size/nginx__dynamic_tls_records_\([0-9.]\+\)+.patch|\1|g" \
| sort -V \
| tail -1
)"
rm -r ngx_http_tls_dyn_size
sed -i "s|ARG DTR_VER=.*|ARG DTR_VER=$DTR_VER|" Dockerfile
echo "version=$DTR_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update dynamic_tls_records version to ${{ steps.update.outputs.version }}
branch: update-dynamic_tls_records-version
title: update dynamic_tls_records version to ${{ steps.update.outputs.version }}
body: update dynamic_tls_records version to ${{ steps.update.outputs.version }}
resolver_conf_parsing-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update resolver_conf_parsing version
id: update
run: |
git clone --depth 1 https://github.com/openresty/openresty openresty
RCP_VER="$(
ls openresty/patches/nginx \
| sort -V \
| tail -1
)"
rm -r openresty
sed -i "s|ARG RCP_VER=.*|ARG RCP_VER=$RCP_VER|" Dockerfile
echo "version=$RCP_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update resolver_conf_parsing version to ${{ steps.update.outputs.version }}
branch: update-resolver_conf_parsing-version
title: update resolver_conf_parsing version to ${{ steps.update.outputs.version }}
body: update resolver_conf_parsing version to ${{ steps.update.outputs.version }}
zlib-ng-patch-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update zlib-ng-patch version
id: update
run: |
git clone --depth 1 https://github.com/zlib-ng/patches zlib-ng-patches
ZNP_VER="$(
ls zlib-ng-patches/nginx/*-zlib-ng.patch \
| sed "s|zlib-ng-patches/nginx/\([0-9.]\+\)-zlib-ng.patch|\1|g" \
| sort -V \
| tail -1
)"
rm -r zlib-ng-patches
sed -i "s|ARG ZNP_VER=.*|ARG ZNP_VER=$ZNP_VER|" Dockerfile
echo "version=$ZNP_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update zlib-ng-patch version to ${{ steps.update.outputs.version }}
branch: update-zlib-ng-patch-version
title: update zlib-ng-patch version to ${{ steps.update.outputs.version }}
body: update zlib-ng-patch version to ${{ steps.update.outputs.version }}
ngx_brotli-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_brotli version
id: update
run: |
NB_VER="$(
git ls-remote --tags https://github.com/google/ngx_brotli \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NB_VER=.*|ARG NB_VER=$NB_VER|" Dockerfile
echo "version=$NB_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx_brotli version to ${{ steps.update.outputs.version }}
branch: update-ngx_brotli-version
title: update ngx_brotli version to ${{ steps.update.outputs.version }}
body: update ngx_brotli version to ${{ steps.update.outputs.version }}
ngx_unbrotli-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_unbrotli version
id: update
run: |
NUB_VER="$(
git ls-remote --tags https://github.com/clyfish/ngx_unbrotli \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NUB_VER=.*|ARG NUB_VER=$NUB_VER|" Dockerfile
echo "version=$NUB_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx_unbrotli version to ${{ steps.update.outputs.version }}
branch: update-ngx_unbrotli-version
title: update ngx_unbrotli version to ${{ steps.update.outputs.version }}
body: update ngx_unbrotli version to ${{ steps.update.outputs.version }}
zstd-nginx-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update zstd-nginx-module version
id: update
run: |
ZNM_VER="$(
git ls-remote --tags https://github.com/tokers/zstd-nginx-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG ZNM_VER=.*|ARG ZNM_VER=$ZNM_VER|" Dockerfile
echo "version=$ZNM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '0.1.1' }}
with:
signoff: true
delete-branch: true
commit-message: update zstd-nginx-module version to ${{ steps.update.outputs.version }}
branch: update-zstd-nginx-module-version
title: update zstd-nginx-module version to ${{ steps.update.outputs.version }}
body: update zstd-nginx-module version to ${{ steps.update.outputs.version }}
ngx_http_unzstd_filter_module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_http_unzstd_filter_module version
id: update
run: |
NHUZFM_VER="$(
git ls-remote --tags https://github.com/HanadaLee/ngx_http_unzstd_filter_module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NHUZFM_VER=.*|ARG NHUZFM_VER=$NHUZFM_VER|" Dockerfile
echo "version=$NHUZFM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx_http_unzstd_filter_module version to ${{ steps.update.outputs.version }}
branch: update-ngx_http_unzstd_filter_module-version
title: update ngx_http_unzstd_filter_module version to ${{ steps.update.outputs.version }}
body: update ngx_http_unzstd_filter_module version to ${{ steps.update.outputs.version }}
ngx-fancyindex-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx-fancyindex version
id: update
run: |
NF_VER="$(
git ls-remote --tags https://github.com/aperezdc/ngx-fancyindex \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NF_VER=.*|ARG NF_VER=$NF_VER|" Dockerfile
echo "version=$NF_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != 'v0.5.2' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx-fancyindex version to ${{ steps.update.outputs.version }}
branch: update-ngx-fancyindex-version
title: update ngx-fancyindex version to ${{ steps.update.outputs.version }}
body: update ngx-fancyindex version to ${{ steps.update.outputs.version }}
headers-more-nginx-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update headers-more-nginx-module version
id: update
run: |
HMNM_VER="$(
git ls-remote --tags https://github.com/openresty/headers-more-nginx-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG HMNM_VER=.*|ARG HMNM_VER=$HMNM_VER|" Dockerfile
echo "version=$HMNM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update headers-more-nginx-module version to ${{ steps.update.outputs.version }}
branch: update-headers-more-nginx-module-version
title: update headers-more-nginx-module version to ${{ steps.update.outputs.version }}
body: update headers-more-nginx-module version to ${{ steps.update.outputs.version }}
ngx_devel_kit-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_devel_kit version
id: update
run: |
NDK_VER="$(
git ls-remote --tags https://github.com/vision5/ngx_devel_kit \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NDK_VER=.*|ARG NDK_VER=$NDK_VER|" Dockerfile
echo "version=$NDK_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update ngx_devel_kit version to ${{ steps.update.outputs.version }}
branch: update-ngx_devel_kit-version
title: update ngx_devel_kit version to ${{ steps.update.outputs.version }}
body: update ngx_devel_kit version to ${{ steps.update.outputs.version }}
lua-nginx-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-nginx-module version
id: update
run: |
LNM_VER="$(
git ls-remote --tags https://github.com/openresty/lua-nginx-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG LNM_VER=.*|ARG LNM_VER=$LNM_VER|" Dockerfile
echo "version=$LNM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-nginx-module version to ${{ steps.update.outputs.version }}
branch: update-lua-nginx-module-version
title: update lua-nginx-module version to ${{ steps.update.outputs.version }}
body: update lua-nginx-module version to ${{ steps.update.outputs.version }}
njs-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update njs version
id: update
run: |
NJS_VER="$(
git ls-remote --tags https://github.com/nginx/njs \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NJS_VER=.*|ARG NJS_VER=$NJS_VER|" Dockerfile
echo "version=$NJS_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update njs version to ${{ steps.update.outputs.version }}
branch: update-njs-version
title: update njs version to ${{ steps.update.outputs.version }}
body: update njs version to ${{ steps.update.outputs.version }}
nginx-auth-ldap-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update nginx-auth-ldap version
id: update
run: |
NAL_VER="$(
git ls-remote --tags https://github.com/kvspb/nginx-auth-ldap \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NAL_VER=.*|ARG NAL_VER=$NAL_VER|" Dockerfile
echo "version=$NAL_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != 'v0.1' }}
with:
signoff: true
delete-branch: true
commit-message: update nginx-auth-ldap version to ${{ steps.update.outputs.version }}
branch: update-nginx-auth-ldap-version
title: update nginx-auth-ldap version to ${{ steps.update.outputs.version }}
body: update nginx-auth-ldap version to ${{ steps.update.outputs.version }}
nginx-module-vts-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update vts version
id: update
run: |
VTS_VER="$(
git ls-remote --tags https://github.com/vozlt/nginx-module-vts \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG VTS_VER=.*|ARG VTS_VER=$VTS_VER|" Dockerfile
echo "version=$VTS_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update vts version to ${{ steps.update.outputs.version }}
branch: update-vts-version
title: update vts version to ${{ steps.update.outputs.version }}
body: update vtsversion to ${{ steps.update.outputs.version }}
nginx-ntlm-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update nginx-ntlm-module version
id: update
run: |
NNTLM_VER="$(
git ls-remote --tags https://github.com/gabihodoroaga/nginx-ntlm-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NNTLM_VER=.*|ARG NNTLM_VER=$NNTLM_VER|" Dockerfile
echo "version=$NNTLM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != 'v1.19.3-beta.1' }}
with:
signoff: true
delete-branch: true
commit-message: update nginx-ntlm-module version to ${{ steps.update.outputs.version }}
branch: update-nginx-ntlm-module-version
title: update nginx-ntlm-module version to ${{ steps.update.outputs.version }}
body: update nginx-ntlm-module version to ${{ steps.update.outputs.version }}
ngx_http_geoip2_module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_http_geoip2_module version
id: update
run: |
NHG2M_VER="$(
git ls-remote --tags https://github.com/leev/ngx_http_geoip2_module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NHG2M_VER=.*|ARG NHG2M_VER=$NHG2M_VER|" Dockerfile
echo "version=$NHG2M_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update ngx_http_geoip2_module version to ${{ steps.update.outputs.version }}
branch: update-ngx_http_geoip2_module-version
title: update ngx_http_geoip2_module version to ${{ steps.update.outputs.version }}
body: update ngx_http_geoip2_module version to ${{ steps.update.outputs.version }}
lua-resty-core-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-resty-core version
id: update
run: |
LRC_VER="$(
git ls-remote --tags https://github.com/openresty/lua-resty-core \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG LRC_VER=.*|ARG LRC_VER=$LRC_VER|" Dockerfile
echo "version=$LRC_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-resty-core version to ${{ steps.update.outputs.version }}
branch: update-lua-resty-core-version
title: update lua-resty-core version to ${{ steps.update.outputs.version }}
body: update lua-resty-core version to ${{ steps.update.outputs.version }}
lua-resty-lrucache-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-resty-lrucache version
id: update
run: |
LRL_VER="$(
git ls-remote --tags https://github.com/openresty/lua-resty-lrucache \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG LRL_VER=.*|ARG LRL_VER=$LRL_VER|" Dockerfile
echo "version=$LRL_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-resty-lrucache version to ${{ steps.update.outputs.version }}
branch: update-lua-resty-lrucache-version
title: update lua-resty-lrucache version to ${{ steps.update.outputs.version }}
body: update lua-resty-lrucache version to ${{ steps.update.outputs.version }}
lua-cs-bouncer-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-cs-bouncer version
id: update
run: |
LCSB_VER="$(
git ls-remote --tags https://github.com/crowdsecurity/lua-cs-bouncer \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed -E "s/\^\{\}//"
)"
sed -i "s|ARG LCSB_VER=.*|ARG LCSB_VER=$LCSB_VER|" Dockerfile
echo "version=$LCSB_VER" >> $GITHUB_OUTPUT
wget https://raw.githubusercontent.com/crowdsecurity/cs-nginx-bouncer/refs/heads/main/nginx/crowdsec_nginx.conf -O rootfs/usr/local/nginx/conf/conf.d/crowdsec.conf.original
wget https://raw.githubusercontent.com/crowdsecurity/lua-cs-bouncer/refs/tags/"$LCSB_VER"/config_example.conf -O rootfs/etc/crowdsec.conf.original
sed -i "s|ARG CSNB_VER=.*|ARG CSNB_VER=$CSNB_VER|" Dockerfile
echo "version=$CSNB_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
uses: peter-evans/create-pull-request@v7
with:
signoff: true
delete-branch: true
commit-message: update lua-cs-bouncer version to ${{ steps.update.outputs.version }}
branch: update-lua-cs-bouncer-version
title: update lua-cs-bouncer version to ${{ steps.update.outputs.version }}
body: update lua-cs-bouncer version to ${{ steps.update.outputs.version }}
commit-message: update cs-nginx-bouncer version to ${{ steps.update.outputs.version }}
branch: update-cs-nginx-bouncer-version
title: update cs-nginx-bouncer version to ${{ steps.update.outputs.version }}
body: update cs-nginx-bouncer version to ${{ steps.update.outputs.version }}
coreruleset-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: update coreruleset version
id: update
run: |
CRS_VER="$(
git ls-remote --tags https://github.com/coreruleset/coreruleset \
| cut -d/ -f3 \
| sort -V \
| tail -1 \
| sed -E "s/\^\{\}//"
)"
sed -i "s|ARG CRS_VER=.*|ARG CRS_VER=$CRS_VER|" Dockerfile
echo "version=$CRS_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
signoff: true
delete-branch: true
commit-message: update coreruleset version to ${{ steps.update.outputs.version }}
branch: update-coreruleset-version
title: update coreruleset version to ${{ steps.update.outputs.version }}
body: update coreruleset version to ${{ steps.update.outputs.version }}
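
A note on the version-extraction pipeline these update jobs share: git ls-remote --tags prints refs such as refs/tags/v1.2.3 and, for annotated tags, an extra dereferenced refs/tags/v1.2.3^{} entry; cut -d/ -f3 keeps the tag name, sort -V orders versions semantically, tail -1 takes the newest, and the final sed strips the ^{} suffix. A minimal sketch with hypothetical output:
git ls-remote --tags https://github.com/coreruleset/coreruleset | cut -d/ -f3 | sort -V | tail -1 | sed -E "s/\^\{\}//"
# prints e.g. v4.18.0 (hypothetical output)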

@@ -1,91 +0,0 @@
name: Build beta Docker Image
on:
workflow_dispatch:
inputs:
tag:
description: 'name of the beta'
required: true
type: string
jobs:
build-x86_64:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:beta-x86_64
build-args: |
FLAGS=-march=x86-64-v2 -mtune=generic -fcf-protection=full
build-aarch64:
runs-on: ubuntu-24.04-arm
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:beta-aarch64
build-args: |
FLAGS=-mbranch-protection=standard
merge:
runs-on: ubuntu-latest
needs: [build-x86_64, build-aarch64]
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: create multiarch
run: |
docker buildx imagetools create --tag zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64
docker buildx imagetools create --tag zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64


@@ -1,95 +1,38 @@
name: Build latest Docker Image
name: Docker push develop to latest
on:
workflow_dispatch:
inputs:
tag:
description: 'name of the release'
required: true
type: string
jobs:
build-x86_64:
docker:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:latest-x86_64
build-args: |
FLAGS=-march=x86-64-v2 -mtune=generic -fcf-protection=full
build-aarch64:
runs-on: ubuntu-24.04-arm
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:latest-aarch64
build-args: |
FLAGS=-mbranch-protection=standard
merge:
runs-on: ubuntu-latest
needs: [build-x86_64, build-aarch64]
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert Username
id: un
run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Convert repository name
id: rn
run: echo "rn=$(echo "${{ github.event.repository.name }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
uses: docker/login-action@v3
with:
registry: ghcr.io
username: zoeyvid
username: ${{ steps.un.outputs.un }}
password: ${{ github.token }}
- name: create multiarch
- name: Push develop to latest
run: |
docker buildx imagetools create --tag zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag zoeyvid/npmplus:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag zoeyvid/nginx-proxy-manager:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/nginx-proxy-manager:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/nginx-proxy-manager:latest ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/nginx-proxy-manager:latest ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:latest ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.run_number }} ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:latest ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.run_number }} ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
- name: Show Nginx version
run: |
docker run --rm --entrypoint nginx ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:latest -V
docker run --rm --entrypoint nginx ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:latest -V


@@ -3,85 +3,91 @@ on:
push:
branches:
- develop
paths:
- .github/workflows/docker.yml
- Dockerfile
- rootfs/**
- src/**
pull_request:
paths:
- .github/workflows/docker.yml
- Dockerfile
- rootfs/**
- src/**
workflow_dispatch:
jobs:
build-x86_64:
build:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@v6
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: arm64 #all
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
uses: docker/setup-buildx-action@v3
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:develop-x86_64
build-args: |
FLAGS=-march=x86-64-v2 -mtune=generic -fcf-protection=full
build-aarch64:
runs-on: ubuntu-24.04-arm
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:develop-aarch64
build-args: |
FLAGS=-mbranch-protection=standard
merge:
runs-on: ubuntu-latest
needs: [build-x86_64, build-aarch64]
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert Username
id: un
run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Convert repository name
id: rn
run: echo "rn=$(echo "${{ github.event.repository.name }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
uses: docker/login-action@v3
with:
registry: ghcr.io
username: zoeyvid
username: ${{ steps.un.outputs.un }}
password: ${{ github.token }}
- name: create multiarch
- name: version
run: |
docker buildx imagetools create --tag zoeyvid/npmplus:develop ghcr.io/zoeyvid/npmplus:develop-x86_64 ghcr.io/zoeyvid/npmplus:develop-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:develop ghcr.io/zoeyvid/npmplus:develop-x86_64 ghcr.io/zoeyvid/npmplus:develop-aarch64
version="$(cat .version)+$(git rev-parse --short HEAD)"
# todo: embed version somewhere
#sed -i "s|\"0.0.0\"|\"$version\"|g" src/
- name: Build
uses: docker/build-push-action@v6
if: ${{ github.event_name != 'pull_request' }}
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64 #,linux/amd64/v2,linux/amd64/v3,linux/amd64/v4 #,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
push: ${{ github.event_name != 'pull_request' }}
tags: |
${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }}
- name: show version
if: ${{ github.event_name != 'pull_request' }}
run: |
docker run --rm --entrypoint nginx ${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }} -V
docker run --rm --entrypoint nginx ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ github.ref_name }} -V
- name: Set PR-Number (PR)
if: ${{ github.event_name == 'pull_request' }}
id: pr
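# turns the PR merge ref (e.g. 123/merge) into an image tag like pr-123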
run: echo "pr=$(echo pr-${{ github.ref_name }} | sed "s|refs/pull/:||g" | sed "s|/merge||g")" >> $GITHUB_OUTPUT
- name: Build (PR)
uses: docker/build-push-action@v6
if: ${{ github.event_name == 'pull_request' }}
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64 #,linux/amd64/v2,linux/amd64/v3,linux/amd64/v4 #,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
push: ${{ github.event_name == 'pull_request' }}
tags: ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ steps.pr.outputs.pr }}
- name: show version (PR)
if: ${{ github.event_name == 'pull_request' }}
run: docker run --rm --entrypoint nginx ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ steps.pr.outputs.pr }} -V
- name: add comment (PR)
uses: mshick/add-pr-comment@v2
if: ${{ github.event_name == 'pull_request' }}
with:
message: "The Docker Image can now be found here: `ghcr.io/${{ steps.un.outputs.un }}/${{ steps.rn.outputs.rn }}:${{ steps.pr.outputs.pr }}`"
repo-token: ${{ github.token }}
refresh-message-position: true


@@ -1,8 +1,6 @@
name: Dockerlint
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
@@ -11,7 +9,7 @@ jobs:
name: docker-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@v6
- name: Install hadolint
run: |
sudo wget https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -O /usr/bin/hadolint


@@ -1,16 +1,14 @@
name: JSON check
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
test-json:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/checkout@v6
- name: json-syntax-check
uses: limitusus/json-syntax-check@77d5756026b93886eaa3dc6ca1c4b17dd19dc703 # v2
uses: limitusus/json-syntax-check@v2
with:
pattern: "\\.json"


@@ -1,47 +0,0 @@
name: lint-and-format
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
lint-and-format:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
with:
node-version: lts/*
- uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4
with:
version: latest
- name: install-sponge
run: sudo apt-get install -y moreutils
- name: biome-backend
run: |
cd backend
pnpm install --frozen-lockfile
pnpm biome lint --write
pnpm biome format --write
- name: biome-frontend
run: |
cd frontend
pnpm install --frozen-lockfile
pnpm biome lint --write
pnpm biome format --write
pnpm formatjs compile-folder src/locale/src src/locale/lang
pnpm vitest
./src/locale/scripts/locale-sort.sh
- name: nginxbeautifier
run: |
pnpm add -g nginxbeautifier
nginxbeautifier -s 4 -r rootfs/usr/local/nginx/conf
- name: push changes
run: |
git add -A
git config user.name "GitHub"
git config user.email "noreply@github.com"
git commit -sm "update and lint" || true
git push || true

.github/workflows/lint.yml

@@ -0,0 +1,27 @@
name: lint
on:
push:
schedule:
- cron: "0 */6 * * *"
workflow_dispatch:
jobs:
lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Setup PHP with PECL extension
uses: shivammathur/setup-php@v2
with:
php-version: '8.4'
- name: lint
run: |
cd src
cp -v config.php.example config.php
find . -name "*.php" -exec php -l {} \;
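# fetch the newest PhpDeprecationDetector release (tags matching v1 are excluded) and scan for deprecated PHP usage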
curl -sSfL https://github.com/wapmorgan/PhpDeprecationDetector/releases/download/"$(git ls-remote --tags https://github.com/wapmorgan/PhpDeprecationDetector | cut -d/ -f3 | grep -v v1 | sort -V | tail -1)"/phpdd-"$(git ls-remote --tags https://github.com/wapmorgan/PhpDeprecationDetector | cut -d/ -f3 | grep -v v1 | sort -V | tail -1)".phar -o phpdd.phar
chmod +x phpdd.phar
find . -name "*.php" -exec ./phpdd.phar -n {} \;
curl -sSfL https://github.com/vimeo/psalm/releases/latest/download/psalm.phar -o psalm.phar
chmod +x psalm.phar
./psalm.phar --no-cache

.github/workflows/prettier.yml

@@ -0,0 +1,25 @@
name: prettier
on:
push:
schedule:
- cron: "0 */6 * * *"
workflow_dispatch:
jobs:
prettier:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: prettier
run: |
yarn global add prettier @prettier/plugin-php prettier-plugin-tailwindcss
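# patch prettier's bundled HTML plugin so it emits <!DOCTYPE ...> in uppercase instead of lowercasing it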
sed -i "s|doctype|DOCTYPE|g" /home/runner/.config/yarn/global/node_modules/prettier/plugins/html.js
sed -i "s|doctype|DOCTYPE|g" /home/runner/.config/yarn/global/node_modules/prettier/plugins/html.mjs
prettier . -w --end-of-line crlf --print-width 10000 --plugin /home/runner/.config/yarn/global/node_modules/@prettier/plugin-php/standalone.js --plugin /home/runner/.config/yarn/global/node_modules/prettier-plugin-tailwindcss/dist/index.mjs
- name: push
run: |
git add -A
git config user.name "GitHub"
git config user.email "noreply@github.com"
git diff-index --quiet HEAD || git commit -sm "prettier"
git push


@@ -1,8 +1,6 @@
name: Shellcheck
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
@@ -10,10 +8,10 @@ jobs:
name: Check Shell
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/checkout@v6
- name: Run Shellcheck
uses: ludeeus/action-shellcheck@master
with:
check_together: 'yes'
# env:
# SHELLCHECK_OPTS: --shell sh -e SC1091 -e SC2153 -e SC2154
check_together: "yes"
env:
SHELLCHECK_OPTS: --shell sh -e SC1091 -e SC2153 -e SC2154


@@ -1,8 +1,6 @@
name: spellcheck
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
@@ -11,11 +9,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code.
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@v6
- name: Check spelling
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
uses: codespell-project/actions-codespell@v2
with:
check_filenames: true
check_hidden: true
skip: pnpm-lock.yaml,./frontend/src/locale/src
ignore_words_list: afterAll,alog
skip: .git,.gitignore,./rootfs/app/nftd,./src/vendor


@@ -0,0 +1,25 @@
name: tailwindcss-update
on:
push:
schedule:
- cron: "0 */6 * * *"
workflow_dispatch:
jobs:
tailwindcss-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: update tailwindcss (minify)
uses: ZoeyVid/tailwindcss-update@main
with:
input: src/tailwind-input.css
output: src/public/tailwind.css
params: "--minify"
- name: push changes
run: |
git add --force src/public/tailwind.css
git config user.name "GitHub"
git config user.email "noreply@github.com"
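# commit only if the generated stylesheet actually changed, so the job does not fail on an empty commit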
git diff-index --quiet HEAD || git commit -sm "tailwindcss-update"
git push

.github/workflows/update-and-lint.yml

@@ -0,0 +1,26 @@
name: update-and-lint
on:
push:
branches:
- develop
- php
schedule:
- cron: "0 */6 * * *"
workflow_dispatch:
jobs:
update-and-lint:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: nginxbeautifier
run: |
yarn global add nginxbeautifier
nginxbeautifier -s 4 -r rootfs/usr/local/nginx/conf
- name: push changes
run: |
git add -A
git config user.name "GitHub"
git config user.email "noreply@github.com"
git diff-index --quiet HEAD || git commit -sm "update and lint"
git push

.gitignore

@@ -1,7 +1,789 @@
.DS_Store
backend/certbot-dns-plugins.js
frontend/certbot-dns-plugins.js
# User-specific stuff
.idea
.qodo
desktop.files.json
package-lock.json
yarn.lock
desktop.ini
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
composer.phar
/vendor/
!/jobs
!/.gitignore
!/*.xml
#ignore all files in jobs subdirectories except for folders
#note: git doesn't track folders, only file content
jobs/**
!jobs/**/
#uncomment the following line to save next build numbers with config
#!jobs/**/nextBuildNumber
#exclude only config.xml files in repository subdirectories
!config.xml
#don't track workspaces (when users build on the master)
jobs/**/*workspace
*.iml
*.ipr
*.iws
# IntelliJ
out
# Compiled class file
*.class
# Log file
*.log
# BlueJ files
*.ctxt
# Package Files #
*.jar
*.war
*.nar
*.ear
*.zip
*.tar.gz
*.rar
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
# Icon must end with two \r
Icon
# Thumbnails
._*
.vscode
certbot-help.txt
*/node_modules
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
.gradle
/build/
# Ignore Gradle GUI config
gradle-app.setting
# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
!gradle-wrapper.jar
# Cache of project
.gradletasknamecache
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
pom.xml.next
release.properties
dependency-reduced-pom.xml
buildNumber.properties
.mvn/timing.properties
# https://github.com/takari/maven-wrapper#usage-without-binary-jar
.mvn/wrapper/maven-wrapper.jar
.flattened-pom.xml
# Common working directory
run
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
# Next.js build output
.next
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and *not* Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
/vendor/
node_modules/
npm-debug.log
yarn-error.log
# Laravel 4 specific
bootstrap/compiled.php
app/storage/
# Laravel 5 & Lumen specific
public/storage
public/hot
# Laravel 5 & Lumen specific with changed public path
public_html/storage
public_html/hot
storage/*.key
.env
Homestead.yaml
Homestead.json
/.vagrant
.phpunit.result.cache
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
#lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Oo]bj/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# ignore everything in the root except the "wp-content" directory.
!wp-content/
# ignore everything in the "wp-content" directory, except:
# "mu-plugins", "plugins", "themes" directory
wp-content/*
!wp-content/mu-plugins/
!wp-content/plugins/
!wp-content/themes/
# ignore these plugins
wp-content/plugins/hello.php
# ignore specific themes
wp-content/themes/twenty*/
# ignore node dependency directories
node_modules/
# ignore log files and databases
*.log
*.sql
*.sqlite
config.php
**/config.php

.imgbotconfig

@@ -0,0 +1,6 @@
{
"schedule": "daily",
"aggressiveCompression": "true",
"compressWiki": "true",
"minKBReduced": 0
}

.prettierignore

@@ -0,0 +1,7 @@
.github
src/public/tailwind.css
src/composer.lock
src/vendor
rootfs/app/fancyindex


@@ -1 +0,0 @@
2.14.0

.whitesource

@@ -0,0 +1,13 @@
{
"scanSettings": {
"baseBranches": []
},
"checkRunSettings": {
"vulnerableCheckRunConclusionLevel": "failure",
"displayMode": "diff"
},
"issueSettings": {
"minSeverityLevel": "LOW",
"issueType": "DEPENDENCY"
}
}


@@ -633,8 +633,8 @@ the "copyright" line and a pointer to where the full notice is found.
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,

Caddy.Dockerfile

@@ -0,0 +1,6 @@
FROM alpine:3.23.0
RUN apk add --no-cache ca-certificates tzdata
COPY --from=caddy:2.10.2 /usr/bin/caddy /usr/bin/caddy
COPY Caddyfile /etc/caddy/Caddyfile
CMD ["caddy", "run", "--config", "/etc/caddy/Caddyfile", "--adapter", "caddyfile"]

Caddyfile

@@ -0,0 +1,30 @@
{
auto_https off
servers :80 {
protocols h1 h2c
}
}
http://captive.apple.com:80 {
respond "Success"
}
http://clients3.google.com:80 {
respond 204
}
http://connectivitycheck.gstatic.com:80 {
respond 204
}
http://www.msftncsi.com:80 {
respond "Microsoft NCSI"
}
http://www.msftconnecttest.com:80 {
respond "Microsoft Connect Test"
}
http://ipv6.msftconnecttest.com:80 {
respond "Microsoft Connect Test"
}
http://detectportal.firefox.com:80 {
respond "<meta http-equiv=\"refresh\" content=\"0;url=https://support.mozilla.org/kb/captive-portal\"/>"
}
http://:80 {
redir https://{host}{uri} permanent
}


@@ -1,246 +1,127 @@
# syntax=docker/dockerfile:1.22.0@sha256:4a43a54dd1fedceb30ba47e76cfcf2b47304f4161c0caeac2db1c61804ea3c91
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS nginx
# syntax=docker/dockerfile:labs
FROM --platform="$BUILDPLATFORM" alpine:3.23.0 AS crowdsec
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ARG LUAJIT_INC=/usr/include/luajit-2.1
ARG LUAJIT_LIB=/usr/lib
ARG NGINX_VER=release-1.29.5
ARG DTR_VER=1.29.2
ARG RCP_VER=1.29.5
ARG ZNP_VER=1.26.3
ARG NB_VER=master
ARG NUB_VER=main
ARG ZNM_VER=master
ARG NHUZFM_VER=main
ARG NF_VER=v0.6.0
ARG HMNM_VER=v0.39
ARG NDK_VER=v0.3.4
ARG LNM_VER=v0.10.29R2
ARG NJS_VER=0.9.6
ARG NAL_VER=master
ARG VTS_VER=v0.2.5
ARG NNTLM_VER=master
ARG NHG2M_VER=3.4
ARG FLAGS
ARG CC=clang
ARG CFLAGS="$FLAGS -m64 -O3 -pipe -flto=full -fstack-clash-protection -fstack-protector-strong -ftrivial-auto-var-init=zero -fno-delete-null-pointer-checks -fno-strict-overflow -fno-strict-aliasing -fno-plt -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3 -Wformat=2 -Werror=format-security -Wno-sign-compare"
ARG CXX=clang++
ARG CXXFLAGS="$FLAGS -m64 -O3 -pipe -flto=full -fstack-clash-protection -fstack-protector-strong -ftrivial-auto-var-init=zero -fno-delete-null-pointer-checks -fno-strict-overflow -fno-strict-aliasing -fno-plt -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3 -D_GLIBCXX_ASSERTIONS -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS=1 -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_FAST -Wformat=2 -Werror=format-security -Wno-sign-compare"
ARG LDFLAGS="-fuse-ld=lld -m64 -Wl,-s -Wl,-O2 -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--sort-common -Wl,--as-needed -Wl,-z,pack-relative-relocs"
ARG CSNB_VER=v1.0.8
WORKDIR /src
COPY patches/*.patch /src
RUN apk upgrade --no-cache -a && \
apk add --no-cache git make clang lld cmake ninja file \
linux-headers libatomic_ops-dev aws-lc aws-lc-dev pcre2-dev luajit-dev zlib-ng-dev brotli-dev zstd-dev libxslt-dev openldap-dev quickjs-ng-dev libmaxminddb-dev clang-dev
RUN git clone --depth 1 https://github.com/nginx/nginx --branch "$NGINX_VER" /src/nginx && \
cd /src/nginx && \
wget -q https://raw.githubusercontent.com/nginx-modules/ngx_http_tls_dyn_size/refs/heads/master/nginx__dynamic_tls_records_"$DTR_VER"%2B.patch -O /src/nginx/1.patch && \
git apply /src/nginx/1.patch && \
wget -q https://raw.githubusercontent.com/openresty/openresty/refs/heads/master/patches/nginx/"$RCP_VER"/nginx-"$RCP_VER"-resolver_conf_parsing.patch -O /src/nginx/2.patch && \
git apply /src/nginx/2.patch && \
wget -q https://patch-diff.githubusercontent.com/raw/nginx/nginx/pull/689.patch -O /src/nginx/3.patch && \
git apply /src/nginx/3.patch && \
wget -q https://raw.githubusercontent.com/zlib-ng/patches/refs/heads/master/nginx/"$ZNP_VER"-zlib-ng.patch -O /src/nginx/4.patch && \
git apply /src/nginx/4.patch && \
git apply /src/nginx.patch && \
\
git clone --depth 1 https://github.com/google/ngx_brotli --branch "$NB_VER" /src/ngx_brotli && \
cd /src/ngx_brotli && \
git apply /src/ngx_brotli.patch && \
git clone --depth 1 https://github.com/clyfish/ngx_unbrotli --branch "$NUB_VER" /src/ngx_unbrotli && \
cd /src/ngx_unbrotli && \
git apply /src/ngx_unbrotli.patch && \
git clone --depth 1 https://github.com/tokers/zstd-nginx-module --branch "$ZNM_VER" /src/zstd-nginx-module && \
cd /src/zstd-nginx-module && \
wget -q https://patch-diff.githubusercontent.com/raw/tokers/zstd-nginx-module/pull/44.patch -O /src/zstd-nginx-module/1.patch && \
wget -q https://patch-diff.githubusercontent.com/raw/tokers/zstd-nginx-module/pull/23.patch -O /src/zstd-nginx-module/2.patch && \
git apply /src/zstd-nginx-module.patch && \
git apply /src/zstd-nginx-module/1.patch && \
git apply /src/zstd-nginx-module/2.patch && \
git clone --depth 1 https://github.com/HanadaLee/ngx_http_unzstd_filter_module --branch "$NHUZFM_VER" /src/ngx_http_unzstd_filter_module && \
git clone --depth 1 https://github.com/aperezdc/ngx-fancyindex --branch "$NF_VER" /src/ngx-fancyindex && \
git clone --depth 1 https://github.com/openresty/headers-more-nginx-module --branch "$HMNM_VER" /src/headers-more-nginx-module && \
git clone --depth 1 https://github.com/vision5/ngx_devel_kit --branch "$NDK_VER" /src/ngx_devel_kit && \
git clone --depth 1 https://github.com/openresty/lua-nginx-module --branch "$LNM_VER" /src/lua-nginx-module && \
cd /src/lua-nginx-module && \
git apply /src/lua-nginx-module.patch && \
\
git clone --depth 1 https://github.com/nginx/njs --branch "$NJS_VER" /src/njs && \
git clone --depth 1 https://github.com/kvspb/nginx-auth-ldap --branch "$NAL_VER" /src/nginx-auth-ldap && \
git clone --depth 1 https://github.com/vozlt/nginx-module-vts --branch "$VTS_VER" /src/nginx-module-vts && \
git clone --depth 1 https://github.com/gabihodoroaga/nginx-ntlm-module --branch "$NNTLM_VER" /src/nginx-ntlm-module && \
git clone --depth 1 https://github.com/leev/ngx_http_geoip2_module --branch "$NHG2M_VER" /src/ngx_http_geoip2_module
RUN cd /src/nginx && \
/src/nginx/auto/configure \
--build=NPMplus \
--with-debug \
--with-compat \
--with-threads \
--with-file-aio \
--with-libatomic \
--with-pcre \
--with-pcre-jit \
--without-select_module \
--without-poll_module \
--with-stream \
--with-stream_ssl_module \
--with-stream_ssl_preread_module \
--with-stream_realip_module \
--with-http_v2_module \
--with-http_v3_module \
--with-http_ssl_module \
--with-http_realip_module \
--with-http_gunzip_module \
--with-http_gzip_static_module \
--with-http_sub_module \
--with-http_addition_module \
--with-http_stub_status_module \
--with-http_auth_request_module \
--add-module=/src/ngx_brotli \
--add-module=/src/ngx_unbrotli \
--add-module=/src/zstd-nginx-module \
--add-module=/src/ngx_http_unzstd_filter_module \
--add-module=/src/ngx-fancyindex \
--add-module=/src/headers-more-nginx-module \
--add-module=/src/ngx_devel_kit \
--add-module=/src/lua-nginx-module \
--add-dynamic-module=/src/njs/nginx \
--add-dynamic-module=/src/nginx-auth-ldap \
--add-dynamic-module=/src/nginx-module-vts \
--add-dynamic-module=/src/nginx-ntlm-module \
--add-dynamic-module=/src/ngx_http_geoip2_module \
--with-ld-opt="$LDFLAGS" && \
\
make -j "$(nproc)" install
RUN git clone --depth 1 https://github.com/openappsec/attachment /src/attachment && \
cd /src/attachment && \
git apply /src/attachment.patch && \
cmake /src/attachment -G Ninja && \
ninja && \
mv -v /src/attachment/attachments/nginx/ngx_module/libngx_module.so /usr/local/nginx/modules/libngx_module.so
RUN find /usr/local/nginx/modules -name "*.so" -exec strip -s {} \; && \
strip -s /usr/local/nginx/sbin/nginx && \
strip -s /src/attachment/core/shmem_ipc/libosrc_shmem_ipc.so && \
strip -s /src/attachment/core/compression/libosrc_compression_utils.so && \
strip -s /src/attachment/attachments/nginx/nginx_attachment_util/libosrc_nginx_attachment_util.so && \
\
find /usr/local/nginx/modules -name "*.so" -exec file {} \; && \
file /usr/local/nginx/sbin/nginx && \
file /src/attachment/core/shmem_ipc/libosrc_shmem_ipc.so && \
file /src/attachment/core/compression/libosrc_compression_utils.so && \
file /src/attachment/attachments/nginx/nginx_attachment_util/libosrc_nginx_attachment_util.so && \
/usr/local/nginx/sbin/nginx -V
apk add --no-cache ca-certificates git build-base && \
git clone --recursive https://github.com/crowdsecurity/cs-nginx-bouncer --branch "$CSNB_VER" /src && \
make && \
tar xzf crowdsec-nginx-bouncer.tgz && \
mv crowdsec-nginx-bouncer-* crowdsec-nginx-bouncer && \
sed -i "/lua_package_path/d" /src/crowdsec-nginx-bouncer/nginx/crowdsec_nginx.conf && \
sed -i "s|/etc/crowdsec/bouncers/crowdsec-nginx-bouncer.conf|/data/etc/crowdsec/crowdsec.conf|g" /src/crowdsec-nginx-bouncer/nginx/crowdsec_nginx.conf && \
sed -i "s|API_KEY=.*|API_KEY=|g" /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
sed -i "s|ENABLED=.*|ENABLED=false|g" /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
sed -i "s|API_URL=.*|API_URL=http://127.0.0.1:8080|g" /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
sed -i "s|BAN_TEMPLATE_PATH=.*|BAN_TEMPLATE_PATH=/data/etc/crowdsec/ban.html|g" /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
sed -i "s|CAPTCHA_TEMPLATE_PATH=.*|CAPTCHA_TEMPLATE_PATH=/data/etc/crowdsec/captcha.html|g" /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
echo "APPSEC_URL=http://127.0.0.1:7422" | tee -a /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
echo "APPSEC_FAILURE_ACTION=deny" | tee -a /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf && \
sed -i "s|BOUNCING_ON_TYPE=all|BOUNCING_ON_TYPE=ban|g" /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf
FROM --platform="$BUILDPLATFORM" alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS frontend
FROM zoeyvid/nginx-quic:656-python
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ARG NODE_ENV=production
COPY frontend /app
WORKDIR /app/frontend
COPY rootfs /
COPY src /app/src
COPY --from=zoeyvid/valkey-static:72 /usr/local/bin/valkey-cli /usr/local/bin/valkey-cli
COPY --from=zoeyvid/valkey-static:72 /usr/local/bin/valkey-server /usr/local/bin/valkey-server
ARG CRS_VER=v4.7.0
RUN apk upgrade --no-cache -a && \
apk add --no-cache nodejs pnpm && \
pnpm install --frozen-lockfile && \
pnpm formatjs compile-folder src/locale/src src/locale/lang && \
pnpm tsc && \
pnpm vite build
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS backend
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ARG NODE_ENV=production
COPY backend /app
WORKDIR /app
RUN apk upgrade --no-cache -a && \
apk add --no-cache nodejs pnpm binutils file && \
pnpm install --frozen-lockfile --prod && \
pnpm cache delete && \
find node_modules -name "*.map" -delete && \
rm -r node_modules/better-sqlite3/deps/sqlite3 && \
find /app/node_modules -name "*.node" -type f -exec strip -s {} \; && \
find /app/node_modules -name "*.node" -type f -exec file {} \;
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ENV NODE_ENV=production
ARG LRC_VER=v0.1.32R1
ARG LRL_VER=v0.15
ARG LCSB_VER=v1.0.13
COPY --from=nginx /usr/local/nginx /usr/local/nginx
COPY --from=nginx /src/attachment/core/shmem_ipc/libosrc_shmem_ipc.so /usr/local/lib/libosrc_shmem_ipc.so
COPY --from=nginx /src/attachment/core/compression/libosrc_compression_utils.so /usr/local/lib/libosrc_compression_utils.so
COPY --from=nginx /src/attachment/attachments/nginx/nginx_attachment_util/libosrc_nginx_attachment_util.so /usr/local/lib/libosrc_nginx_attachment_util.so
COPY --from=backend /app /app
COPY rootfs /
COPY LICENSE /LICENSE
COPY COPYING /COPYING
WORKDIR /app
RUN apk upgrade --no-cache -a && \
apk add --no-cache tzdata tini \
aws-lc pcre2 luajit zlib-ng brotli zstd lua5.1-cjson libxml2 libldap quickjs-ng-libs libmaxminddb-libs \
curl coreutils findutils grep jq openssl shadow su-exec util-linux-misc \
bash bash-completion nano \
logrotate goaccess fcgi \
luarocks5.1 git make \
nodejs python3 && \
apk add --no-cache ca-certificates tzdata tini curl \
bash nano \
logrotate apache2-utils \
lua5.1-lzlib lua5.1-socket \
coreutils grep findutils jq shadow su-exec fcgi \
luarocks5.1 lua5.1-dev lua5.1-sec build-base git \
php83-fpm php83-openssl php83-iconv php83-ctype php83-curl php83-session php83-sqlite3 php83-pecl-redis && \
\
cp -var /etc/php83 /etc/php && \
sed -i "s|;\?listen\s*=.*|listen = /run/php.sock|g" /etc/php/php-fpm.d/www.conf && \
sed -i "s|;\?error_log\s*=.*|error_log = /proc/self/fd/2|g" /etc/php/php-fpm.conf && \
sed -i "s|;\?include\s*=.*|include = /etc/php/php-fpm.d/*.conf|g" /etc/php/php-fpm.conf && \
sed -i "s|;\?session.save_handler\s*=.*|session.save_handler = redis|g" /etc/php/php.ini && \
sed -i "s|;\?session.save_path\s*=.*|session.save_path = unix:///run/valkey.sock|g" /etc/php/php.ini && \
sed -i "s|;\?session.name\s*=.*|session.name = NPMPLUSSESSIONID|g" /etc/php/php.ini && \
sed -i "s|;\?session.auto_start\s*=.*|session.auto_start = 1|g" /etc/php/php.ini && \
sed -i "s|;\?session.use_strict_mode\s*=.*|session.use_strict_mode = 1|g" /etc/php/php.ini && \
sed -i "s|;\?session.cookie_secure\s*=.*|session.cookie_secure = 1|g" /etc/php/php.ini && \
sed -i "s|;\?session.cookie_httponly\s*=.*|session.cookie_httponly = 1|g" /etc/php/php.ini && \
sed -i "s|;\?session.cookie_samesite\s*=.*|session.cookie_samesite = Strict|g" /etc/php/php.ini && \
\
mv -v /app/src/config.php.example /app/src/config.php && \
\
curl https://raw.githubusercontent.com/acmesh-official/acme.sh/master/acme.sh | sh -s -- --install-online --home /usr/local/acme.sh --nocron && \
ln -s /usr/local/acme.sh/acme.sh /usr/local/bin/acme.sh && \
\
curl https://raw.githubusercontent.com/tomwassenberg/certbot-ocsp-fetcher/refs/heads/main/certbot-ocsp-fetcher -o /usr/local/bin/certbot-ocsp-fetcher.sh && \
chmod +x /usr/local/bin/certbot-ocsp-fetcher.sh && \
\
git clone https://github.com/coreruleset/coreruleset --branch "$CRS_VER" /tmp/coreruleset && \
mkdir -v /usr/local/nginx/conf/conf.d/include/coreruleset && \
mv -v /tmp/coreruleset/crs-setup.conf.example /usr/local/nginx/conf/conf.d/include/coreruleset/crs-setup.conf.example && \
mv -v /tmp/coreruleset/plugins /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
mv -v /tmp/coreruleset/rules /usr/local/nginx/conf/conf.d/include/coreruleset/rules && \
rm -r /tmp/* && \
\
luarocks-5.1 install lua-cjson && \
luarocks-5.1 install lua-resty-http && \
luarocks-5.1 install lua-resty-string && \
luarocks-5.1 install lua-resty-openssl && \
luarocks-5.1 install lua-resty-openidc && \
luarocks-5.1 install lua-resty-session && \
\
git clone --depth 1 https://github.com/openresty/lua-resty-core --branch "$LRC_VER" /src/lua-resty-core && \
cd /src/lua-resty-core && \
make -j "$(nproc)" install LUA_LIB_DIR=/usr/local/share/lua/5.1 && \
\
git clone --depth 1 https://github.com/openresty/lua-resty-lrucache --branch "$LRL_VER" /src/lua-resty-lrucache && \
cd /src/lua-resty-lrucache && \
make -j "$(nproc)" install LUA_LIB_DIR=/usr/local/share/lua/5.1 && \
\
git clone --depth 1 https://github.com/crowdsecurity/lua-cs-bouncer --branch "$LCSB_VER" /src/lua-cs-bouncer && \
mv /src/lua-cs-bouncer/lib/* /usr/local/share/lua/5.1 && \
mv /src/lua-cs-bouncer/templates/captcha.html /etc/captcha.html.original && \
mv /src/lua-cs-bouncer/templates/ban.html /etc/ban.html.original && \
\
cd && \
rm -r /src /tmp/luarocks_local_cache-* && \
apk del --no-cache luarocks5.1 git make && \
\
sed -i "s|placeholder|$(cat /app/package.json | jq -r .version)|g" /usr/local/nginx/conf/conf.d/crowdsec.conf.disabled && \
\
python3 -m venv /usr/local && \
pip install --no-cache-dir --upgrade pip certbot && \
\
wget -q https://raw.githubusercontent.com/tomwassenberg/certbot-ocsp-fetcher/refs/heads/main/certbot-ocsp-fetcher -O - | sed "s|/live||g" > /usr/local/bin/certbot-ocsp-fetcher.sh && \
wget -q https://raw.githubusercontent.com/vasilevich/nginxbeautifier/5cee8db2a505f2a253e24691399c828c043071fc/index.js -O /usr/local/bin/nginxbeautifier && \
wget -q https://raw.githubusercontent.com/vasilevich/nginxbeautifier/5cee8db2a505f2a253e24691399c828c043071fc/nginxbeautifier.js -O /usr/local/bin/nginxbeautifier.js && \
\
ln -s /usr/local/nginx/sbin/nginx /usr/local/bin/nginx && \
ln -s /app/password-reset.js /usr/local/bin/password-reset.js && \
ln -s /app/sqlite-vaccum.js /usr/local/bin/sqlite-vaccum.js && \
ln -s /app/index.js /usr/local/bin/index.js && \
\
chmod +x /usr/local/bin/*
apk del --no-cache luarocks5.1 lua5.1-dev lua5.1-sec build-base git
COPY --from=frontend /app/dist /app/frontend
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/lib/plugins /usr/local/nginx/lib/lua/plugins
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/lib/crowdsec.lua /usr/local/nginx/lib/lua/crowdsec.lua
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/templates/ban.html /usr/local/nginx/conf/conf.d/include/ban.html
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/templates/captcha.html /usr/local/nginx/conf/conf.d/include/captcha.html
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf /usr/local/nginx/conf/conf.d/include/crowdsec.conf
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/nginx/crowdsec_nginx.conf /usr/local/nginx/conf/conf.d/include/crowdsec_nginx.conf
ENTRYPOINT ["tini", "--", "entrypoint.sh"]
HEALTHCHECK CMD healthcheck.sh
# todo move to ui
ENV PUID=0 \
PGID=0 \
GOAIWSP=48683 \
NPM_PORT=81 \
GOA_PORT=91 \
IPV4_BINDING=0.0.0.0 \
NPM_IPV4_BINDING=0.0.0.0 \
GOA_IPV4_BINDING=0.0.0.0 \
IPV6_BINDING=[::] \
NPM_IPV6_BINDING=[::] \
GOA_IPV6_BINDING=[::] \
DISABLE_IPV6=false \
NPM_DISABLE_IPV6=false \
GOA_DISABLE_IPV6=false \
NPM_LISTEN_LOCALHOST=false \
GOA_LISTEN_LOCALHOST=false \
DEFAULT_CERT_ID=0 \
DISABLE_HTTP=false \
DISABLE_H3_QUIC=false \
NGINX_ACCESS_LOG=false \
NGINX_LOG_NOT_FOUND=false \
NGINX_404_REDIRECT=false \
NGINX_DISABLE_PROXY_BUFFERING=false \
DISABLE_NGINX_BEAUTIFIER=false \
CLEAN=true \
FULLCLEAN=false \
SKIP_IP_RANGES=false \
LOGROTATE=false \
LOGROTATIONS=3 \
CRT=24 \
IPRT=1 \
GOA=false \
GOACLA="--agent-list --real-os --double-decode --anonymize-ip --anonymize-level=1 --keep-last=30 --with-output-resolver --no-query-string" \
PHP82=false \
PHP83=false
LABEL com.centurylinklabs.watchtower.monitor-only="true"
LABEL wud.watch="false"
LABEL wud.watch.digest="false"
ENTRYPOINT ["tini", "--", "entrypoint.sh"]
HEALTHCHECK CMD healthcheck.sh
EXPOSE 80/tcp
EXPOSE 81/tcp
EXPOSE 443/tcp
EXPOSE 443/udp

README.md

@@ -1,337 +1,44 @@
# NPMplus
If you don't need the web GUI of NPMplus, you may also have a look at caddy: https://caddyserver.com
<!-- todo: rewrite -->
This project comes as a pre-built docker image that enables you to easily forward to your websites
running at home or otherwise, including free TLS, without having to know too much about Nginx or Certbot.
- [Compatibility (to Upstream)](#compatibility-to-upstream)
- [Quick Setup](#quick-setup)
- [Migration from upstream/vanilla nginx-proxy-manager](#migration-from-upstreamvanilla-nginx-proxy-manager)
<!---
- [Screenshots](https://nginxproxymanager.com/screenshots)
--->
**Note: this fork is distributed under the GNU Affero General Public License version 3. It is based on the MIT licensed [nginx-proxy-manager](https://github.com/NginxProxyManager/nginx-proxy-manager).** <br>
**Note: by running NPMplus you agree to the TOS of Let's Encrypt/your custom CA.** <br>
**Note: remember to expose udp/quic for the https port (443/udp).** <br>
**Note: remember to add your domain to the [hsts preload list](https://hstspreload.org) if you enabled hsts for your domain.** <br>
**Note: please report issues first to this fork before reporting them to the upstream repository.** <br>
<!---
**Note: To fix [this issue](https://github.com/SpiderLabs/ModSecurity/issues/2848), instead of running `nginx -s reload`, this fork stops nginx and starts it again. This can result in a 502 error when you update your hosts. See https://github.com/ZoeyVid/NPMplus/issues/296 and https://github.com/ZoeyVid/NPMplus/issues/283.** <br>
--->
## List of some changes
**Note: Reloading the NPMplus UI can cause a 502 error. See https://github.com/ZoeyVid/NPMplus/issues/241.** <br>
**Note: NO armv7, route53 and aws cloudfront ip ranges support.** <br>
**Note: add `net.ipv4.ip_unprivileged_port_start=0` at the end of `/etc/sysctl.conf` to support PUID/PGID in network mode host (see the sketch after these notes).** <br>
**Note: If you don't use network mode host, which I don't recommend, don't forget to expose port 443 on tcp AND udp (http3/quic needs udp).** <br>
**Note: If you don't use network mode host, which I don't recommend, don't forget to enable IPv6 in Docker, see [here](https://github.com/nextcloud/all-in-one/blob/main/docker-ipv6-support.md), you only need to follow steps one and two before deploying NPMplus!** <br>
**Note: Don't forget to open ports 80 (tcp) and 443 (tcp AND udp, http3/quic needs udp) in your firewall (because of network mode host, you also need to open these ports in ufw, if you use ufw).** <br>
**Note: ModSecurity overblocking (403 Error)? Please see `/opt/npm/etc/modsecurity`, if you also use CRS please see [here](https://coreruleset.org/docs/concepts/false_positives_tuning).** <br>
**Note: Other Databases like MariaDB may work, but are unsupported.** <br>
**Note: access.log/stream.log, logrotate and goaccess are NOT enabled by default because of GDPR; you can enable them in the compose.yaml.** <br>
**Note: if you remove a cert, which is still used by a host, NPM/NPMplus will crash.** <br>
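A minimal sketch of the sysctl and firewall notes above, assuming a host managed with ufw (adjust to your firewall):
```bash
# allow unprivileged processes (PUID/PGID) to bind low ports in network mode host
echo "net.ipv4.ip_unprivileged_port_start=0" | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
# http3/quic needs 443 on udp in addition to tcp
sudo ufw allow 80/tcp
sudo ufw allow 443/tcp
sudo ufw allow 443/udp
```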
- Supports HTTP/3 (QUIC), requires you to expose https with udp
- Support for crowdsec and openappsec
- Support for acme profiles (letsencrypt shortlived is used by default)
- Improved support for different acme servers (like ocsp/must-staple)
- OIDC support
- smaller image based on alpine
- ML-KEM support (hardened TLS settings are also enforced)
- https for the NPMplus interface
- Goaccess included
- punycode domain support
- zstd and brotli
- basic security headers are always sent
- allow empty ports to support loadbalancing
- proxy protocol support
- improved nginx build and nginx templates
- file and php server support (and fancyindex)
- option to edit custom certs
- gravatars are cached locally and fetched by the backend (better privacy by not exposing you directly to gravatar)
- qrcodes for totp are generated locally in your browser instead of using a third-party api (better privacy/security by not exposing you and the secret to the third-party api)
- re-added some things that were removed with upstream's new frontend
- use secure cookies instead of local storage to save the token
- Password reset (only sqlite) using `docker exec -it npmplus password-reset.js USER_EMAIL PASSWORD`
- many other things, see this README.md and the compose.yaml
## Compatibility (to Upstream)
- Supported architectures: x86_64-v2/amd64v2 (check with `/lib/ld-linux-x86-64.so.2 --help`; plain x86-64 is not supported, only v2 and up) and aarch64/arm64. Other architectures (including other 64-bit ones) and all 32-bit architectures (like armhf/armv7 (dropped) or armel/armv6) are not supported because of how long they would take to compile.
- I test NPMplus with docker, but podman should also work (I advise against running the NPMplus container inside an LXC container: it will work, but it works better without, so install docker/podman on the host or in a KVM and run NPMplus there)
- MariaDB(/MySQL)/PostgreSQL may work as databases for NPMplus (configured as in upstream), but they are unsupported, have no advantage over SQLite (at least with NPMplus) and are not recommended. Please note that you can't migrate from any of these to SQLite without making a fresh install and/or copying everything yourself.
- NPMplus uses https instead of http for the admin interface
- NPMplus won't trust cloudflare until you set the env TRUST_CLOUDFLARE to true, but please read [this](#notes-on-cloudflare) first before setting the env to true.
- route53 is not supported as dns-challenge provider and Amazon CloudFront IPs can't be automatically trusted in NPMplus, even if you set TRUST_CLOUDFLARE env to true.
- The following certbot dns plugins have been replaced, which means that certs using one of these providers will not renew and need to be recreated (not renewed): `certbot-dns-he`, `certbot-dns-dnspod`, `certbot-dns-online`, `certbot-dns-powerdns` and `certbot-dns-do` (`certbot-dns-do` was replaced in upstream with v2.12.4 and then merged into NPMplus)
- There are many changes and improvements to the nginx config, so please don't follow guides on the internet about custom/advanced config; they are either redundant or should not be used at all with NPMplus
- Many forms have changed behavior, see [Comments on some buttons](#comments-on-some-buttons)
## Project Goal
I created this project to fill a personal need to provide users with an easy way to accomplish reverse
proxying hosts with TLS termination and it had to be so easy that a monkey could do it. This goal hasn't changed.
While there might be advanced options, they are optional and the project should be as simple as possible
so that the barrier for entry here is low.
## Quick Setup
1. Install Docker and Docker Compose (podman or docker rootless may also work)
- [Docker Install documentation](https://docs.docker.com/engine/install)
- [Docker Compose Install documentation](https://docs.docker.com/compose/install/linux)
2. Download this [compose.yaml](https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml) (or use its content as a portainer stack)
3. Adjust TZ and ACME_EMAIL to your values and maybe adjust other env options to your needs
4. Start NPMplus by running (or deploy your portainer stack)
```bash
docker compose up -d
```
5. Log in to the Admin UI: When your docker container is running, connect to the admin interface using `https://` on port `81`.
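For orientation, here is a minimal sketch of what such a compose.yaml can look like; the image name, data path and port handling are assumptions on my part, so treat the linked compose.yaml as authoritative:
```yaml
# minimal sketch only - the official compose.yaml linked above is authoritative;
# image name and mount path are assumptions, TZ/ACME_EMAIL come from step 3
services:
  npmplus:
    container_name: npmplus
    image: docker.io/zoeyvid/npmplus:latest # assumption: check the official file for the exact image/tag
    restart: always
    network_mode: host # recommended by this README; otherwise expose 80/tcp, 81/tcp and 443/tcp+udp
    volumes:
      - /opt/npmplus:/data # assumption based on the /opt/npmplus paths used in this README
    environment:
      - TZ=Europe/Berlin # adjust to your timezone
      - ACME_EMAIL=admin@example.org # adjust to your email address
```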
<!---
### Sponsor the original creator (not us):
<a href="https://www.buymeacoffee.com/jc21" target="_blank"><img src="http://public.jc21.com/github/by-me-a-coffee.png" alt="Buy Me A Coffee" style="height: 51px !important;width: 217px !important;" ></a>
--->
## Migration from upstream/vanilla nginx-proxy-manager
- **NOTE: Migrating back to the original version is not possible.** Please make a **backup** before migrating, so you have the option to revert if needed
1. Please read [this](#compatibility-to-upstream) first
2. make a backup of your data and letsencrypt folders (creating a copy using `cp -a` should be enough)
3. download the latest compose.yaml of NPMplus
4. adjust your paths (of /etc/letsencrypt and /data) to the ones you used with nginx-proxy-manager
5. adjust TZ and ACME_EMAIL to your values and maybe adjust other env options to your needs
6. stop nginx-proxy-manager
7. deploy the NPMplus compose.yaml
8. You should now remove the `/etc/letsencrypt` mount, since it was moved to `/data` during migration, then redeploy the compose file
9. Since many forms have changed, please check if they are still correct for every host you have.
10. If you proxy NPM(plus) through NPM(plus) make sure to change the scheme from http to https
11. Because of added CSP rules, gravatar images will not load; to fix this, open the form to edit a user's name and save it without changes
12. Maybe setup crowdsec (see below)
13. Please report all (migration) issues you may have
## Features
# Crowdsec
<!--Note: Using Immich behind NPMplus with enabled appsec causes issues, see here: [#1241](https://github.com/ZoeyVid/NPMplus/discussions/1241) <br>-->
Note: If you don't [disable sharing in crowdsec](https://docs.crowdsec.net/docs/next/configuration/crowdsec_configuration/#sharing), you may need to mention that [this](https://docs.crowdsec.net/docs/central_api/intro/#signal-meta-data) is sent to crowdsec in your privacy policy.
1. Install crowdsec and the ZoeyVid/npmplus collection, for example by using the crowdsec container at the end of the compose.yaml; you may also want to install [this](https://app.crowdsec.net/hub/author/crowdsecurity/collections/http-dos), but be warned of false positives
2. Set LOGROTATE to `true` in your `compose.yaml` and redeploy
3. Open `/opt/crowdsec/conf/acquis.d/npmplus.yaml` (path may be different depending how you installed crowdsec) and fill it with:
```yaml
filenames:
  - /opt/npmplus/nginx/logs/*.log
labels:
  type: npmplus
---
listen_addr: 0.0.0.0:7422
appsec_config: crowdsecurity/appsec-default
name: appsec
source: appsec
labels:
  type: appsec
#---
# If you use open-appsec, uncomment the section below.
# If connecting to open-appsec cloud, you must edit the default 'log trigger'
# in the cloud dashboard: check "Log to > gateway / agent" and click 'enforce'.
# Otherwise, no intrusion events will be logged to the local agent
# for CrowdSec to process.
#source: file
#filenames:
#  - /opt/openappsec/logs/cp-nano-http-transaction-handler.log*
#labels:
#  type: openappsec
```
4. Make sure to use `network_mode: host` in your compose file for the NPMplus container
5. Run `docker exec crowdsec cscli bouncers add npmplus` and save the api key of the output
6. Open `/opt/npmplus/crowdsec/crowdsec.conf`
7. Set `ENABLED` to `true`
8. Use the output of step 5 as `API_KEY`
9. Save the file
10. Redeploy the `compose.yaml`
11. It is recommended to block at the earliest possible point, so if possible set up a firewall bouncer: https://docs.crowdsec.net/u/bouncers/firewall, make sure to also include the docker iptables in the firewall bouncer config
12. Note that when using crowdsec requests will always be buffered, so setting `proxy_(request_)buffering` to off will not work
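Steps 6-9 then amount to something like the following; the key names `ENABLED` and `API_KEY` come from the steps above, while the exact file format of `crowdsec.conf` is an assumption here:
```
# sketch of /opt/npmplus/crowdsec/crowdsec.conf after steps 6-9; the key=value
# format is an assumption, only the key names come from this README
ENABLED=true
API_KEY=your-api-key-from-step-5
```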
## Use of external php-fpm (recommended)
1. Create a new Proxy Host with some dummy data in the details tab (since these get fully ignored)
2. Make other settings (like TLS)
3. Create a custom location `/`, set the scheme to `path`, put in the path, then press the gear button and fill this in (edit the last line):
```
location ~* [^/]\.php(?:$|/) {
    fastcgi_split_path_info ^(.*\.php)(/.*)$;
    try_files $fastcgi_script_name =404;
    fastcgi_pass ...; # set this to the address of your php-fpm (socket/tcp): https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass
}
```
## Use of inbuilt php-fpm (not recommended)
1. First enable php inside your compose file (you can add more php extensions using envs in the compose file; a sketch follows below)
2. Set the forwarding port to a php version that is supported by NPMplus (like 83/84/85)
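As a sketch, enabling the inbuilt php-fpm in the compose file could look like this; the env name `PHP84` is hypothetical (the old README used `PHP82`/`PHP83`), so check the shipped compose.yaml for the real names:
```yaml
# sketch only: the env name below is hypothetical, verify it against the shipped compose.yaml
services:
  npmplus:
    environment:
      - PHP84=true # hypothetical: enable the inbuilt PHP 8.4 (forward port 84)
```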
## Comments on some buttons
- Forward Hostname / IP / Path: if the scheme is set to `path` you can just put a path here and nginx works as a file server; otherwise you need to input an ip/domain, and you can also append a path to the ip/domain like `127.0.0.1/path` to proxy to a subpath.
- For custom locations with a set path, dns will only be refreshed on nginx reloads and the path of the location will be stripped. So a request `GET /cdf/abc` to a custom location `/cdf` which proxies to `127.0.0.1/abc` will proxy to `127.0.0.1/abc/abc`, a custom location `/cdf/` which proxies to `127.0.0.1/` will proxy to `127.0.0.1/abc` and a custom location `/cdf` which proxies to `127.0.0.1` will proxy to `127.0.0.1/cdf/abc`
- If the scheme is set to `path`, a path ending with a `/` will be searched relative to the custom location (it uses nginx alias) and a path ending without a `/` will be searched relative to the main `/` location (it uses nginx root)
- Forward Port (optional): port of the upstream, or the php version if the scheme is `path`
- Send noindex header and block some user agents: This does what it says; it appends a header to all responses which says that the site should not be indexed, while blocking requests of crawlers based on the user agent sent with the request
- Disable Crowdsec Appsec: this will disable crowdsec appsec only for one host/one location; it will only do something if appsec is configured
- Disable Response Buffering: Most of the time you want to keep buffering enabled; you may want to disable this if you, for example, want to stream videos and you have a fast and stable connection to the upstream server. This affects the connection from the upstream server to NPMplus
- Disable Request Buffering: Most of the time you want to keep buffering enabled, and request buffering will always be enabled if crowdsec appsec is enabled; you may want to disable this if you, for example, want to upload huge files and have a fast and stable connection to the upstream server. This affects the connection from NPMplus to the upstream server
- Enable compression by upstream: this will allow the backend to compress files; I recommend you keep this disabled, but there may be cases where this is needed because the upstream otherwise misbehaves for some reason (like collabora in nextcloud all-in-one)
- Enable fancyindex: this will enable fancyindex, which shows an index of all files in the folder if there is no index file; only enable this if you know what you are doing and you need the index
- Websockets: this button was removed, websockets are now always enabled
- Reuse Key: this will make the new cert always keep its key unless you force renew it; I recommend you keep this disabled (i.e. not keep the key); a reason to keep the key would be TLSA/pubkey pinning
- TLS to upstream (for Streams): This can be used if your stream target already uses tls but you want to override it with a NPMplus cert; do not enable this if you don't set a new cert, since that would downgrade the connection to be unencrypted
- X-Frame-Options: controls the X-Frame-Options header; none will remove the header, SAMEORIGIN/DENY will set it to these values and upstream will keep what the upstream sends
## Examples of implementing some services using auth_request
### Anubis
1. Deploy an anubis container (see the compose.yaml for an example and information)
2. In the mounted anubis bot policy file the "status_codes" should be set to 401 and 403, like this:
```yaml
status_codes:
  CHALLENGE: 401
  DENY: 403
```
3. Set the AUTH_REQUEST_ANUBIS_UPSTREAM env in the NPMplus compose.yaml and select anubis in the Auth Request selection, no custom/advanced config/locations needed
4. You can override the "allow", "checking" and "blocked" images used by default by setting the `AUTH_REQUEST_ANUBIS_USE_CUSTOM_IMAGES` env to true and putting your custom images as happy.webp, pensive.webp and reject.webp into /opt/npmplus/anubis
### Tinyauth
1. Set the AUTH_REQUEST_TINYAUTH_UPSTREAM and AUTH_REQUEST_TINYAUTH_DOMAIM env in the NPMplus compose.yaml and select tinyauth in the Auth Request selection, no custom/advanced config/locations needed
### Authelia (modern)
1. Set the AUTH_REQUEST_AUTHELIA_UPSTREAM env in the NPMplus compose.yaml and select authelia (modern) in the Auth Request selection, no custom/advanced config/locations needed
### Authentik
1. Set the AUTH_REQUEST_AUTHENTIK_UPSTREAM env (and optional AUTH_REQUEST_AUTHENTIK_DOMAIN env if you use the "domain level" variant in authentik, do not set this env if you use the "single application" variant) in the NPMplus compose.yaml and select authentik/authentik-send-basic-auth in the Auth Request selection, no custom/advanced config/locations needed
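For any of these auth_request variants the envs are set in the compose file; a sketch with the env names taken from the sections above and illustrative addresses:
```yaml
# sketch only: env names come from the sections above, addresses are illustrative
services:
  npmplus:
    environment:
      - AUTH_REQUEST_ANUBIS_UPSTREAM=127.0.0.1:8923 # illustrative address of your anubis container
      #- AUTH_REQUEST_AUTHELIA_UPSTREAM=127.0.0.1:9091 # illustrative address of your authelia instance
```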
## Load Balancing
1. Open and edit this file: `/opt/npmplus/custom_nginx/http_top.conf` (or `/opt/npmplus/custom_nginx/stream_top.conf` for streams), if you changed /opt/npmplus to a different path make sure to change the path to fit
2. Set the upstream directive(s) with the servers that should be load balanced (https://nginx.org/en/docs/http/ngx_http_upstream_module.html / https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html); they need to run the same protocol (either http(s) or grpc(s) for proxy hosts, or tcp/udp/proxy protocol for streams), like this for example:
```
upstream server1 {
    server 127.0.0.1:44;
    server 127.0.0.1:33;
    server 127.0.0.1:22;
    server 192.158.168.11:44 backup;
}
```
3. Configure your proxy host/stream as usual in the UI, but set the hostname to server1 (or server2 or however you named it) and keep the forward port field empty (since you set the ports within the upstream directive)
## Geoblocking example (mainly community support)
1. set the `NGINX_LOAD_GEOIP2_MODULE` env to true and redeploy NPMplus
2. deploy a geoipupdate container (see the compose.yaml for an example, create credentials [here](https://www.maxmind.com/en/geolite2/signup))
3. open and edit this file: `/opt/npmplus/custom_nginx/http_top.conf` (if you changed /opt/npmplus to a different path make sure to change the path to fit), and add something like this:
```yaml
geoip2 /data/goaccess/geoip/GeoLite2-Country.mmdb {
    auto_reload 60m;
    $geoip2_country_iso_code country iso_code;
}
# whitelist example, you can add as many country codes as you want, country code list: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#XY
#map $geoip2_country_iso_code $geoip2_country_rule {
# default no;
# AA yes;
# XY yes;
# '' yes; # if you want to allow IPs with unknown country codes, if you don't do this make sure to allow private IPs
#}
# blacklist example, you can add as many country codes as you want, country code list: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#XY
#map $geoip2_country_iso_code $geoip2_country_rule {
# default yes;
# AA no;
# XY no;
# '' no; # if you want to block IPs with unknown country codes, if you do this make sure to allow private IPs
#}
# uncomment if you block/don't allow IPs with unknown country codes
#geo $is_private_ip {
# default no;
# 127.0.0.0/8 yes;
# 10.0.0.0/8 yes;
# 172.16.0.0/12 yes;
# 192.168.0.0/16 yes;
# 169.254.0.0/16 yes;
# ::1/128 yes;
# fc00::/7 yes;
# fec0::/10 yes;
#}
```
4a. to set it per location: create a custom location / (or the location you want to use), set your proxy settings, then press the gear button and paste the following into the new text field; you may want to adjust the last lines (do not use the advanced tab with this example as it may break cert renewals):
```yaml
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($geoip2_country_rule = no) {
    return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
4b. to set it for an entire host: put this in the advanced tab:
```yaml
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($request_uri ~* "^/\.well-known/acme-challenge/") {
    set $geoip2_country_rule yes;
}
if ($geoip2_country_rule = no) {
    return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
4c. to set it for all http hosts of the same type: put this in the `custom_nginx/server_proxy.conf` / `custom_nginx/server_redirect.conf` / `custom_nginx/server_dead.conf` file(s):
```yaml
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($request_uri ~* "^/\.well-known/acme-challenge/") {
    set $geoip2_country_rule yes;
}
if ($geoip2_country_rule = no) {
    return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
4d. to set it for all http hosts: put this in the `custom_nginx/server_http.conf` file:
```yaml
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($request_uri ~* "^/\.well-known/acme-challenge/") {
    set $geoip2_country_rule yes;
}
if ($geoip2_country_rule = no) {
    return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
5. you can create multiple rule lists by adding multiple map directives, but you need to use a unique name instead of `$geoip2_country_rule` for each rule list (you also need the unique name in the custom locations), as in the sketch below
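For example, a second whitelist alongside the first could look like this sketch (it goes into the same `http_top.conf`; the variable name is freely chosen):
```yaml
# sketch: a second rule list with its own unique variable name
map $geoip2_country_iso_code $geoip2_country_rule_strict {
    default no;
    AA yes; # replace with the country code(s) this stricter list should allow
}
```
Then reference `$geoip2_country_rule_strict` instead of `$geoip2_country_rule` in the locations/hosts where the stricter list should apply.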
## Prerun scripts (EXPERT option) - if you don't know what this is, ignore it
If you need to run scripts before NPMplus launches, put them under `/opt/npmplus/prerun/*.sh` (please add `#!/usr/bin/env sh` / `#!/usr/bin/env bash` to the top of each script). You need to create this folder yourself, and also set the `ENABLE_PRERUN` env to `true`. A minimal sketch follows.
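A minimal sketch of such a script, with an illustrative file name:
```bash
#!/usr/bin/env sh
# /opt/npmplus/prerun/10-example.sh - file name is illustrative; NPMplus runs
# the scripts in this folder before launching when ENABLE_PRERUN is true
echo "prerun: apply local tweaks here"
```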
## Notes on Cloudflare
- I strongly advise against using cloudflare proxy/tunnel before NPMplus (so between the users and NPMplus `users <=> cloudflare <=> NPMplus`)
- Why?
- cloudflare acts like a "man in the middle" (if you want, you can also call it a "wanted man-in-the-middle attack"); this means all traffic going from your users to you/from you to your users will be decrypted by cloudflare before being encrypted again and forwarded to you/your users; whether you want this is your decision (security, privacy, etc.)
- many optimizations done by NPMplus will, because of this, only apply between cloudflare and NPMplus, so your users won't notice them
- cloudflare overrides many things done/configured by NPMplus (like headers (including HSTS), HTTP/3 (QUIC), TLS settings and more), so you might need to configure them again in Cloudflare, but this is not always possible
- cloudflare has a limit of 100MB per connection, so uploading/downloading big files may cause problems if no chunking is used
- because the data does not take a direct path between your users and you, the connection time will increase
- cloudflare only forwards/protects http(s) traffic on port 80/443 to you; services running on other ports/different protocols are not forwarded/protected (STUN/TURN/SSH)
- cloudflare can't protect you if the attacker knows your real ip, as cloudflare only rewrites your dns entries to itself and then acts as a reverse proxy; direct ip connections to you are not protected (use a firewall like ufw, make sure to allow 80/tcp and 443/tcp+udp for NPMplus, and if possible don't open SSH and the NPMplus GUI to the internet, but secure them behind a VPN like Wireguard)
- if you need a WAF => use [crowdsec](#crowdsec)
- if you want to use the "I'm under attack mode" to protect you from (ai) web scrapers => use [anubis](#anubis-config-supported)
- What are reasons for using cloudflare?
- The points above don't matter to you (enough) and:
- you depend on a cloudflare feature not mentioned above which you can't replace
- or you are under (a) DDoS-attack(s) which you can't handle yourself, and the attacker does not know your real ip/does not use it to attack you, but instead your domain: you could use cloudflare as the dns nameserver for your domain with the proxy disabled and only enable it while you are under attack (this only works if the attacker did not cache your real ip)
- or you want to hide your IP and only expose http(s) services; but then don't use NPMplus at all: install cloudflared, use cloudflare tunnels and point them directly at your upstreams; this way you can still manage everything in a GUI and you don't even need to expose any ports
- If you still want to use cloudflare proxy make sure to set `your domain => SSL/TLS => SSL/TLS encryption => Current encryption mode => Configure` to "Full (strict)"
- Just using cloudflare as a dns nameserver provider for your domain is fine
- If you use cloudflare to forward mails to your inbox, note that cloudflare also acts as man-in-the-middle in this case
## Hints for Your Privacy Policy
**Note: This is not legal advice. The following points are intended to give you hints and help you identify areas that may be relevant to your privacy policy. This list may not be complete or correct.**
1. NPMplus **always** writes the nginx error logs to your Docker logs; it uses the error level “warn” (so every error that nginx and the nginx modules mark as level “warn” or higher will be logged). As these logs contain user information (like IPs), you should mention them in your privacy policy. With the default installation, no user data should leave your system because of NPMplus (except for data sent to your backends, as this is the task of a reverse proxy); this should be the only data created by NPMplus containing user information by default.
2. If you enable `LOGROTATE`, the access and error (also level “warn”) logs will be written to your disk, rotated every 25 hours and deleted based on your configured number of rotations. The access logs use these formats: [http](https://github.com/ZoeyVid/NPMplus/blob/c6a2df722390eb3f4377c603e16587fe8c74e54f/rootfs/usr/local/nginx/conf/nginx.conf#L30) and [stream](https://github.com/ZoeyVid/NPMplus/blob/c6a2df722390eb3f4377c603e16587fe8c74e54f/rootfs/usr/local/nginx/conf/nginx.conf#L249). These include user information (like IPs), so make sure to also mention that these exist and what you are doing with them.
3. If you use crowdsec, and you do **not** [disable sharing in crowdsec](https://docs.crowdsec.net/docs/next/configuration/crowdsec_configuration/#sharing), you need to mention that [this](https://docs.crowdsec.net/docs/central_api/intro/#signal-meta-data) is sent to crowdsec in your privacy policy.
4. If you're blocking IPs — for example, using access lists, GeoIP filtering, or CrowdSec block lists — make sure to mention this as well.
5. If GoAccess is enabled, it processes access logs to generate statistics, which are saved on disk for a time you can configure. These statistics include user information (like IPs), so make sure to also mention this.
6. If you use the PHP-FPM option, error logs from PHP-FPM will also be written to Docker logs. These include user information (like IPs), so make sure to also mention this.
7. If you use open-appsec (`NGINX_LOAD_OPENAPPSEC_ATTACHMENT_MODULE`), you should also include information about it; since I don't use it myself, I can't give you any further hints.
8. If you collect any user information (like through other custom nginx modules, modules you can load via env, lua scripts, etc.), also mention it.
9. If you use the caddy http to https redirect container, you should also mention the data collected by it, since it will also collect (error) logs.
10. If you use anubis, see here: https://anubis.techaro.lol/docs/admin/configuration/impressum
11. If you do any extra custom/advanced configuration/modification which is in some way related to user data, then yes, keep in mind to also mention this.
12. Anything else you do with user data should also be mentioned. (Like what your backend does or any other proxies in front of NPMplus (like cloudflare, still not recommended), how data is stored, duration, ads, analytic tools, how data is handled if they contact you, by who/which provider, etc.)
13. I don't think this needs to be mentioned, but you can include it if you want to be thorough (note: this does not apply if you're using Let's Encrypt, as they no longer support OCSP): Some clients (like Firefox) send OCSP requests to the certificate authority (CA) by default if the CA includes OCSP URLs in the certificate. This behavior can be disabled by users in Firefox. In my opinion, it doesn't need to be mentioned, as no data is sent to you — the client communicates directly with the CA. The check is initiated by the client itself; it's neither requested nor required by you. Your certificate simply indicates that the client can perform this check if it chooses to.
14. Also optional and, in my opinion, not required: Some information about the data stored by the nameservers running your domain. I don't think this should be required, since in most cases there's a provider between the users and your nameserver acting as a proxy. This means the DNS requests of your users are hidden behind their provider. It's the provider who should explain to their users how they handle data in their role as a "DNS proxy."
## What connections can be expected from the NPMplus container?
- to your clients
- to your upstreams
- to your acme/ocsp server
- to github for a daily update check
- if not disabled, to gravatar for profile pictures
- if used, to your OIDC provider
- if used, to pypi to download certbot plugins
- if used, to your dns provider for acme dns challenges
- if used, to www.site24x7.com for the reachability check
- if enabled, to cloudflare to download their IPs
- if enabled, to the crowdsec (container) lapi
- if you see more/others please report them
## Features and Project Goal of Upstream
I created this project to fill a personal need to provide users with an easy way to accomplish reverse proxying hosts with TLS termination and it had to be so easy that a monkey could do it. This goal hasn't changed. While advanced configuration options are available, they remain entirely optional. The core idea is to keep things as simple as possible, lowering the barrier to entry for everyone.
- Beautiful and Secure Admin Interface based on [Tabler](https://tabler.github.io)
- Easily create forwarding domains, redirections, streams and 404 hosts without knowing anything about Nginx
- Free trusted TLS certificates using Certbot (Let's Encrypt/other CAs) or provide your own custom TLS certificates
- Advanced Nginx configuration available for super users
- User management, permissions and audit log
## Contributing
All are welcome to create pull requests for this project, but this does not mean that they will be merged, so better ask (via a Discussion) whether your PR would be merged before creating one; typos and translations are excluded from this.
# List of new features
# Please report issues first to this fork before reporting them to the upstream repository
## Getting Help
1. [Support/Questions](https://github.com/ZoeyVid/NPMplus/discussions) (preferred)
2. [Discord](https://discord.gg/y8DhYhv427) (only in the #support-npmplus forum channel, keep other channels free from NPMplus)
3. [Reddit](https://reddit.com/r/NPMplus) (not recommended)
4. [Bugs](https://github.com/ZoeyVid/NPMplus/issues) (only for feature requests and reproducible bugs)
- Supports HTTP/3 (QUIC) protocol.
- Supports CrowdSec IPS. Please see [here](https://github.com/ZoeyVid/NPMplus#crowdsec) to enable it.
- goaccess included, see compose.yaml to enable, runs by default on https://<ip>:91 (nginx config from [here](https://github.com/xavier-hernandez/goaccess-for-nginxproxymanager/blob/main/resources/nginx/nginx.conf))
- Supports ModSecurity, with coreruleset as an option. You can configure ModSecurity/coreruleset by editing the files in the `/opt/npm/etc/modsecurity` folder.
- If the core ruleset blocks valid requests, please check the `/opt/npm/etc/modsecurity/crs-setup.conf` file.
- Try to whitelist the Content-Type you are sending (for example, `application/activity+json` for Mastodon and `application/dns-message` for DoH).
- Try to whitelist the HTTP request method you are using (for example, `PUT` is blocked by default, which also affects NPM).
<!---
- Note: To fix [this issue](https://github.com/SpiderLabs/ModSecurity/issues/2848), instead of running `nginx -s reload`, this fork stops nginx and starts it again. This will result in a 502 error when you update your hosts. See https://github.com/ZoeyVid/NPMplus/issues/296 and https://github.com/ZoeyVid/NPMplus/issues/283.
--->
- Darkmode button in the footer for comfortable viewing (CSS done by [@theraw](https://github.com/theraw))
- Fixes proxy to https origin when the origin only accepts TLSv1.3
- Only enables TLSv1.2 and TLSv1.3 protocols
- Faster creation of TLS certificates can be achieved by eliminating unnecessary Nginx reloads and configuration creations.
- Uses OCSP Stapling for enhanced security
- Resolved dnspod plugin issue
- To migrate manually, delete all dnspod certs and recreate them OR change the credentials file as per the template given [here](https://github.com/ZoeyVid/NPMplus/blob/develop/global/certbot-dns-plugins.js)
- Smaller docker image with alpine-based distribution
- Admin backend interface runs with https
- Default page also runs with https
- Uses [fancyindex](https://gitHub.com/Naereen/Nginx-Fancyindex-Theme) if used as webserver
- Exposes INTERNAL backend api only to localhost
- Basic security headers are added if you enable HSTS (HSTS has always subdomains and preload enabled)
- access.log is disabled by default, unified and moved to `/opt/npm/nginx/access.log`
- Error Log written to console
- `Server` response header hidden
- PHP 8.2/8.3 optional, with the option to add extensions; available packages can be added using envs in the compose file
- Allows different acme servers/certbot config file (/opt/npm/tls/certbot/config.ini)
- Supports up to 99 domains per cert
- Brotli compression can be enabled
- HTTP/2 always enabled with fixed upload
- Allows infinite upload size
- Automatic database vacuum (only sqlite)
- Automatic cleaning of old certbot certs (set FULLCLEAN to true)
- Password reset (only sqlite) using `docker exec -it npmplus password-reset.js USER_EMAIL PASSWORD`
- Supports TLS for MariaDB/MySQL; set `DB_MYSQL_TLS` env to true. Self-signed certificates can be uploaded to `/opt/npm/etc/npm/ca.crt` and `DB_MYSQL_CA` set to `/data/etc/npm/ca.crt` (not tested, unsupported)
- Supports PUID/PGID in network mode host; add `net.ipv4.ip_unprivileged_port_start=0` at the end of `/etc/sysctl.conf`
- Option to set IP bindings for multiple instances in network mode host
- Option to change backend port
- See the composefile for all available options
- If you want to redirect all HTTP traffic to HTTPS, you can use the `compose.override.yaml` file.
## migration
- **NOTE: migrating back to the original is not possible**, so first make a **backup** before migrating, so you can use the backup to switch back
- since many buttons changed, please edit every host you have and click save. (Please also resave it if all buttons/values are fine, to update the host config to fully fit the NPMplus template)
- please delete all dnspod certs and recreate them OR manually change the credentials file (see [here](https://github.com/ZoeyVid/npmplus/blob/develop/global/certbot-dns-plugins.js) for the template)
- since this fork depends on `network_mode: host`, please don't forget to open port 80/tcp, 443/tcp and 443/udp (and maybe 81/tcp) in your firewall
- if you have a healthcheck defined in your compose yaml file, remove it - this fork defines its own healthcheck in the Dockerfile, so you don't need one in compose anymore
- please report all migration issues you have
# Crowdsec
1. Install crowdsec using this compose file: https://github.com/ZoeyVid/NPMplus/blob/develop/compose.crowdsec.yaml and enable LOGROTATE
2. open `/opt/crowdsec/conf/acquis.d/npmplus.yaml` and fill it with:
```yaml
filenames:
  - /opt/npm/nginx/access.log
labels:
  type: npmplus
---
source: docker
container_name:
  - npmplus
labels:
  type: npmplus
---
source: docker
container_name:
  - npmplus
labels:
  type: modsecurity
---
listen_addr: 0.0.0.0:7422
appsec_config: crowdsecurity/appsec-default
name: appsec
source: appsec
labels:
  type: appsec
```
3. make sure to use `network_mode: host` in your compose file
4. run `docker exec crowdsec cscli bouncers add npmplus -o raw` and save the output
5. open `/opt/npm/etc/crowdsec/crowdsec.conf`
6. set `ENABLED` to `true`
7. use the output of step 4 as `API_KEY`
8. save the file
9. set LOGROTATE to `true` in your `compose.yaml`
10. redeploy the `compose.yaml`
# coreruleset plugins
1. Download the plugin (all files inside the `plugins` folder of the git repo), usually `<plugin-name>-before.conf`, `<plugin-name>-config.conf` and `<plugin-name>-after.conf`, and sometimes `<plugin-name>.data` and/or `<plugin-name>.lua` or similar files
2. put them into the `/opt/npm/etc/modsecurity/crs-plugins` folder
3. maybe open the `/opt/npm/etc/modsecurity/crs-plugins/<plugin-name>-config.conf` and configure the plugin
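After step 2 the folder would then contain something like this (plugin name illustrative):
```
/opt/npm/etc/modsecurity/crs-plugins/
├── <plugin-name>-before.conf
├── <plugin-name>-config.conf
├── <plugin-name>-after.conf
└── <plugin-name>.data   # only if the plugin ships one
```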
# Use as webserver
1. Create a new Proxy Host
2. Set `Scheme` to `https`, `Forward Hostname / IP` to `0.0.0.0`, `Forward Port` to `1` and enable `Websockets Support` (you can also use other values, since these get fully ignored)
3. Maybe set an Access List
4. Make your TLS Settings
5. a) Custom Nginx Configuration (advanced tab), which looks like the following for a file server:
- Note: the slash at the end of the file path is important
```
location / {
    include conf.d/include/always.conf;
    alias /var/www/<your-html-site-folder-name>/;
    fancyindex off; # alternative to nginx's "index" option (looks better and has more options)
}
```
b) Custom Nginx Configuration (advanced tab), which looks like the following for a file server and **php**:
- Note: the slash at the end of the file path is important
- Note: first enable `PHP82` and/or `PHP83` inside your compose file
- Note: you can replace `fastcgi_pass php82;` with `fastcgi_pass php83;`
- Note: you can add more php extensions using envs set in the compose file
```
location / {
    include conf.d/include/always.conf;
    alias /var/www/<your-html-site-folder-name>/;
    fancyindex off; # alternative to nginx's "index" option (looks better and has more options)
    location ~ [^/]\.php(/|$) {
        fastcgi_pass php82;
        fastcgi_split_path_info ^(.+?\.php)(/.*)$;
        if (!-f $document_root$fastcgi_script_name) {
            return 404;
        }
    }
}
```
# custom acme server
1. Open this file (e.g. with `nano`): `/opt/npm/ssl/certbot/config.ini`
2. uncomment the server line and change it to your acme server
3. maybe set eab keys
4. create your cert using the npm web ui
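A sketch of the relevant lines in `config.ini`; `server`, `eab-kid` and `eab-hmac-key` are standard certbot cli.ini option names, the URL is illustrative:
```
# /opt/npm/ssl/certbot/config.ini (sketch)
server = https://acme.example.org/directory # directory URL of your acme server (illustrative)
# uncomment and fill in if your CA requires external account binding:
#eab-kid = your-eab-key-id
#eab-hmac-key = your-eab-hmac-key
```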
# Quick Setup
1. Install Docker and Docker Compose (or portainer)
- [Docker Install documentation](https://docs.docker.com/engine)
- [Docker Compose Install documentation](https://docs.docker.com/compose/install/linux)
2. Create a compose.yaml file similar to [this](https://github.com/ZoeyVid/NPMplus/blob/develop/compose.yaml) (or use it as a portainer stack):
3. Bring up your stack by running (or deploy your portainer stack)
```bash
docker compose up -d
```
4. Log in to the Admin UI
When your docker container is running, connect to it on port `81` for the admin interface.
Sometimes this can take a little while because of the entropy needed for key generation.
You may need to open port 81 in your firewall.
You may need to use another IP-Address.
[https://127.0.0.1:81](https://127.0.0.1:81)
Default Admin User:
```
Email: admin@example.org
Password: iArhP1j7p1P6TA92FA2FMbbUGYqwcYzxC4AVEe12Wbi94FY9gNN62aKyF1shrvG4NycjjX9KfmDQiwkLZH1ZDR9xMjiG2QmoHXi
```
Immediately after logging in with this default user you will be asked to modify your details and change your password.
### prerun scripts (EXPERT option) - if you don't know what this is, ignore it
run order: entrypoint.sh (prerun scripts) => start.sh => launch.sh <br>
if you need to run scripts before NPMplus launches put them under: `/opt/npm/etc/prerun/*.sh` (please add `#!/bin/sh` / `#!/bin/bash` to the top of the script) <br>
you need to create this folder yourself - **NOTE:** I won't help you create those patches/scripts; if you need them, you also need to know how to create them
## Contributing
All are welcome to create pull requests for this project, against the `develop` branch.
CI is used in this project. All PRs must pass CI before being considered. After passing,
docker builds for PRs are available on ghcr for manual verification.
## Contributors/Sponsor upstream NPM
Special thanks to [all of our contributors](https://github.com/NginxProxyManager/nginx-proxy-manager/graphs/contributors).
If you want to sponsor them, please see [here](https://github.com/NginxProxyManager/nginx-proxy-manager/blob/master/README.md).
# Please report bugs first to this fork before reporting them to the upstream repository
## Getting Support
1. [Found a bug?](https://github.com/ZoeyVid/NPMplus/issues)
2. [Discussions](https://github.com/ZoeyVid/NPMplus/discussions)
<!---
3. [Reddit](https://reddit.com/r/nginxproxymanager)
--->
backend/.gitignore
@@ -1,8 +0,0 @@
config/development.json
data/*
yarn-error.log
tmp
certbot.log
node_modules
core.*
@@ -1,84 +0,0 @@
import cookieParser from "cookie-parser";
import express from "express";
import fileUpload from "express-fileupload";
import { debug, express as logger } from "./logger.js";
import mainRoutes from "./routes/main.js";

/**
 * App
 */
const app = express();
app.use(
    fileUpload({
        limits: { fileSize: 1024 * 1024 },
    }),
);
app.use(cookieParser());
app.use(express.json());
app.use(express.urlencoded({ extended: true }));

/**
 * General Logging, BEFORE routes
 */
app.disable("x-powered-by");
app.set("trust proxy", ["loopback", "linklocal", "uniquelocal"]); // app.set, not app.enable: enable() ignores the value argument
app.enable("strict routing");

app.use((req, res, next) => {
    if (["same-origin", undefined, "none"].includes(req.get("sec-fetch-site"))) {
        return next();
    }
    if (
        req.method === "GET" &&
        req.path === "/api/oidc/callback" &&
        req.get("sec-fetch-mode") === "navigate" &&
        req.get("sec-fetch-dest") === "document"
    ) {
        return next();
    }
    res.status(403).json({
        error: { message: "Rejected Sec-Fetch-Site Value." },
    });
});

// pretty print JSON when not live
app.set("json spaces", 2);

app.use("/", mainRoutes);

// production error handler
// no stacktraces leaked to user
app.use((err, req, res, _) => {
    const payload = {
        error: {
            code: err.status,
            message: err.public ? err.message : "Internal Error",
        },
    };
    if (typeof err.message_i18n !== "undefined") {
        payload.error.message_i18n = err.message_i18n;
    }
    if ((req.baseUrl + req.originalUrl).includes("nginx/certificates")) {
        payload.debug = {
            stack: typeof err.stack !== "undefined" && err.stack ? err.stack.split("\n") : null,
            previous: err.previous,
        };
    }
    // Not every error is worth logging - but this is good for now until it gets annoying.
    if (typeof err.stack !== "undefined" && err.stack) {
        debug(logger, err.stack);
        if (typeof err.public === "undefined" || !err.public) {
            logger.warn(`${req.method.toUpperCase()} ${req.originalUrl}: ${err}`);
        }
    }
    res.status(err.status || 500).send(payload);
});

export default app;
@@ -1,74 +0,0 @@
{
    "$schema": "https://biomejs.dev/schemas/2.4.5/schema.json",
    "vcs": {
        "enabled": true,
        "clientKind": "git",
        "useIgnoreFile": true
    },
    "files": {
        "ignoreUnknown": false,
        "includes": ["**/*.ts", "**/*.tsx", "**/*.js", "**/*.jsx", "!**/dist/**/*"]
    },
    "formatter": {
        "enabled": true,
        "indentStyle": "tab",
        "indentWidth": 4,
        "lineWidth": 120,
        "formatWithErrors": true
    },
    "assist": {
        "actions": {
            "source": {
                "organizeImports": {
                    "level": "on",
                    "options": {
                        "groups": [
                            ":BUN:",
                            ":NODE:",
                            ["npm:*", "npm:*/**"],
                            ":PACKAGE_WITH_PROTOCOL:",
                            ":URL:",
                            ":PACKAGE:",
                            ["/src/*", "/src/**"],
                            ["/**"],
                            ["#*", "#*/**"],
                            ":PATH:"
                        ]
                    }
                }
            }
        }
    },
    "linter": {
        "enabled": true,
        "rules": {
            "recommended": true,
            "correctness": {
                "useUniqueElementIds": "off"
            },
            "suspicious": {
                "noExplicitAny": "off"
            },
            "performance": {
                "noDelete": "off"
            },
            "nursery": "off",
            "a11y": {
                "useSemanticElements": "off",
                "useValidAnchor": "off"
            },
            "style": {
                "noParameterAssign": "error",
                "useAsConstAssertion": "error",
                "useDefaultParameterLast": "error",
                "useEnumInitializers": "error",
                "useSelfClosingElements": "error",
                "useSingleVarDeclarator": "error",
                "noUnusedTemplateLiteral": "error",
                "useNumberNamespace": "error",
                "noInferrableTypes": "error",
                "noUselessElse": "error"
            }
        }
    }
}
@@ -1,19 +0,0 @@
# Certbot dns-plugins
This file contains info about available Certbot DNS plugins.
This only works for plugins which use the standard argument structure, so:
`--authenticator <plugin-name> --<plugin-name>-credentials <FILE> --<plugin-name>-propagation-seconds <number>`
File Structure:
```json
{
    "cloudflare": {
        "name": "Name displayed to the user",
        "package_name": "Package name in PyPi repo",
        "credentials": "Template of the credentials file",
        "full_plugin_name": "The full plugin name as used in the commandline with certbot, e.g. 'dns-njalla'"
    },
    ...
}
```
@@ -1,494 +0,0 @@
{
"acmedns": {
"name": "ACME-DNS",
"package_name": "certbot-dns-acmedns",
"credentials": "dns_acmedns_api_url = http://acmedns-server/\ndns_acmedns_registration_file = /data/tls/certbot/acme-registration.json",
"full_plugin_name": "dns-acmedns"
},
"active24": {
"name": "Active24",
"package_name": "certbot-dns-active24",
"credentials": "dns_active24_api_key = <identifier>\ndns_active24_secret = <secret>",
"full_plugin_name": "dns-active24"
},
"aliyun": {
"name": "Aliyun",
"package_name": "certbot-dns-aliyun",
"credentials": "dns_aliyun_access_key = 12345678\ndns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef",
"full_plugin_name": "dns-aliyun"
},
"arvan": {
"name": "ArvanCloud",
"package_name": "certbot-dns-arvan",
"credentials": "dns_arvan_key = Apikey xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
"full_plugin_name": "dns-arvan"
},
"azure": {
"name": "Azure",
"package_name": "certbot-dns-azure",
"credentials": "# This plugin supported API authentication using either Service Principals or utilizing a Managed Identity assigned to the virtual machine.\n# Regardless which authentication method used, the identity will need the “DNS Zone Contributor” role assigned to it.\n# As multiple Azure DNS Zones in multiple resource groups can exist, the config file needs a mapping of zone to resource group ID. Multiple zones -> ID mappings can be listed by using the key dns_azure_zoneX where X is a unique number. At least 1 zone mapping is required.\n\n# Using a service principal (option 1)\ndns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\ndns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9\ndns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7\n\n# Using used assigned MSI (option 2)\n# dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\n\n# Using system assigned MSI (option 3)\n# dns_azure_msi_system_assigned = true\n\n# Zones (at least one always required)\ndns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1\ndns_azure_zone2 = example.org:/subscriptions/99800903-fb14-4992-9aff-12eaf2744622/resourceGroups/dns2",
"full_plugin_name": "dns-azure"
},
"baidu": {
"name": "baidu",
"package_name": "certbot-dns-baidu",
"credentials": "dns_baidu_access_key = 12345678\ndns_baidu_secret_key = 1234567890abcdef1234567890abcdef",
"full_plugin_name": "dns-baidu"
},
"beget": {
"name": "Beget",
"package_name": "certbot-beget-plugin",
"credentials": "# Beget API credentials used by Certbot\nbeget_plugin_username = username\nbeget_plugin_password = password",
"full_plugin_name": "beget-plugin"
},
"bunny": {
"name": "bunny.net",
"package_name": "certbot-dns-bunny",
"credentials": "# Bunny API token used by Certbot (see https://dash.bunny.net/account/settings)\ndns_bunny_api_key = xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"full_plugin_name": "dns-bunny"
},
"cdmon": {
"name": "cdmon",
"package_name": "certbot-dns-cdmon",
"credentials": "dns_cdmon_api_key=your-cdmon-api-token\ndns_cdmon_domain=your_domain_is_optional",
"full_plugin_name": "dns-cdmon"
},
"cloudflare": {
"name": "Cloudflare",
"package_name": "certbot-dns-cloudflare",
"credentials": "# Cloudflare restricted API token (recommended)\ndns_cloudflare_api_token=123\n\n# OR Cloudflare Global API Key (not recommended)\n#dns_cloudflare_email=cloudflare@example.org\n#dns_cloudflare_api_key=123",
"full_plugin_name": "dns-cloudflare"
},
"cloudns": {
"name": "ClouDNS",
"package_name": "certbot-dns-cloudns",
"credentials": "# Target user ID (see https://www.cloudns.net/api-settings/)\n\tdns_cloudns_auth_id=1234\n\t# Alternatively, one of the following two options can be set:\n\t# dns_cloudns_sub_auth_id=1234\n\t# dns_cloudns_sub_auth_user=foobar\n\n\t# API password\n\tdns_cloudns_auth_password=password1",
"full_plugin_name": "dns-cloudns"
},
"cloudxns": {
"name": "CloudXNS",
"package_name": "certbot-dns-cloudxns",
"credentials": "dns_cloudxns_api_key = 1234567890abcdef1234567890abcdef\ndns_cloudxns_secret_key = 1122334455667788",
"full_plugin_name": "dns-cloudxns"
},
"constellix": {
"name": "Constellix",
"package_name": "certbot-dns-constellix",
"credentials": "dns_constellix_apikey = 5fb4e76f-ac91-43e5-f982458bc595\ndns_constellix_secretkey = 47d99fd0-32e7-4e07-85b46d08e70b\ndns_constellix_endpoint = https://api.dns.constellix.com/v1",
"full_plugin_name": "dns-constellix"
},
"corenetworks": {
"name": "Core Networks",
"package_name": "certbot-dns-corenetworks",
"credentials": "dns_corenetworks_username = asaHB12r\ndns_corenetworks_password = secure_password",
"full_plugin_name": "dns-corenetworks"
},
"cpanel": {
"name": "cPanel",
"package_name": "certbot-dns-cpanel",
"credentials": "cpanel_url = https://cpanel.example.com:2083\ncpanel_username = your_username\ncpanel_password = your_password\ncpanel_token = your_api_token",
"full_plugin_name": "cpanel"
},
"ddnss": {
"name": "DDNSS",
"package_name": "certbot-dns-ddnss",
"credentials": "dns_ddnss_token = YOUR_DDNSS_API_TOKEN",
"full_plugin_name": "dns-ddnss"
},
"desec": {
"name": "deSEC",
"package_name": "certbot-dns-desec",
"credentials": "dns_desec_token = YOUR_DESEC_API_TOKEN\ndns_desec_endpoint = https://desec.io/api/v1/",
"full_plugin_name": "dns-desec"
},
"duckdns": {
"name": "DuckDNS",
"package_name": "certbot-dns-duckdns",
"credentials": "dns_duckdns_token=your-duckdns-token",
"full_plugin_name": "dns-duckdns"
},
"digitalocean": {
"name": "DigitalOcean",
"package_name": "certbot-dns-digitalocean",
"credentials": "dns_digitalocean_token = 0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff",
"full_plugin_name": "dns-digitalocean"
},
"directadmin": {
"name": "DirectAdmin",
"package_name": "certbot-dns-directadmin",
"credentials": "directadmin_url = https://my.directadminserver.com:2222\ndirectadmin_username = username\ndirectadmin_password = aSuperStrongPassword",
"full_plugin_name": "directadmin"
},
"dnsimple": {
"name": "DNSimple",
"package_name": "certbot-dns-dnsimple",
"credentials": "dns_dnsimple_token = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw",
"full_plugin_name": "dns-dnsimple"
},
"dnsmadeeasy": {
"name": "DNS Made Easy",
"package_name": "certbot-dns-dnsmadeeasy",
"credentials": "dns_dnsmadeeasy_api_key = 1c1a3c91-4770-4ce7-96f4-54c0eb0e457a\ndns_dnsmadeeasy_secret_key = c9b5625f-9834-4ff8-baba-4ed5f32cae55",
"full_plugin_name": "dns-dnsmadeeasy"
},
"dnspod": {
"name": "DNSPod",
"package_name": "certbot-dnspod",
"credentials": "certbot_dnspod_token = <your token>\ncertbot_dnspod_token_id = <your token id>",
"full_plugin_name": "certbot-dnspod"
},
"domainoffensive": {
"name": "DomainOffensive (do.de)",
"package_name": "certbot-dns-domainoffensive",
"credentials": "dns_domainoffensive_api_token = YOUR_DO_DE_AUTH_TOKEN",
"full_plugin_name": "dns-domainoffensive"
},
"domeneshop": {
"name": "Domeneshop",
"package_name": "certbot-dns-domeneshop",
"credentials": "dns_domeneshop_client_token=YOUR_DOMENESHOP_CLIENT_TOKEN\ndns_domeneshop_client_secret=YOUR_DOMENESHOP_CLIENT_SECRET",
"full_plugin_name": "dns-domeneshop"
},
"dreamhost": {
"name": "Dreamhost",
"package_name": "certbot-dns-dreamhost",
"credentials": "dns_dreamhost_baseurl=API_BASE_URL\ndns_dreamhost_api_key=API_KEY",
"full_plugin_name": "dns-dreamhost"
},
"dynu": {
"name": "Dynu",
"package_name": "certbot-dns-dynu",
"credentials": "dns_dynu_auth_token = YOUR_DYNU_AUTH_TOKEN",
"full_plugin_name": "dns-dynu"
},
"easydns": {
"name": "easyDNS",
"package_name": "certbot-dns-easydns",
"credentials": "dns_easydns_usertoken = YOUR_EASYDNS_USERTOKEN\ndns_easydns_userkey = YOUR_EASYDNS_USERKEY\ndns_easydns_endpoint = https://rest.easydns.net",
"full_plugin_name": "dns-easydns"
},
"edgedns": {
"name": "Akamai Edge DNS",
"package_name": "certbot-plugin-edgedns",
"credentials": "edgedns_client_secret = as3d1asd5d1a32sdfsdfs2d1asd5=\nedgedns_host = sdflskjdf-dfsdfsdf-sdfsdfsdf.luna.akamaiapis.net\nedgedns_access_token = kjdsi3-34rfsdfsdf-234234fsdfsdf\nedgedns_client_token = dkfjdf-342fsdfsd-23fsdfsdfsdf",
"full_plugin_name": "edgedns"
},
"eurodns": {
"name": "EuroDNS",
"package_name": "certbot-dns-eurodns",
"credentials": "dns_eurodns_applicationId = myuser\ndns_eurodns_apiKey = mysecretpassword\ndns_eurodns_endpoint = https://rest-api.eurodns.com/user-api-gateway/proxy",
"full_plugin_name": "dns-eurodns"
},
"firstdomains": {
"name": "First Domains",
"package_name": "certbot-dns-firstdomains",
"credentials": "dns_firstdomains_username = myremoteuser\ndns_firstdomains_password = verysecureremoteuserpassword",
"full_plugin_name": "dns-firstdomains"
},
"freedns": {
"name": "FreeDNS",
"package_name": "certbot-dns-freedns",
"credentials": "dns_freedns_username = myremoteuser\ndns_freedns_password = verysecureremoteuserpassword",
"full_plugin_name": "dns-freedns"
},
"gandi": {
"name": "Gandi Live DNS",
"package_name": "certbot-dns-gandi",
"credentials": "# Gandi personal access token\ndns_gandi_token=PERSONAL_ACCESS_TOKEN",
"full_plugin_name": "dns-gandi"
},
"gcore": {
"name": "Gcore DNS",
"package_name": "certbot-dns-gcore",
"credentials": "dns_gcore_apitoken = 0123456789abcdef0123456789abcdef01234567",
"full_plugin_name": "dns-gcore"
},
"glesys": {
"name": "Glesys",
"package_name": "certbot-dns-glesys",
"credentials": "dns_glesys_user = CL00000\ndns_glesys_password = apikeyvalue",
"full_plugin_name": "dns-glesys"
},
"godaddy": {
"name": "GoDaddy",
"package_name": "certbot-dns-godaddy",
"credentials": "dns_godaddy_secret = 0123456789abcdef0123456789abcdef01234567\ndns_godaddy_key = abcdef0123456789abcdef01234567abcdef0123",
"full_plugin_name": "dns-godaddy"
},
"google": {
"name": "Google",
"package_name": "certbot-dns-google",
"credentials": "{\n\"type\": \"service_account\",\n...\n}",
"full_plugin_name": "dns-google"
},
"googledomains": {
"name": "GoogleDomainsDNS",
"package_name": "certbot-dns-google-domains",
"credentials": "dns_google_domains_access_token = 0123456789abcdef0123456789abcdef01234567\ndns_google_domains_zone = \"example.com\"",
"full_plugin_name": "dns-google-domains"
},
"he": {
"name": "Hurricane Electric",
"package_name": "certbot-dns-hurricane-electric",
"credentials": "dns_hurricane_electric_user = Me\ndns_hurricane_electric_pass = my HE password",
"full_plugin_name": "dns-hurricane_electric"
},
"he-ddns": {
"name": "Hurricane Electric - DDNS",
"package_name": "certbot-dns-he-ddns",
"credentials": "dns_he_ddns_password = verysecurepassword",
"full_plugin_name": "dns-he-ddns"
},
"hetzner": {
"name": "Hetzner",
"package_name": "certbot-dns-hetzner",
"credentials": "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hetzner"
},
"hetzner-cloud": {
"name": "Hetzner Cloud",
"package_name": "certbot-dns-hetzner-cloud",
"credentials": "dns_hetzner_cloud_api_token = your_api_token_here",
"full_plugin_name": "dns-hetzner-cloud"
},
"hostingnl": {
"name": "Hosting.nl",
"package_name": "certbot-dns-hostingnl",
"credentials": "dns_hostingnl_api_key = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hostingnl"
},
"hover": {
"name": "Hover",
"package_name": "certbot-dns-hover",
"credentials": "dns_hover_hoverurl = https://www.hover.com\ndns_hover_username = hover-admin-username\ndns_hover_password = hover-admin-password\ndns_hover_totpsecret = 2fa-totp-secret",
"full_plugin_name": "dns-hover"
},
"infomaniak": {
"name": "Infomaniak",
"package_name": "certbot-dns-infomaniak",
"credentials": "dns_infomaniak_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"full_plugin_name": "dns-infomaniak"
},
"inwx": {
"name": "INWX",
"package_name": "certbot-dns-inwx",
"credentials": "dns_inwx_url = https://api.domrobot.com/xmlrpc/\ndns_inwx_username = your_username\ndns_inwx_password = your_password\ndns_inwx_shared_secret = your_shared_secret optional",
"full_plugin_name": "dns-inwx"
},
"ionos": {
"name": "IONOS",
"package_name": "certbot-dns-ionos",
"credentials": "dns_ionos_prefix = myapikeyprefix\ndns_ionos_secret = verysecureapikeysecret\ndns_ionos_endpoint = https://api.hosting.ionos.com",
"full_plugin_name": "dns-ionos"
},
"ispconfig": {
"name": "ISPConfig",
"package_name": "certbot-dns-ispconfig",
"credentials": "certbot_dns_ispconfig:dns_ispconfig_username = myremoteuser\ncertbot_dns_ispconfig:dns_ispconfig_password = verysecureremoteuserpassword\ncertbot_dns_ispconfig:dns_ispconfig_endpoint = https://you.ipsconfig.host:8080/remote/json.php",
"full_plugin_name": "certbot-dns-ispconfig:dns-ispconfig"
},
"isset": {
"name": "Isset",
"package_name": "certbot-dns-isset",
"credentials": "dns_isset_endpoint=\"https://customer.isset.net/api\"\ndns_isset_token=\"<token>\"",
"full_plugin_name": "dns-isset"
},
"joker": {
"name": "Joker",
"package_name": "certbot-dns-joker",
"credentials": "dns_joker_username = <Dynamic DNS Authentication Username>\ndns_joker_password = <Dynamic DNS Authentication Password>\ndns_joker_domain = <Dynamic DNS Domain>",
"full_plugin_name": "dns-joker"
},
"kas": {
"name": "All-Inkl",
"package_name": "certbot-dns-kas",
"credentials": "dns_kas_user = your_kas_user\ndns_kas_password = your_kas_password",
"full_plugin_name": "dns-kas"
},
"leaseweb": {
"name": "LeaseWeb",
"package_name": "certbot-dns-leaseweb",
"credentials": "dns_leaseweb_api_token = 01234556789",
"full_plugin_name": "dns-leaseweb"
},
"linode": {
"name": "Linode",
"package_name": "certbot-dns-linode",
"credentials": "dns_linode_key = 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ64\ndns_linode_version = [<blank>|3|4]",
"full_plugin_name": "dns-linode"
},
"loopia": {
"name": "Loopia",
"package_name": "certbot-dns-loopia",
"credentials": "dns_loopia_user = user@loopiaapi\ndns_loopia_password = abcdef0123456789abcdef01234567abcdef0123",
"full_plugin_name": "dns-loopia"
},
"luadns": {
"name": "LuaDNS",
"package_name": "certbot-dns-luadns",
"credentials": "dns_luadns_email = user@example.com\ndns_luadns_token = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-luadns"
},
"mchost24": {
"name": "MC-HOST24",
"package_name": "certbot-dns-mchost24",
"credentials": "# Obtain API token using https://github.com/JoeJoeTV/mchost24-api-python\ndns_mchost24_api_token=<insert obtained API token here>",
"full_plugin_name": "dns-mchost24"
},
"mijnhost": {
"name": "mijn.host",
"package_name": "certbot-dns-mijn-host",
"credentials": "dns_mijn_host_api_key=0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-mijn-host"
},
"namecheap": {
"name": "Namecheap",
"package_name": "certbot-dns-namecheap",
"credentials": "dns_namecheap_username = 123456\ndns_namecheap_api_key = 0123456789abcdef0123456789abcdef01234567",
"full_plugin_name": "dns-namecheap"
},
"netcup": {
"name": "netcup",
"package_name": "certbot-dns-netcup",
"credentials": "dns_netcup_customer_id = 123456\ndns_netcup_api_key = 0123456789abcdef0123456789abcdef01234567\ndns_netcup_api_password = abcdef0123456789abcdef01234567abcdef0123",
"full_plugin_name": "dns-netcup"
},
"nicru": {
"name": "nic.ru",
"package_name": "certbot-dns-nicru",
"credentials": "dns_nicru_client_id = application-id\ndns_nicru_client_secret = application-token\ndns_nicru_username = 0001110/NIC-D\ndns_nicru_password = password\ndns_nicru_scope = .+:.+/zones/example.com(/.+)?\ndns_nicru_service = DNS_SERVICE_NAME\ndns_nicru_zone = example.com",
"full_plugin_name": "dns-nicru"
},
"njalla": {
"name": "Njalla",
"package_name": "certbot-dns-njalla",
"credentials": "dns_njalla_token = 0123456789abcdef0123456789abcdef01234567",
"full_plugin_name": "dns-njalla"
},
"nsone": {
"name": "NS1",
"package_name": "certbot-dns-nsone",
"credentials": "dns_nsone_api_key = MDAwMDAwMDAwMDAwMDAw",
"full_plugin_name": "dns-nsone"
},
"oci": {
"name": "Oracle Cloud Infrastructure DNS",
"package_name": "certbot-dns-oci",
"credentials": "[DEFAULT]\nuser = ocid1.user.oc1...\nfingerprint = xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx\ntenancy = ocid1.tenancy.oc1...\nregion = us-ashburn-1\nkey_file = ~/.oci/oci_api_key.pem",
"full_plugin_name": "dns-oci"
},
"ovh": {
"name": "OVH",
"package_name": "certbot-dns-ovh",
"credentials": "dns_ovh_endpoint = ovh-eu\ndns_ovh_application_key = MDAwMDAwMDAwMDAw\ndns_ovh_application_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw\ndns_ovh_consumer_key = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw",
"full_plugin_name": "dns-ovh"
},
"plesk": {
"name": "Plesk",
"package_name": "certbot-dns-plesk",
"credentials": "dns_plesk_username = your-username\ndns_plesk_password = secret\ndns_plesk_api_url = https://plesk-api-host:8443",
"full_plugin_name": "dns-plesk"
},
"porkbun": {
"name": "Porkbun",
"package_name": "certbot-dns-porkbun",
"credentials": "dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret",
"full_plugin_name": "dns-porkbun"
},
"powerdns": {
"name": "PowerDNS",
"package_name": "certbot-dns-pdns",
"credentials": "dns_pdns_endpoint = https://pdns-api.example.com\ndns_pdns_api_key = <Your API Key>\ndns_pdns_server_id = localhost # see https://doc.powerdns.com/authoritative/http-api/server.html\ndns_pdns_disable_notify = false # Disable notification of secondaries after record changes",
"full_plugin_name": "dns-pdns"
},
"regru": {
"name": "reg.ru",
"package_name": "certbot-regru",
"credentials": "dns_username=username\ndns_password=password",
"full_plugin_name": "dns"
},
"rfc2136": {
"name": "RFC 2136",
"package_name": "certbot-dns-rfc2136",
"credentials": "# Target DNS server\ndns_rfc2136_server = 192.0.2.1\n# Target DNS port\ndns_rfc2136_port = 53\n# TSIG key name\ndns_rfc2136_name = keyname.\n# TSIG key secret\ndns_rfc2136_secret = 4q4wM/2I180UXoMyN4INVhJNi8V9BCV+jMw2mXgZw/CSuxUT8C7NKKFs AmKd7ak51vWKgSl12ib86oQRPkpDjg==\n# TSIG key algorithm\ndns_rfc2136_algorithm = HMAC-SHA512",
"full_plugin_name": "dns-rfc2136"
},
"rockenstein": {
"name": "rockenstein AG",
"package_name": "certbot-dns-rockenstein",
"credentials": "dns_rockenstein_token=<token>",
"full_plugin_name": "dns-rockenstein"
},
"scaleway": {
"name": "Scaleway",
"package_name": "certbot-dns-scaleway",
"credentials": "dns_scaleway_application_token = b3a0b9a9-3814-4f12-95b0-a7473bf8b306",
"full_plugin_name": "dns-scaleway"
},
"selectelv2": {
"name": "Selectel api v2",
"package_name": "certbot-dns-selectel-api-v2",
"credentials": "dns_selectel_api_v2_account_id = your_account_id\ndns_selectel_api_v2_project_name = your_project\ndns_selectel_api_v2_username = your_username\ndns_selectel_api_v2_password = your_password",
"full_plugin_name": "dns-selectel-api-v2"
},
"simply": {
"name": "Simply",
"package_name": "certbot-dns-simply",
"credentials": "dns_simply_account_name = UExxxxxx\ndns_simply_api_key = DsHJdsjh2812872sahj",
"full_plugin_name": "dns-simply"
},
"spaceship": {
"name": "Spaceship",
"package_name": "certbot-dns-spaceship",
"credentials": "[spaceship]\napi_key=your_api_key\napi_secret=your_api_secret",
"full_plugin_name": "dns-spaceship"
},
"strato": {
"name": "Strato",
"package_name": "certbot-dns-strato",
"credentials": "dns_strato_username = user\ndns_strato_password = pass\n# uncomment if you are using two factor authentication:\n# dns_strato_totp_devicename = 2fa_device\n# dns_strato_totp_secret = 2fa_secret\n#\n# uncomment if domain name contains special characters\n# insert domain display name as seen on your account page here\n# dns_strato_domain_display_name = my-punicode-url.de\n#\n# if you are not using strato.de or another special endpoint you can customise it below\n# you will probably only need to adjust the host, but you can also change the complete endpoint url\n# dns_strato_custom_api_scheme = https\n# dns_strato_custom_api_host = www.strato.de\n# dns_strato_custom_api_port = 443\n# dns_strato_custom_api_path = \"/apps/CustomerService\"",
"full_plugin_name": "dns-strato"
},
"timeweb": {
"name": "Timeweb Cloud",
"package_name": "certbot-dns-timeweb",
"credentials": "dns_timeweb_api_key = XXXXXXXXXXXXXXXXXXX",
"full_plugin_name": "dns-timeweb"
},
"transip": {
"name": "TransIP",
"package_name": "certbot-dns-transip",
"credentials": "dns_transip_username = my_username\ndns_transip_key_file = /data/tls/certbot/transip-rsa.key",
"full_plugin_name": "dns-transip"
},
"tencentcloud": {
"name": "Tencent Cloud",
"package_name": "certbot-dns-tencentcloud",
"credentials": "dns_tencentcloud_secret_id = TENCENT_CLOUD_SECRET_ID\ndns_tencentcloud_secret_key = TENCENT_CLOUD_SECRET_KEY",
"full_plugin_name": "dns-tencentcloud"
},
"vultr": {
"name": "Vultr",
"package_name": "certbot-dns-vultr",
"credentials": "dns_vultr_key = YOUR_VULTR_API_KEY",
"full_plugin_name": "dns-vultr"
},
"websupport": {
"name": "Websupport.sk",
"package_name": "certbot-dns-websupport",
"credentials": "dns_websupport_identifier = <api_key>\ndns_websupport_secret_key = <secret>",
"full_plugin_name": "dns-websupport"
},
"wedos": {
"name": "Wedos",
"package_name": "certbot-dns-wedos",
"credentials": "dns_wedos_user = <wedos_registration>\ndns_wedos_auth = <wapi_password>",
"full_plugin_name": "dns-wedos"
},
"zoneedit": {
"name": "ZoneEdit",
"package_name": "certbot-dns-zoneedit",
"credentials": "dns_zoneedit_user = <login-user-id>\ndns_zoneedit_token = <dyn-authentication-token>",
"full_plugin_name": "dns-zoneedit"
}
}
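
Each entry above is consumed by the certificate backend shown later in this diff: the `credentials` template is written to a temporary file and `full_plugin_name` is handed to certbot as the authenticator (see requestCertbotWithDnsChallenge in internal/certificate.js below). A minimal sketch of that mapping — `certId` and `credentialsPath` are hypothetical names used only for illustration:

// Sketch: how a dns-plugins.json entry turns into certbot arguments.
const plugin = dnsPlugins["porkbun"];
const args = [
"certonly",
"--cert-name", `npm-${certId}`,
"--authenticator", plugin.full_plugin_name, // e.g. "dns-porkbun"
`--${plugin.full_plugin_name}-credentials`, credentialsPath, // file filled from the "credentials" template
];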

View File

@@ -1,42 +0,0 @@
import knex from "knex";
import { configGet, configHas } from "./lib/config.js";
let instance = null;
const generateDbConfig = () => {
if (!configHas("database")) {
throw new Error(
"Database config does not exist! Please read the instructions: https://github.com/ZoeyVid/NPMplus",
);
}
const cfg = configGet("database");
if (cfg.engine === "knex-native") {
return cfg.knex;
}
return {
client: cfg.engine,
connection: {
host: cfg.host,
user: cfg.user,
password: cfg.password,
database: cfg.name,
port: cfg.port,
...(cfg.ssl ? { ssl: cfg.ssl } : {}),
},
migrations: {
tableName: "migrations",
},
};
};
const getInstance = () => {
if (!instance) {
instance = knex(generateDbConfig());
}
return instance;
};
export default getInstance;
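
For reference, generateDbConfig above expects the `database` config to look roughly like the following (a sketch; the key names come from the code, the values are hypothetical):

// config read via configGet("database") (sketch):
// { "database": { "engine": "mysql2", "host": "db", "user": "npm", "password": "secret", "name": "npm", "port": 3306 } }
// or, to pass options straight through to knex:
// { "database": { "engine": "knex-native", "knex": { /* any knex config object */ } } }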

View File

@@ -1,52 +0,0 @@
#!/usr/bin/env node
import app from "./app.js";
import internalNginx from "./internal/nginx.js";
import internalCertificate from "./internal/certificate.js";
import internalIpRanges from "./internal/ip_ranges.js";
import { global as logger } from "./logger.js";
import { migrateUp } from "./migrate.js";
import { getCompiledSchema } from "./schema/index.js";
import setup from "./setup.js";
async function appStart() {
return migrateUp()
.then(setup)
.then(getCompiledSchema)
.then(() => {
if (process.env.TRUST_CLOUDFLARE === "false") {
logger.info("Cloudflares IPs are NOT trusted");
return;
}
logger.info("Cloudflares IPs are trusted");
internalIpRanges.initTimer();
return internalIpRanges.fetch();
})
.then(() => {
internalCertificate.initTimer();
internalNginx.reload();
const server = app.listen("/run/npmplus.sock", () => {
logger.info(`Backend PID ${process.pid} listening on unix socket...`);
process.on("SIGTERM", () => {
logger.info(`PID ${process.pid} received SIGTERM`);
server.close(() => {
logger.info("Stopping.");
process.exit(0);
});
});
});
})
.catch((err) => {
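// Startup failures (for example the database not being ready yet) are logged
// and appStart is retried every second until it succeeds.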
logger.error(`Startup Error: ${err.message}`, err);
setTimeout(appStart, 1000);
});
}
try {
appStart();
} catch (err) {
logger.fatal(err);
process.exit(1);
}

View File

@@ -1,388 +0,0 @@
import crypto from "node:crypto";
import bcrypt from "bcryptjs";
import { createGuardrails, generateSecret, generateURI, verify } from "otplib";
import errs from "../lib/error.js";
import authModel from "../models/auth.js";
import internalUser from "./user.js";
const APP_NAME = "NPMplus";
const BACKUP_CODE_COUNT = 8;
/**
* Generate backup codes
* @returns {Promise<{plain: string[], hashed: string[]}>}
*/
const generateBackupCodes = async () => {
const plain = [];
const hashed = [];
for (let i = 0; i < BACKUP_CODE_COUNT; i++) {
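// 4 random bytes -> 8 hex chars; this is why backup codes are 8 chars long,
// while TOTP codes are 6 (see the length checks in disable/verifyForLogin below).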
const code = crypto.randomBytes(4).toString("hex").toUpperCase();
plain.push(code);
const hash = await bcrypt.hash(code, 10);
hashed.push(hash);
}
return { plain, hashed };
};
const internal2fa = {
/**
* Check if user has 2FA enabled
* @param {number} userId
* @returns {Promise<boolean>}
*/
isEnabled: async (userId) => {
const auth = await internal2fa.getUserPasswordAuth(userId);
return auth?.meta?.totp_enabled === true;
},
/**
* Get 2FA status for user
* @param {Access} access
* @param {number} userId
* @returns {Promise<{enabled: boolean, backup_codes_remaining: number}>}
*/
getStatus: async (access, userId) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const enabled = auth?.meta?.totp_enabled === true;
let backup_codes_remaining = 0;
if (enabled) {
const backupCodes = auth.meta.backup_codes || [];
backup_codes_remaining = backupCodes.length;
}
return {
enabled,
backup_codes_remaining,
};
},
/**
* Start 2FA setup - store pending secret
*
* @param {Access} access
* @param {number} userId
* @returns {Promise<{secret: string, otpauth_url: string}>}
*/
startSetup: async (access, userId) => {
await access.can("users:password", userId);
const user = await internalUser.get(access, { id: userId });
const secret = generateSecret();
const otpauth_url = generateURI({
issuer: APP_NAME,
label: user.email,
secret: secret,
});
const auth = await internal2fa.getUserPasswordAuth(userId);
// ensure user isn't already setup for 2fa
const enabled = auth?.meta?.totp_enabled === true;
if (enabled) {
throw new errs.ValidationError("2FA is already enabled");
}
const meta = auth.meta || {};
meta.totp_pending_secret = secret;
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return { secret, otpauth_url };
},
/**
* Enable 2FA after verifying code
*
* @param {Access} access
* @param {number} userId
* @param {string} code
* @returns {Promise<{backup_codes: string[]}>}
*/
enable: async (access, userId, code) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const secret = auth?.meta?.totp_pending_secret || false;
if (!secret) {
throw new errs.ValidationError("No pending 2FA setup found");
}
const codeTrim = code.trim();
const result = await verify({ token: codeTrim, secret });
if (!result.valid) {
throw new errs.ValidationError("Invalid verification code");
}
const { plain, hashed } = await generateBackupCodes();
const meta = {
...auth.meta,
totp_secret: secret,
totp_enabled: true,
totp_enabled_at: new Date().toISOString(),
backup_codes: hashed,
};
delete meta.totp_pending_secret;
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return { backup_codes: plain };
},
/**
* Disable 2FA
*
* @param {Access} access
* @param {number} userId
* @param {string} code
* @returns {Promise<void>}
*/
disable: async (access, userId, code) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const enabled = auth?.meta?.totp_enabled === true;
if (!enabled) {
throw new errs.ValidationError("2FA is not enabled");
}
const codeTrim = code.trim();
if (codeTrim.length !== 6 && codeTrim.length !== 8) {
throw new errs.ValidationError("Invalid verification code");
}
// Try a TOTP code first: TOTP codes are 6 chars and verify() throws for other
// lengths, while backup codes are 8 chars.
if (codeTrim.length === 6) {
const result = await verify({
token: codeTrim,
secret: auth.meta.totp_secret,
// These guardrails lower the minimum length requirement for secrets.
// In v12 of otplib the default minimum length is 10 and in v13 it is 16.
// Since there are 2fa secrets in the wild generated with v12 we need to allow shorter secrets
// so people won't be locked out when upgrading.
guardrails: createGuardrails({
MIN_SECRET_BYTES: 10,
}),
});
if (!result.valid) {
throw new errs.ValidationError("Invalid verification code");
}
}
// Try backup codes
if (codeTrim.length === 8) {
const backupCodes = auth?.meta?.backup_codes || [];
let invalid = true;
for (let i = 0; i < backupCodes.length; i++) {
const match = await bcrypt.compare(codeTrim.toUpperCase(), backupCodes[i]);
if (match) {
// Remove used backup code
const updatedCodes = [...backupCodes];
updatedCodes.splice(i, 1);
const meta = { ...auth.meta, backup_codes: updatedCodes };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
invalid = false;
}
}
if (invalid) {
throw new errs.ValidationError("Invalid verification code");
}
}
const meta = { ...auth.meta };
delete meta.totp_secret;
delete meta.totp_enabled;
delete meta.totp_enabled_at;
delete meta.backup_codes;
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
},
/**
* Verify 2FA code for login
*
* @param {number} userId
* @param {string} token
* @returns {Promise<boolean>}
*/
verifyForLogin: async (userId, token) => {
const auth = await internal2fa.getUserPasswordAuth(userId);
const secret = auth?.meta?.totp_secret || false;
if (!secret) {
return false;
}
const tokenTrim = token.trim();
// Try a TOTP code first: TOTP codes are 6 chars and verify() throws for other
// lengths, while backup codes are 8 chars.
if (tokenTrim.length === 6) {
const result = await verify({
token: tokenTrim,
secret,
// These guardrails lower the minimum length requirement for secrets.
// In v12 of otplib the default minimum length is 10 and in v13 it is 16.
// Since there are 2fa secrets in the wild generated with v12 we need to allow shorter secrets
// so people won't be locked out when upgrading.
guardrails: createGuardrails({
MIN_SECRET_BYTES: 10,
}),
});
return result.valid;
}
// Try backup codes
if (tokenTrim.length === 8) {
const backupCodes = auth?.meta?.backup_codes || [];
for (let i = 0; i < backupCodes.length; i++) {
const match = await bcrypt.compare(tokenTrim.toUpperCase(), backupCodes[i]);
if (match) {
// Remove used backup code
const updatedCodes = [...backupCodes];
updatedCodes.splice(i, 1);
const meta = { ...auth.meta, backup_codes: updatedCodes };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return true;
}
}
}
return false;
},
/**
* Regenerate backup codes
*
* @param {Access} access
* @param {number} userId
* @param {string} token
* @returns {Promise<{backup_codes: string[]}>}
*/
regenerateBackupCodes: async (access, userId, token) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const enabled = auth?.meta?.totp_enabled === true;
const secret = auth?.meta?.totp_secret || false;
if (!enabled) {
throw new errs.ValidationError("2FA is not enabled");
}
if (!secret) {
throw new errs.ValidationError("No 2FA secret found");
}
const tokenTrim = token.trim();
if (tokenTrim.length !== 6 && tokenTrim.length !== 8) {
throw new errs.ValidationError("Invalid verification code");
}
// Try a TOTP code first: TOTP codes are 6 chars and verify() throws for other
// lengths, while backup codes are 8 chars.
if (tokenTrim.length === 6) {
const result = await verify({
token: tokenTrim,
secret,
// These guardrails lower the minimum length requirement for secrets.
// In v12 of otplib the default minimum length is 10 and in v13 it is 16.
// Since there are 2fa secrets in the wild generated with v12 we need to allow shorter secrets
// so people won't be locked out when upgrading.
guardrails: createGuardrails({
MIN_SECRET_BYTES: 10,
}),
});
if (!result.valid) {
throw new errs.ValidationError("Invalid verification code");
}
}
// Try backup codes
if (tokenTrim.length === 8) {
const backupCodes = auth?.meta?.backup_codes || [];
let invalid = true;
for (let i = 0; i < backupCodes.length; i++) {
const match = await bcrypt.compare(tokenTrim.toUpperCase(), backupCodes[i]);
if (match) {
// Remove used backup code
const updatedCodes = [...backupCodes];
updatedCodes.splice(i, 1);
const meta = { ...auth.meta, backup_codes: updatedCodes };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
invalid = false;
}
}
if (invalid) {
throw new errs.ValidationError("Invalid verification code");
}
}
const { plain, hashed } = await generateBackupCodes();
const meta = { ...auth.meta, backup_codes: hashed };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return { backup_codes: plain };
},
getUserPasswordAuth: async (userId) => {
const auth = await authModel.query().where("user_id", userId).andWhere("type", "password").first();
if (!auth) {
throw new errs.ItemNotFoundError("Auth not found");
}
return auth;
},
};
export default internal2fa;
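
Taken together, enabling 2FA is a two-step handshake: startSetup stores a pending secret and returns an otpauth URI, then enable verifies the first code before committing the secret and minting backup codes. A minimal sketch of the flow, assuming a hypothetical `access` object for user id 1:

const { otpauth_url } = await internal2fa.startSetup(access, 1);
// render otpauth_url as a QR code for the user's authenticator app...
const { backup_codes } = await internal2fa.enable(access, 1, "123456");
// show backup_codes to the user exactly once; only bcrypt hashes are stored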

View File

@@ -1,450 +0,0 @@
import fs from "node:fs";
import bcrypt from "bcryptjs";
import _ from "lodash";
import errs from "../lib/error.js";
import utils from "../lib/utils.js";
import { access as logger } from "../logger.js";
import accessListModel from "../models/access_list.js";
import accessListAuthModel from "../models/access_list_auth.js";
import accessListClientModel from "../models/access_list_client.js";
import proxyHostModel from "../models/proxy_host.js";
import internalAuditLog from "./audit-log.js";
import internalNginx from "./nginx.js";
const omissions = () => {
return ["is_deleted"];
};
const internalAccessList = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
await access.can("access_lists:create", data);
const row = await accessListModel
.query()
.insertAndFetch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
owner_user_id: access.token.getUserId(1),
})
.then(utils.omitRow(omissions()));
data.id = row.id;
const promises = [];
// Items
data.items.map((item) => {
promises.push(
accessListAuthModel.query().insert({
access_list_id: row.id,
username: item.username,
password: bcrypt.hashSync(item.password, 6),
}),
);
return true;
});
// Clients
data.clients?.map((client) => {
promises.push(
accessListClientModel.query().insert({
access_list_id: row.id,
address: client.address,
directive: client.directive,
}),
);
return true;
});
await Promise.all(promises);
// re-fetch with expansions
const freshRow = await internalAccessList.get(
access,
{
id: data.id,
expand: ["owner", "items", "clients", "proxy_hosts.access_list.[clients,items]"],
},
true, // skip masking
);
// Merge meta so the audit log entry below reflects the stored row
data.meta = _.assign({}, data.meta || {}, freshRow.meta);
await internalAccessList.build(freshRow);
if (Number.parseInt(freshRow.proxy_host_count, 10)) {
await internalNginx.bulkGenerateConfigs(proxyHostModel, "proxy_host", freshRow.proxy_hosts);
}
// Add to audit log
await internalAuditLog.add(access, {
action: "created",
object_type: "access-list",
object_id: freshRow.id,
meta: internalAccessList.maskItems(data),
});
return internalAccessList.maskItems(freshRow);
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
* @param {String} [data.name]
* @param {String} [data.items]
* @return {Promise}
*/
update: async (access, data) => {
await access.can("access_lists:update", data.id);
const row = await internalAccessList.get(access, { id: data.id });
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Access List could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
}
// patch name if specified
if (typeof data.name !== "undefined" && data.name) {
await accessListModel.query().where({ id: data.id }).patch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
});
}
// Check for items and add/update/remove them
if (typeof data.items !== "undefined" && data.items) {
const promises = [];
const itemsToKeep = [];
data.items.map((item) => {
if (item.password) {
promises.push(
accessListAuthModel.query().insert({
access_list_id: data.id,
username: item.username,
password: bcrypt.hashSync(item.password, 6),
}),
);
} else {
// This was supplied with an empty password, which means keep it but don't change the password
itemsToKeep.push(item.username);
}
return true;
});
const query = accessListAuthModel.query().delete().where("access_list_id", data.id);
if (itemsToKeep.length) {
query.andWhere("username", "NOT IN", itemsToKeep);
}
await query;
// Add new items
if (promises.length) {
await Promise.all(promises);
}
}
// Check for clients and add/update/remove them
if (typeof data.clients !== "undefined" && data.clients) {
const clientPromises = [];
data.clients.map((client) => {
if (client.address) {
clientPromises.push(
accessListClientModel.query().insert({
access_list_id: data.id,
address: client.address,
directive: client.directive,
}),
);
}
return true;
});
const query = accessListClientModel.query().delete().where("access_list_id", data.id);
await query;
// Add new clients
if (clientPromises.length) {
await Promise.all(clientPromises);
}
}
// Add to audit log
await internalAuditLog.add(access, {
action: "updated",
object_type: "access-list",
object_id: data.id,
meta: internalAccessList.maskItems(data),
});
// re-fetch with expansions
const freshRow = await internalAccessList.get(
access,
{
id: data.id,
expand: ["owner", "items", "clients", "proxy_hosts.[certificate,access_list.[clients,items]]"],
},
true, // skip masking
);
await internalAccessList.build(freshRow);
if (Number.parseInt(freshRow.proxy_host_count, 10)) {
await internalNginx.bulkGenerateConfigs(proxyHostModel, "proxy_host", freshRow.proxy_hosts);
}
await internalNginx.reload();
return internalAccessList.maskItems(freshRow);
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @param {Boolean} [skipMasking]
* @return {Promise}
*/
get: async (access, data, skipMasking) => {
const thisData = data || {};
const accessData = await access.can("access_lists:get", thisData.id);
const query = accessListModel
.query()
.select("access_list.*", accessListModel.raw("COUNT(proxy_host.id) as proxy_host_count"))
.leftJoin("proxy_host", function () {
this.on("proxy_host.access_list_id", "=", "access_list.id").andOn("proxy_host.is_deleted", "=", 0);
})
.where("access_list.is_deleted", 0)
.andWhere("access_list.id", thisData.id)
.groupBy("access_list.id")
.allowGraph("[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]")
.first();
if (accessData.permission_visibility !== "all") {
query.andWhere("access_list.owner_user_id", access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
}
let row = await query.then(utils.omitRow(omissions()));
if (!row || !row.id) {
throw new errs.ItemNotFoundError(thisData.id);
}
if (!skipMasking && typeof row.items !== "undefined" && row.items) {
row = internalAccessList.maskItems(row);
}
// Custom omissions
if (typeof data.omit !== "undefined" && data.omit !== null) {
row = _.omit(row, data.omit);
}
return row;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: async (access, data) => {
await access.can("access_lists:delete", data.id);
const row = await internalAccessList.get(access, {
id: data.id,
expand: ["proxy_hosts", "items", "clients"],
});
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
// 1. update row to be deleted
// 2. update any proxy hosts that were using it (ignoring permissions)
// 3. reconfigure those hosts
// 4. audit log
// 1. update row to be deleted
await accessListModel.query().where("id", row.id).patch({
is_deleted: 1,
});
// 2. update any proxy hosts that were using it (ignoring permissions)
if (row.proxy_hosts) {
await proxyHostModel.query().where("access_list_id", "=", row.id).patch({ access_list_id: 0 });
// 3. reconfigure those hosts, then reload nginx
// set the access_list_id to zero for these items
row.proxy_hosts.map((_val, idx) => {
row.proxy_hosts[idx].access_list_id = 0;
return true;
});
await internalNginx.bulkGenerateConfigs(proxyHostModel, "proxy_host", row.proxy_hosts);
}
await internalNginx.reload();
// delete the htpasswd file
try {
fs.unlinkSync(internalAccessList.getFilename(row));
} catch (_err) {
// do nothing
}
// 4. audit log
await internalAuditLog.add(access, {
action: "deleted",
object_type: "access-list",
object_id: row.id,
meta: _.omit(internalAccessList.maskItems(row), ["is_deleted", "proxy_hosts"]),
});
return true;
},
/**
* All Lists
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("access_lists:list");
const query = accessListModel
.query()
.select("access_list.*", accessListModel.raw("COUNT(proxy_host.id) as proxy_host_count"))
.leftJoin("proxy_host", function () {
this.on("proxy_host.access_list_id", "=", "access_list.id").andOn("proxy_host.is_deleted", "=", 0);
})
.where("access_list.is_deleted", 0)
.groupBy("access_list.id")
.allowGraph("[owner,items,clients]")
.orderBy("access_list.name", "ASC");
if (accessData.permission_visibility !== "all") {
query.andWhere("access_list.owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof searchQuery === "string") {
query.where(function () {
this.where("name", "like", `%${searchQuery}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const rows = await query.then(utils.omitRows(omissions()));
if (rows) {
rows.map((row, idx) => {
if (typeof row.items !== "undefined" && row.items) {
rows[idx] = internalAccessList.maskItems(row);
}
return true;
});
}
return rows;
},
/**
* Count is used in reports
*
* @param {Integer} userId
* @param {String} visibility
* @returns {Promise}
*/
getCount: async (userId, visibility) => {
const query = accessListModel.query().count("id as count").where("is_deleted", 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", userId);
}
const row = await query.first();
return Number.parseInt(row.count, 10);
},
/**
* @param {Object} list
* @returns {Object}
*/
maskItems: (list) => {
if (list && typeof list.items !== "undefined") {
list.items.map((val, idx) => {
let repeatFor = 8;
let firstChar = "*";
if (typeof val.password !== "undefined" && val.password) {
repeatFor = val.password.length - 1;
firstChar = val.password.charAt(0);
}
list.items[idx].hint = firstChar + "*".repeat(repeatFor);
list.items[idx].password = "";
return true;
});
}
return list;
},
/**
* @param {Object} list
* @param {Integer} list.id
* @returns {String}
*/
getFilename: (list) => {
return `/data/access/${list.id}`;
},
/**
* @param {Object} list
* @param {Integer} list.id
* @param {String} list.name
* @param {Array} list.items
* @returns {Promise}
*/
build: async (list) => {
logger.info(`Building Access file #${list.id} for: ${list.name}`);
const htpasswdFile = internalAccessList.getFilename(list);
fs.rmSync(htpasswdFile, { force: true });
fs.writeFileSync(htpasswdFile, "", { encoding: "utf8" });
if (list.items?.length) {
for (const item of list.items) {
if (item.username?.length && item.password?.length) {
logger.info(`Adding: ${item.username}`);
try {
fs.appendFileSync(htpasswdFile, `${item.username}:${item.password}\n`, {
encoding: "utf8",
});
} catch (err) {
logger.error(err);
throw err;
}
}
}
}
logger.success(`Built Access file #${list.id} for: ${list.name}`);
},
};
export default internalAccessList;
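
The file that build() writes to /data/access/<id> is a standard htpasswd-style list, one `username:hash` line per item, where the hash is the bcrypt digest created in create()/update(). A single line might look like this (hypothetical values):

admin:$2a$06$Q0LpFpNFOXnnkgLmib5lmeC4ykBa8Csvu7e8T2kR3yvXbL9nE6GdS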

View File

@@ -1,97 +0,0 @@
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import auditLogModel from "../models/audit-log.js";
const internalAuditLog = {
/**
* All logs
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
await access.can("auditlog:list");
const query = auditLogModel
.query()
.orderBy("created_on", "DESC")
.orderBy("id", "DESC")
.limit(100)
.allowGraph("[user]");
// Query is used for searching
if (typeof searchQuery === "string" && searchQuery.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("meta"), "like", `%${searchQuery}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
return await query;
},
/**
* @param {Access} access
* @param {Object} [data]
* @param {Integer} data.id
* @param {Array} [data.expand]
* @return {Promise}
*/
get: async (access, data) => {
await access.can("auditlog:list");
const query = auditLogModel.query().andWhere("id", data.id).allowGraph("[user]").first();
if (typeof data.expand !== "undefined" && data.expand !== null) {
query.withGraphFetched(`[${data.expand.join(", ")}]`);
}
const row = await query;
if (!row?.id) {
throw new errs.ItemNotFoundError(data.id);
}
return row;
},
/**
* This method should not be used publicly as it performs no permission checks. It
* assumes the caller has already established permission to write to the audit log;
* the access token is only used to determine the default user id.
*
* @param {Access} access
* @param {Object} data
* @param {String} data.action
* @param {Number} [data.user_id]
* @param {Number} [data.object_id]
* @param {Number} [data.object_type]
* @param {Object} [data.meta]
* @returns {Promise}
*/
add: async (access, data) => {
if (typeof data.user_id === "undefined" || !data.user_id) {
data.user_id = access.token.getUserId(1);
}
if (typeof data.action === "undefined" || !data.action) {
throw new errs.InternalValidationError("Audit log entry must contain an Action");
}
// user_id and action are guaranteed above; the object fields fall back to empty defaults
return await auditLogModel.query().insert({
user_id: data.user_id,
action: data.action,
object_type: data.object_type || "",
object_id: data.object_id || 0,
meta: data.meta || {},
});
},
};
export default internalAuditLog;
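
A row written by add() therefore looks roughly like this (keys taken from the insert above, values hypothetical):

{ user_id: 1, action: "created", object_type: "access-list", object_id: 7, meta: { name: "internal" } }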

View File

@@ -1,969 +0,0 @@
import { createPrivateKey, X509Certificate } from "node:crypto";
import { mkdir, readFile, rm, writeFile } from "node:fs/promises";
import fs from "node:fs";
import path from "node:path";
import { domainToASCII } from "node:url";
import archiver from "archiver";
import dayjs from "dayjs";
import _ from "lodash";
import dnsPlugins from "../certbot/dns-plugins.json" with { type: "json" };
import { installPlugin } from "../lib/certbot.js";
import error from "../lib/error.js";
import utils from "../lib/utils.js";
import { debug, ssl as logger } from "../logger.js";
import certificateModel from "../models/certificate.js";
import internalAuditLog from "./audit-log.js";
import internalNginx from "./nginx.js";
import pjson from "../package.json" with { type: "json" };
const omissions = () => {
return ["is_deleted", "owner.is_deleted", "meta.dns_provider_credentials"];
};
const internalCertificate = {
allowedSslFiles: ["certificate", "certificate_key"],
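// CRT comes from the environment and is interpreted as hours between renewal
// checks: 1000 ms * 60 s * 60 min * CRT. For example, CRT=24 checks once a day.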
intervalTimeout: 1000 * 60 * 60 * Number.parseInt(process.env.CRT, 10),
interval: null,
intervalProcessing: false,
initTimer: () => {
logger.info("Certbot Renewal Timer initialized");
internalCertificate.interval = setInterval(
internalCertificate.processExpiringHosts,
internalCertificate.intervalTimeout,
);
// And do this now as well
internalCertificate.processExpiringHosts();
},
/**
* Triggered by a timer, this will check for expiring hosts and renew their tls certs if required
*/
processExpiringHosts: async () => {
if (internalCertificate.intervalProcessing) {
return;
}
internalCertificate.intervalProcessing = true;
logger.info("Renewing Certbot TLS certs close to expiry...");
try {
try {
const result = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"renew",
"--server",
process.env.ACME_SERVER,
"--quiet",
]);
if (result) logger.info(`Renew Result: ${result}`);
} catch (err) {
logger.warn(`Certbot completed with errors: ${err}`);
}
try {
await internalNginx.reload();
} catch (err) {
logger.error(err);
}
const certificates = await certificateModel
.query()
.where("is_deleted", 0)
.andWhere("provider", "letsencrypt");
if (certificates && certificates.length > 0) {
const updatePromises = certificates.map(async (certificate) => {
try {
const certInfo = await internalCertificate.getCertificateInfoFromFile(
`${internalCertificate.getLiveCertPath(certificate.id)}/fullchain.pem`,
);
await certificateModel
.query()
.where("id", certificate.id)
.andWhere("provider", "letsencrypt")
.patch({
expires_on: dayjs.unix(certInfo.dates.to).format("YYYY-MM-DD HH:mm:ss"),
});
} catch (err) {
// Don't want to stop the train here, just log the error
logger.error(err);
}
});
await Promise.all(updatePromises);
logger.info("Renew Complete");
}
} catch (err) {
logger.error(err);
} finally {
internalCertificate.intervalProcessing = false;
}
},
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
await access.can("certificates:create", data);
data.owner_user_id = access.token.getUserId(1);
if (data.provider === "letsencrypt") {
data.nice_name = data.domain_names.join(", ");
}
// this command really should clean up and delete the cert if it can't fully succeed
const certificate = await certificateModel.query().insertAndFetch(data);
try {
if (certificate.provider === "letsencrypt") {
// Request a new Cert with Certbot. Let the fun begin.
if (certificate.meta?.dns_challenge) {
await internalCertificate.requestCertbotWithDnsChallenge(certificate);
} else {
await internalCertificate.requestCertbot(certificate);
}
// At this point, the letsencrypt cert should exist on disk.
// Let's get the expiry date from the file and update the row silently
try {
const certInfo = await internalCertificate.getCertificateInfoFromFile(
`${internalCertificate.getLiveCertPath(certificate.id)}/fullchain.pem`,
);
const savedRow = await certificateModel
.query()
.patchAndFetchById(certificate.id, {
expires_on: dayjs.unix(certInfo.dates.to).format("YYYY-MM-DD HH:mm:ss"),
})
.then(utils.omitRow(omissions()));
// Add cert data for audit log
savedRow.meta = _.assign({}, savedRow.meta, {
letsencrypt_certificate: certInfo,
});
await internalCertificate.addCreatedAuditLog(access, certificate.id, savedRow);
return savedRow;
} catch (err) {
// Delete the certificate from the database if it was not created successfully
await certificateModel.query().deleteById(certificate.id);
throw err;
}
}
} catch (err) {
// Delete the certificate here. This is a hard delete, since it never existed properly
await certificateModel.query().deleteById(certificate.id);
throw err;
}
data.meta = _.assign({}, data.meta || {}, certificate.meta);
// Add to audit log
await internalCertificate.addCreatedAuditLog(access, certificate.id, utils.omitRow(omissions())(data));
return utils.omitRow(omissions())(certificate);
},
addCreatedAuditLog: async (access, certificate_id, meta) => {
await internalAuditLog.add(access, {
action: "created",
object_type: "certificate",
object_id: certificate_id,
meta: meta,
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.email]
* @param {String} [data.name]
* @return {Promise}
*/
update: async (access, data) => {
await access.can("certificates:update", data.id);
const row = await internalCertificate.get(access, { id: data.id });
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new error.InternalValidationError(
`Certificate could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
}
const savedRow = await certificateModel
.query()
.patchAndFetchById(row.id, data)
.then(utils.omitRow(omissions()));
savedRow.meta = internalCertificate.cleanMeta(savedRow.meta);
data.meta = internalCertificate.cleanMeta(data.meta);
// Add row.nice_name for custom certs
if (savedRow.provider === "other") {
data.nice_name = savedRow.nice_name;
}
// Add to audit log
await internalAuditLog.add(access, {
action: "updated",
object_type: "certificate",
object_id: row.id,
meta: _.omit(data, ["expires_on"]), // this prevents json circular reference because expires_on might be raw
});
return savedRow;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @return {Promise}
*/
get: async (access, data) => {
const accessData = await access.can("certificates:get", data.id);
const query = certificateModel
.query()
.where("is_deleted", 0)
.andWhere("id", data.id)
.allowGraph("[owner,proxy_hosts,redirection_hosts,dead_hosts,streams]")
.first();
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
if (typeof data.expand !== "undefined" && data.expand !== null) {
query.withGraphFetched(`[${data.expand.join(", ")}]`);
}
const row = await query.then(utils.omitRow(omissions()));
if (!row || !row.id) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== "undefined" && data.omit !== null) {
return _.omit(row, [...data.omit]);
}
return internalCertificate.cleanExpansions(row);
},
cleanExpansions: (row) => {
if (typeof row.proxy_hosts !== "undefined") {
row.proxy_hosts = utils.omitRows(["is_deleted"])(row.proxy_hosts);
}
if (typeof row.redirection_hosts !== "undefined") {
row.redirection_hosts = utils.omitRows(["is_deleted"])(row.redirection_hosts);
}
if (typeof row.dead_hosts !== "undefined") {
row.dead_hosts = utils.omitRows(["is_deleted"])(row.dead_hosts);
}
if (typeof row.streams !== "undefined") {
row.streams = utils.omitRows(["is_deleted"])(row.streams);
}
return row;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @returns {Promise}
*/
download: async (access, data) => {
await access.can("certificates:get", data);
const certificate = await internalCertificate.get(access, data);
if (certificate.provider === "letsencrypt") {
const zipDirectory = internalCertificate.getLiveCertPath(data.id);
if (!fs.existsSync(zipDirectory)) {
throw new error.ItemNotFoundError(`Certificate ${certificate.nice_name} does not exist`);
}
const certFiles = fs
.readdirSync(zipDirectory)
.filter((fn) => fn.endsWith(".pem"))
.map((fn) => fs.realpathSync(path.join(zipDirectory, fn)));
const downloadName = `npm-${data.id}-${Date.now()}.zip`;
const opName = `/tmp/${downloadName}`;
await internalCertificate.zipFiles(certFiles, opName);
debug(logger, "zip completed : ", opName);
return {
fileName: opName,
};
}
throw new error.ValidationError("Only Certbot certificates can be downloaded");
},
/**
* @param {String} source
* @param {String} out
* @returns {Promise}
*/
zipFiles: async (source, out) => {
const archive = archiver("zip", { zlib: { level: 9 } });
const stream = fs.createWriteStream(out);
return new Promise((resolve, reject) => {
source.map((fl) => {
const fileName = path.basename(fl);
debug(logger, fl, "added to certificate zip");
archive.file(fl, { name: fileName });
return true;
});
archive.on("error", (err) => reject(err)).pipe(stream);
stream.on("close", () => resolve());
archive.finalize();
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: async (access, data) => {
await access.can("certificates:delete", data.id);
const row = await internalCertificate.get(access, { id: data.id });
if (!row || !row.id) {
throw new error.ItemNotFoundError(data.id);
}
await certificateModel.query().where("id", row.id).patch({
is_deleted: 1,
});
// Add to audit log
row.meta = internalCertificate.cleanMeta(row.meta);
await internalAuditLog.add(access, {
action: "deleted",
object_type: "certificate",
object_id: row.id,
meta: _.omit(row, omissions()),
});
if (row.provider === "letsencrypt") {
// Revoke the cert
await internalCertificate.revokeCertbot(row);
} else {
await rm(`/data/tls/custom/npm-${row.id}`, { force: true, recursive: true });
await rm(`/data/tls/custom/npm-${row.id}.der`, { force: true });
}
return true;
},
/**
* All Certs
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("certificates:list");
const query = certificateModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph("[owner,proxy_hosts,redirection_hosts,dead_hosts,streams]")
.orderBy("nice_name", "ASC");
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof searchQuery === "string") {
query.where(function () {
this.where("nice_name", "like", `%${searchQuery}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const r = await query.then(utils.omitRows(omissions()));
for (let i = 0; i < r.length; i++) {
r[i] = internalCertificate.cleanExpansions(r[i]);
}
return r;
},
/**
* Report use
*
* @param {Number} userId
* @param {String} visibility
* @returns {Promise}
*/
getCount: async (userId, visibility) => {
const query = certificateModel.query().count("id as count").where("is_deleted", 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", userId);
}
const row = await query.first();
return Number.parseInt(row.count, 10);
},
/**
* @param {Object} certificate
* @returns {Promise}
*/
writeCustomCert: async (certificate) => {
if (certificate.provider === "letsencrypt") {
throw new Error("Refusing to write certbot certs here");
}
logger.info("Writing Custom Certificate:", certificate.id);
const dir = `/data/tls/custom/npm-${certificate.id}`;
await mkdir(dir, { recursive: true });
await writeFile(`${dir}/fullchain.pem`, certificate.meta.certificate);
await writeFile(`${dir}/privkey.pem`, certificate.meta.certificate_key);
},
/**
* @param {Access} access
* @param {Object} data
* @param {Array} data.domain_names
* @returns {Promise}
*/
createQuickCertificate: async (access, data) => {
return await internalCertificate.create(access, {
provider: "letsencrypt",
domain_names: data.domain_names,
meta: data.meta,
});
},
/**
* Validates that the certs provided are good.
* No access required here, nothing is changed or stored.
*
* @param {Object} data
* @param {Object} data.files
* @returns {Promise}
*/
validate: async (data) => {
const finalData = {};
for (const [name, file] of Object.entries(data.files)) {
if (internalCertificate.allowedSslFiles.includes(name)) {
const content = file.data.toString();
let res;
if (name === "certificate_key") {
res = await internalCertificate.checkPrivateKey(content);
} else {
res = await internalCertificate.getCertificateInfo(content, true);
}
finalData[name] = res;
}
}
return finalData;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {Object} data.files
* @returns {Promise}
*/
upload: async (access, data) => {
const row = await internalCertificate.get(access, { id: data.id });
if (row.provider !== "other") {
throw new error.ValidationError("Cannot upload certificates for this type of provider");
}
const validations = await internalCertificate.validate(data);
if (typeof validations.certificate === "undefined" || typeof validations.certificate_key === "undefined") {
throw new error.ValidationError("Certificate and Certificate Key files were not provided");
}
const certs = {};
_.map(data.files, (file, name) => {
if (internalCertificate.allowedSslFiles.indexOf(name) !== -1) {
certs[name] = file.data.toString();
}
});
const certificate = await internalCertificate.update(access, {
id: data.id,
expires_on: dayjs.unix(validations.certificate.dates.to).format("YYYY-MM-DD HH:mm:ss"),
domain_names: validations.certificate.cn,
meta: _.clone(row.meta), // Prevent the update method from changing this value that we'll use later
});
certificate.meta = _.assign({}, row.meta, certs);
await internalCertificate.writeCustomCert(certificate);
return _.omit(certificate.meta, internalCertificate.allowedSslFiles);
},
/**
* Validates the private key by attempting to parse it with node's crypto module.
*
* @param {String} privateKey This is the entire key contents as a string
*/
checkPrivateKey: async (privateKey) => {
try {
createPrivateKey(privateKey);
return true;
} catch (err) {
throw new error.ValidationError(`Certificate Key is not valid (${err.message})`, err);
}
},
/**
* Validates the certificate and extracts its subject alt names, issuer and
* validity dates using node's X509Certificate parser.
*
* @param {String} certificate This is the entire cert contents as a string
* @param {Boolean} [throwExpired] Throw when the certificate is out of date
*/
getCertificateInfo: async (certificate, throwExpired) => {
const certData = {};
try {
const cert = new X509Certificate(certificate);
if (cert.subjectAltName) {
certData.cn = cert.subjectAltName.split(", ").map((entry) => {
const firstColonIdx = entry.indexOf(":");
return firstColonIdx === -1 ? entry.trim() : entry.substring(firstColonIdx + 1).trim();
});
} else {
const cnMatch = /\bCN=([^\n]+)/i.exec(cert.subject);
if (cnMatch?.[1]) {
certData.cn = [cnMatch[1].trim()];
} else {
certData.cn = [];
}
}
if (cert.issuer) {
certData.issuer = cert.issuer.replace(/\n/g, ", ");
}
const validFrom = Math.floor(new Date(cert.validFrom).getTime() / 1000);
const validTo = Math.floor(new Date(cert.validTo).getTime() / 1000);
if (Number.isNaN(validFrom) || Number.isNaN(validTo)) {
throw new error.ValidationError("Could not determine dates from certificate");
}
const now = Math.floor(Date.now() / 1000);
if (throwExpired && validTo < now) {
throw new error.ValidationError("Certificate has expired");
}
certData.dates = {
from: validFrom,
to: validTo,
};
return certData;
} catch (err) {
throw new error.ValidationError(`Certificate is not valid (${err.message})`, err);
}
},
/**
* Reads a certificate file from disk and returns the same info as getCertificateInfo.
*
* @param {String} certificateFile The file location on disk
* @param {Boolean} [throwExpired] Throw when the certificate is out of date
*/
getCertificateInfoFromFile: async (certificateFile, throwExpired) => {
const certContent = await readFile(certificateFile);
return internalCertificate.getCertificateInfo(certContent, throwExpired);
},
/**
* Cleans the tls keys from the meta object: removes them when `remove` is set,
* otherwise replaces their contents with "true".
*
* @param {Object} meta
* @param {Boolean} [remove]
* @returns {Object}
*/
cleanMeta: (meta, remove) => {
internalCertificate.allowedSslFiles.map((key) => {
if (typeof meta[key] !== "undefined" && meta[key]) {
if (remove) {
delete meta[key];
} else {
meta[key] = true;
}
}
return true;
});
return meta;
},
/**
* Request a certificate using the http challenge
* @param {Object} certificate the certificate row
* @returns {Promise}
*/
requestCertbot: async (certificate) => {
logger.info(
`Requesting Certbot certificates for Cert #${certificate.id}: ${certificate.domain_names.join(", ")}`,
);
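// Each domain is converted to its ASCII/punycode form via domainToASCII before
// being handed to certbot; the webroot authenticator answers the http-01
// challenge from files on disk.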
const result = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"certonly",
"--server",
process.env.ACME_SERVER,
"--cert-name",
`npm-${certificate.id}`,
"--domains",
certificate.domain_names.map((domain_name) => domainToASCII(domain_name)).join(","),
...(certificate.meta.reuse_key ? ["--reuse-key"] : ["--no-reuse-key"]),
"--authenticator",
"webroot",
]);
logger.success(result);
return result;
},
/**
* @param {Object} certificate the certificate row
* @returns {Promise}
*/
requestCertbotWithDnsChallenge: async (certificate) => {
const dnsPlugin = dnsPlugins[certificate.meta.dns_provider];
if (!dnsPlugin) {
throw Error(`Unknown DNS provider '${certificate.meta.dns_provider}'`);
}
await installPlugin(certificate.meta.dns_provider);
logger.info(
`Requesting Certbot certificates via ${dnsPlugin.name} for Cert #${certificate.id}: ${certificate.domain_names.join(", ")}`,
);
const credentialsLocation = `/tmp/certbot-credentials/credentials-${certificate.id}`;
await writeFile(credentialsLocation, certificate.meta.dns_provider_credentials, { mode: 0o600 });
try {
const result = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"certonly",
"--server",
process.env.ACME_SERVER,
"--cert-name",
`npm-${certificate.id}`,
"--domains",
certificate.domain_names.map((domain_name) => domainToASCII(domain_name)).join(","),
...(certificate.meta.reuse_key ? ["--reuse-key"] : ["--no-reuse-key"]),
"--authenticator",
dnsPlugin.full_plugin_name,
`--${dnsPlugin.full_plugin_name}-credentials`,
credentialsLocation,
...(certificate.meta.propagation_seconds !== undefined
? [`--${dnsPlugin.full_plugin_name}-propagation-seconds`, certificate.meta.propagation_seconds]
: []),
]);
logger.info(result);
return result;
} catch (err) {
await rm(credentialsLocation, { force: true });
throw err;
}
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @returns {Promise}
*/
renew: async (access, data) => {
await access.can("certificates:update", data);
const certificate = await internalCertificate.get(access, data);
if (certificate.provider === "letsencrypt") {
const renewMethod = certificate.meta.dns_challenge
? internalCertificate.renewCertbotWithDnsChallenge
: internalCertificate.renewCertbot;
await renewMethod(certificate);
const certInfo = await internalCertificate.getCertificateInfoFromFile(
`${internalCertificate.getLiveCertPath(certificate.id)}/fullchain.pem`,
);
const updatedCertificate = await certificateModel.query().patchAndFetchById(certificate.id, {
expires_on: dayjs.unix(certInfo.dates.to).format("YYYY-MM-DD HH:mm:ss"),
});
// Add to audit log
await internalAuditLog.add(access, {
action: "renewed",
object_type: "certificate",
object_id: updatedCertificate.id,
meta: updatedCertificate,
});
return updatedCertificate;
}
throw new error.ValidationError("Only Certbot certificates can be renewed");
},
/**
* @param {Object} certificate the certificate row
* @returns {Promise}
*/
renewCertbot: async (certificate) => {
logger.info(
`Renewing Certbot certificates for Cert #${certificate.id}: ${certificate.domain_names.join(", ")}`,
);
try {
const revokeResult = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"revoke",
"--cert-name",
`npm-${certificate.id}`,
"--reason",
"superseded",
"--no-delete-after-revoke",
]);
logger.info(revokeResult);
} catch {
// do nothing
}
const renewResult = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"renew",
"--server",
process.env.ACME_SERVER,
"--cert-name",
`npm-${certificate.id}`,
"--new-key",
"--force-renewal",
]);
logger.info(renewResult);
return renewResult;
},
/**
* @param {Object} certificate the certificate row
* @returns {Promise}
*/
renewCertbotWithDnsChallenge: async (certificate) => {
const dnsPlugin = dnsPlugins[certificate.meta.dns_provider];
if (!dnsPlugin) {
throw Error(`Unknown DNS provider '${certificate.meta.dns_provider}'`);
}
logger.info(
`Renewing Certbot certificates via ${dnsPlugin.name} for Cert #${certificate.id}: ${certificate.domain_names.join(", ")}`,
);
try {
const revokeResult = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"revoke",
"--cert-name",
`npm-${certificate.id}`,
"--reason",
"superseded",
"--no-delete-after-revoke",
]);
logger.info(revokeResult);
} catch {
// do nothing
}
const renewResult = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"renew",
"--server",
process.env.ACME_SERVER,
"--cert-name",
`npm-${certificate.id}`,
"--new-key",
"--force-renewal",
]);
logger.info(renewResult);
return renewResult;
},
/**
* @param {Object} certificate the certificate row
* @param {Boolean} [throwErrors]
* @returns {Promise}
*/
revokeCertbot: async (certificate, throwErrors) => {
logger.info(
`Revoking Certbot certificates for Cert #${certificate.id}: ${certificate.domain_names.join(", ")}`,
);
try {
const result = await utils.execFile("certbot", [
"--config",
"/etc/certbot.ini",
"revoke",
"--cert-name",
`npm-${certificate.id}`,
"--reason",
"unspecified",
"--delete-after-revoke",
]);
await rm(`/data/tls/certbot/live/npm-${certificate.id}.der`, { force: true });
logger.info(result);
return result;
} catch (err) {
logger.error(err.message);
if (throwErrors) {
throw err;
}
}
},
/**
*
* @param {Access} access
* @param {Object} payload
* @param {string[]} payload.domains
* @returns {Promise}
*/
testHttpsChallenge: async (access, payload) => {
await access.can("certificates:list");
// Create a test challenge file
const testChallengeDir = "/data/tls/certbot/acme-challenge/.well-known/acme-challenge";
const testChallengeFile = `${testChallengeDir}/test-challenge`;
fs.mkdirSync(testChallengeDir, { recursive: true });
await writeFile(testChallengeFile, "Success", { encoding: "utf8" });
const results = [];
for (const domain of payload.domains) {
results.push({
domain: domain,
status: await internalCertificate.performTestForDomain(domain),
});
}
// Remove the test challenge file
await rm(testChallengeFile, { force: true });
return results;
},
performTestForDomain: async (domain) => {
logger.info(`Testing http challenge for ${domain}`);
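// The test runs from an external vantage point: site24x7's REST API tester is
// asked to fetch the challenge URL and report the status code and body back,
// so the result reflects roughly what an outside client would see.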
let result;
try {
const response = await fetch("https://www.site24x7.com/tools/restapi-tester", {
method: "POST",
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
"Content-Type": "application/x-www-form-urlencoded",
},
body: `method=G&url=${encodeURI(`http://${domainToASCII(domain)}/.well-known/acme-challenge/test-challenge`)}&bodytype=T&locationid=10`,
});
try {
result = await response.json();
if (!response.ok) {
logger.warn(
`Failed to test HTTP challenge for domain ${domain} because HTTP status code ${response.status} was returned: ${result.message}`,
);
}
} catch (err) {
if (!response.ok) {
logger.warn(
`Failed to test HTTP challenge for domain ${domain} because HTTP status code ${response.status} was returned`,
);
} else {
logger.warn(
`Failed to test HTTP challenge for domain ${domain} because response failed to be parsed: ${err.message}`,
);
}
}
} catch (err) {
logger.warn(`Failed to test HTTP challenge for domain ${domain}`, err);
}
if (!result) {
// Some error occurred while trying to get the data
return "failed";
}
if (result.error) {
logger.info(
`HTTP challenge test failed for domain ${domain} because error was returned: ${result.error.msg}`,
);
return `other:${result.error.msg}`;
}
if (`${result.responsecode}` === "200" && result.htmlresponse === "Success") {
// Server exists and has responded with the correct data
return "ok";
}
if (`${result.responsecode}` === "200") {
// Server exists but has responded with wrong data
logger.info(
`HTTP challenge test failed for domain ${domain} because of invalid returned data:`,
result.htmlresponse,
);
return "wrong-data";
}
if (`${result.responsecode}` === "404") {
// Server exists but responded with a 404
logger.info(`HTTP challenge test failed for domain ${domain} because code 404 was returned`);
return "404";
}
if (
`${result.responsecode}` === "0" ||
(typeof result.reason === "string" && result.reason.toLowerCase() === "host unavailable")
) {
// Server does not exist at domain
logger.info(`HTTP challenge test failed for domain ${domain} because the host was not found`);
return "no-host";
}
// Other errors
logger.info(`HTTP challenge test failed for domain ${domain} because code ${result.responsecode} was returned`);
return `other:${result.responsecode}`;
},
getLiveCertPath: (certificateId) => {
return `/data/tls/certbot/live/npm-${certificateId}`;
},
};
export default internalCertificate;
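
As a usage sketch, download() above zips the live .pem files and returns the path of the archive under /tmp (the `access` object and the id are hypothetical):

const { fileName } = await internalCertificate.download(access, { id: 5 });
// fileName looks like "/tmp/npm-5-<timestamp>.zip" and contains the live .pem files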

View File

@@ -1,378 +0,0 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import deadHostModel from "../models/dead_host.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const omissions = () => {
return ["is_deleted"];
};
const internalDeadHost = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
const createCertificate = data.certificate_id === "new";
if (createCertificate) {
delete data.certificate_id;
}
await access.can("dead_hosts:create", data);
// Get a list of the domain names and check each of them against existing records
const domainNameCheckPromises = [];
data.domain_names.map((domain_name) => {
domainNameCheckPromises.push(internalHost.isHostnameTaken(domain_name));
return true;
});
await Promise.all(domainNameCheckPromises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
});
});
// At this point the domains should have been checked
data.owner_user_id = access.token.getUserId(1);
const thisData = internalHost.cleanSslHstsData(createCertificate, data);
// Fix for db field not having a default value
// for this optional field.
if (typeof data.advanced_config === "undefined") {
thisData.advanced_config = "";
}
const row = await deadHostModel.query().insertAndFetch(thisData).then(utils.omitRow(omissions()));
// Add to audit log
await internalAuditLog.add(access, {
action: "created",
object_type: "dead-host",
object_id: row.id,
meta: thisData,
});
if (createCertificate) {
const cert = await internalCertificate.createQuickCertificate(access, data);
// update host with cert id
await internalDeadHost.update(access, {
id: row.id,
certificate_id: cert.id,
});
}
// re-fetch with cert
const freshRow = await internalDeadHost.get(access, {
id: row.id,
expand: ["certificate", "owner"],
});
// Sanity check
if (createCertificate && !freshRow.certificate_id) {
throw new errs.InternalValidationError("The host was created but the Certificate creation failed.");
}
// Configure nginx
await internalNginx.configure(deadHostModel, "dead_host", freshRow);
return freshRow;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @return {Promise}
*/
update: async (access, data) => {
const createCertificate = data.certificate_id === "new";
if (createCertificate) {
delete data.certificate_id;
}
await access.can("dead_hosts:update", data.id);
// Get a list of the domain names and check each of them against existing records
const domainNameCheckPromises = [];
if (typeof data.domain_names !== "undefined") {
data.domain_names.map((domainName) => {
domainNameCheckPromises.push(internalHost.isHostnameTaken(domainName, "dead", data.id));
return true;
});
const checkResults = await Promise.all(domainNameCheckPromises);
checkResults.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
});
}
const row = await internalDeadHost.get(access, { id: data.id });
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`404 Host could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
}
if (createCertificate) {
const cert = await internalCertificate.createQuickCertificate(access, {
domain_names: data.domain_names || row.domain_names,
meta: _.assign({}, row.meta, data.meta),
});
// update host with cert id
data.certificate_id = cert.id;
}
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
let thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
data,
);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData, row);
// do the row update
await deadHostModel.query().where({ id: data.id }).patch(thisData);
// Add to audit log
await internalAuditLog.add(access, {
action: "updated",
object_type: "dead-host",
object_id: row.id,
meta: thisData,
});
const thisRow = await internalDeadHost.get(access, {
id: thisData.id,
expand: ["owner", "certificate"],
});
// Configure nginx
const newMeta = await internalNginx.configure(deadHostModel, "dead_host", thisRow);
thisRow.meta = newMeta;
return _.omit(internalHost.cleanRowCertificateMeta(thisRow), omissions());
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @return {Promise}
*/
get: async (access, data) => {
const accessData = await access.can("dead_hosts:get", data.id);
const query = deadHostModel
.query()
.where("is_deleted", 0)
.andWhere("id", data.id)
.allowGraph(deadHostModel.defaultAllowGraph)
.first();
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
if (typeof data.expand !== "undefined" && data.expand !== null) {
query.withGraphFetched(`[${data.expand.join(", ")}]`);
}
const row = await query.then(utils.omitRow(omissions()));
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== "undefined" && data.omit !== null) {
return _.omit(row, data.omit);
}
return row;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: async (access, data) => {
await access.can("dead_hosts:delete", data.id);
const row = await internalDeadHost.get(access, { id: data.id });
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
await deadHostModel.query().where("id", row.id).patch({
is_deleted: 1,
});
// Delete Nginx Config
await internalNginx.deleteConfig("dead_host", row);
await internalNginx.reload();
// Add to audit log
await internalAuditLog.add(access, {
action: "deleted",
object_type: "dead-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
return true;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
enable: async (access, data) => {
await access.can("dead_hosts:update", data.id);
const row = await internalDeadHost.get(access, {
id: data.id,
expand: ["certificate", "owner"],
});
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Host is already enabled");
}
row.enabled = 1;
await deadHostModel.query().where("id", row.id).patch({
enabled: 1,
});
// Configure nginx
await internalNginx.configure(deadHostModel, "dead_host", row);
// Add to audit log
await internalAuditLog.add(access, {
action: "enabled",
object_type: "dead-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
return true;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
disable: async (access, data) => {
await access.can("dead_hosts:update", data.id);
const row = await internalDeadHost.get(access, { id: data.id });
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Host is already disabled");
}
row.enabled = 0;
await deadHostModel.query().where("id", row.id).patch({
enabled: 0,
});
// Delete Nginx Config
await internalNginx.deleteConfig("dead_host", row);
await internalNginx.reload();
// Add to audit log
await internalAuditLog.add(access, {
action: "disabled",
object_type: "dead-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
return true;
},
/**
* All Hosts
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("dead_hosts:list");
const query = deadHostModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(deadHostModel.defaultAllowGraph)
.orderBy(castJsonIfNeed("domain_names"), "ASC");
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof searchQuery === "string" && searchQuery.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("domain_names"), "like", `%${searchQuery}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const rows = await query.then(utils.omitRows(omissions()));
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
},
/**
* Report use
*
* @param {Number} user_id
* @param {String} visibility
* @returns {Promise}
*/
getCount: async (user_id, visibility) => {
const query = deadHostModel.query().count("id as count").where("is_deleted", 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
}
const row = await query.first();
return Number.parseInt(row.count, 10);
},
};
export default internalDeadHost;
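The certificate_id "new" sentinel in create/update above first issues a certificate via createQuickCertificate and then re-links the host to it. A minimal usage sketch, assuming the module path and an Access object supplied by the API layer; the domain value is made up.

import internalDeadHost from "./internal/dead-host.js";

// Create a 404 host and request a fresh certificate in one call; the returned
// row is re-fetched with the certificate attached, per the sanity check above.
export const createParkedHost = (access) =>
	internalDeadHost.create(access, {
		domain_names: ["parked.example.com"],
		certificate_id: "new",
	});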


@@ -1,230 +0,0 @@
import _ from "lodash";
import { castJsonIfNeed } from "../lib/helpers.js";
import deadHostModel from "../models/dead_host.js";
import proxyHostModel from "../models/proxy_host.js";
import redirectionHostModel from "../models/redirection_host.js";
const internalHost = {
/**
* Makes sure that the ssl_* and hsts_* fields play nicely together.
* ie: if there is no cert, then force_ssl is off.
* if force_ssl is off, then hsts_enabled is definitely off.
*
* @param {object} data
* @param {object} [existing_data]
* @returns {object}
*/
cleanSslHstsData: (newCert, data, existingData) => {
const combinedData = _.assign({}, existingData || {}, data);
if (!combinedData.certificate_id && !newCert) {
combinedData.hsts_subdomains = false;
combinedData.ssl_forced = false;
}
if (!combinedData.ssl_forced) {
combinedData.hsts_enabled = false;
}
return combinedData;
},
/**
* used by the getAll functions of hosts, this removes the certificate meta if present
*
* @param {Array} rows
* @returns {Array}
*/
cleanAllRowsCertificateMeta: (rows) => {
rows.map((_, idx) => {
if (typeof rows[idx].certificate !== "undefined" && rows[idx].certificate) {
rows[idx].certificate.meta = {};
}
return true;
});
return rows;
},
/**
* used by the get/update functions of hosts, this removes the certificate meta if present
*
* @param {Object} row
* @returns {Object}
*/
cleanRowCertificateMeta: (row) => {
if (typeof row.certificate !== "undefined" && row.certificate) {
row.certificate.meta = {};
}
return row;
},
/**
* This returns all the host types with any domain listed in the provided domainNames array.
* This is used by the certificates to temporarily disable any host that is using the domain
*
* @param {Array} domainNames
* @returns {Promise}
*/
getHostsWithDomains: async (domainNames) => {
const responseObject = {
total_count: 0,
dead_hosts: [],
proxy_hosts: [],
redirection_hosts: [],
};
const proxyRes = await proxyHostModel.query().where("is_deleted", 0);
responseObject.proxy_hosts = internalHost._getHostsWithDomains(proxyRes, domainNames);
responseObject.total_count += responseObject.proxy_hosts.length;
const redirRes = await redirectionHostModel.query().where("is_deleted", 0);
responseObject.redirection_hosts = internalHost._getHostsWithDomains(redirRes, domainNames);
responseObject.total_count += responseObject.redirection_hosts.length;
const deadRes = await deadHostModel.query().where("is_deleted", 0);
responseObject.dead_hosts = internalHost._getHostsWithDomains(deadRes, domainNames);
responseObject.total_count += responseObject.dead_hosts.length;
return responseObject;
},
/**
* Internal use only, checks to see if the domain is already taken by any other record
*
* @param {String} hostname
* @param {String} [ignore_type] 'proxy', 'redirection', 'dead'
* @param {Integer} [ignore_id] Must be supplied if type was also supplied
* @returns {Promise}
*/
isHostnameTaken: (hostname, ignore_type, ignore_id) => {
const promises = [
proxyHostModel
.query()
.where("is_deleted", 0)
.andWhere(castJsonIfNeed("domain_names"), "like", `%${hostname}%`),
redirectionHostModel
.query()
.where("is_deleted", 0)
.andWhere(castJsonIfNeed("domain_names"), "like", `%${hostname}%`),
deadHostModel
.query()
.where("is_deleted", 0)
.andWhere(castJsonIfNeed("domain_names"), "like", `%${hostname}%`),
];
return Promise.all(promises).then((promises_results) => {
let is_taken = false;
if (promises_results[0]) {
// Proxy Hosts
if (
internalHost._checkHostnameRecordsTaken(
hostname,
promises_results[0],
ignore_type === "proxy" && ignore_id ? ignore_id : 0,
)
) {
is_taken = true;
}
}
if (promises_results[1]) {
// Redirection Hosts
if (
internalHost._checkHostnameRecordsTaken(
hostname,
promises_results[1],
ignore_type === "redirection" && ignore_id ? ignore_id : 0,
)
) {
is_taken = true;
}
}
if (promises_results[2]) {
// Dead Hosts
if (
internalHost._checkHostnameRecordsTaken(
hostname,
promises_results[2],
ignore_type === "dead" && ignore_id ? ignore_id : 0,
)
) {
is_taken = true;
}
}
return {
hostname: hostname,
is_taken: is_taken,
};
});
},
/**
* Private call only
*
* @param {String} hostname
* @param {Array} existingRows
* @param {Integer} [ignoreId]
* @returns {Boolean}
*/
_checkHostnameRecordsTaken: (hostname, existingRows, ignoreId) => {
let isTaken = false;
if (existingRows?.length) {
existingRows.map((existingRow) => {
existingRow.domain_names.map((existingHostname) => {
// Does this domain match?
if (existingHostname.toLowerCase() === hostname.toLowerCase()) {
if (!ignoreId || ignoreId !== existingRow.id) {
isTaken = true;
}
}
return true;
});
return true;
});
}
return isTaken;
},
/**
* Private call only
*
* @param {Array} hosts
* @param {Array} domainNames
* @returns {Array}
*/
_getHostsWithDomains: (hosts, domainNames) => {
const response = [];
if (hosts?.length) {
hosts.map((host) => {
let hostMatches = false;
domainNames.map((domainName) => {
host.domain_names.map((hostDomainName) => {
if (domainName.toLowerCase() === hostDomainName.toLowerCase()) {
hostMatches = true;
}
return true;
});
return true;
});
if (hostMatches) {
response.push(host);
}
return true;
});
}
return response;
},
};
export default internalHost;
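cleanSslHstsData is a pure function, so the invariants described in its doc comment are easy to demonstrate; a short sketch assuming the module path, with made-up field values.

import internalHost from "./internal/host.js";

// No certificate and none being created: the SSL flags are forced off, which in turn disables HSTS.
console.log(internalHost.cleanSslHstsData(false, { ssl_forced: true, hsts_enabled: true }));
// -> { ssl_forced: false, hsts_enabled: false, hsts_subdomains: false }

// Certificate present but force-SSL off: HSTS alone is still not allowed.
console.log(internalHost.cleanSslHstsData(false, { certificate_id: 7, ssl_forced: false, hsts_enabled: true }));
// -> { certificate_id: 7, ssl_forced: false, hsts_enabled: false }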


@@ -1,109 +0,0 @@
import { readFile, writeFile } from "node:fs/promises";
import { dirname } from "node:path";
import { fileURLToPath } from "node:url";
import utils from "../lib/utils.js";
import { ipRanges as logger } from "../logger.js";
import internalNginx from "./nginx.js";
import pjson from "../package.json" with { type: "json" };
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const CLOUDFLARE_V4_URL = "https://www.cloudflare.com/ips-v4";
const CLOUDFLARE_V6_URL = "https://www.cloudflare.com/ips-v6";
const regIpV4 = /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$/;
const regIpV6 = /^([0-9a-fA-F:]+)\/[0-9]{1,3}$/;
const internalIpRanges = {
interval_timeout: 1000 * 60 * 60, // 1 hour
interval: null,
interval_processing: false,
initTimer: () => {
logger.info("IP Ranges Renewal Timer initialized");
internalIpRanges.interval = setInterval(internalIpRanges.fetch, internalIpRanges.interval_timeout);
},
fetchUrl: async (url) => {
const res = await fetch(url, {
headers: { "User-Agent": `NPMplus/${pjson.version}` },
});
if (!res.ok) {
throw new Error(`Status code: ${res.status}`);
}
return await res.text();
},
/**
* Triggered at startup and then later by a timer, this will fetch the ip ranges from services and apply them to nginx.
*/
fetch: async () => {
if (internalIpRanges.interval_processing) {
return;
}
internalIpRanges.interval_processing = true;
logger.info("Fetching IP Ranges from online services...");
try {
const [v4Data, v6Data] = await Promise.all([
internalIpRanges.fetchUrl(CLOUDFLARE_V4_URL),
internalIpRanges.fetchUrl(CLOUDFLARE_V6_URL),
]);
const v4Ranges = v4Data
.split("\n")
.map((line) => line.trim())
.filter((line) => regIpV4.test(line));
const v6Ranges = v6Data
.split("\n")
.map((line) => line.trim())
.filter((line) => regIpV6.test(line));
const ip_ranges = [...v4Ranges, ...v6Ranges];
if (await internalIpRanges.generateConfig(ip_ranges)) {
await internalNginx.reload();
}
} catch (err) {
logger.error(err.message);
} finally {
internalIpRanges.interval_processing = false;
}
},
/**
* @param {Array} ip_ranges
* @returns {Promise<boolean>}
*/
generateConfig: async (ip_ranges) => {
try {
const renderEngine = utils.getRenderEngine();
const template = await readFile(`${__dirname}/../templates/ip_ranges.conf`, { encoding: "utf8" });
const newConfig = await renderEngine.parseAndRender(template, { ip_ranges: ip_ranges });
const filePath = "/usr/local/nginx/conf/conf.d/ip_ranges.conf";
try {
const oldConfig = await readFile(filePath, {
encoding: "utf8",
});
if (oldConfig === newConfig) {
logger.info("Not updating Cloudflared IPs");
return false;
}
} catch {}
await writeFile(filePath, newConfig, { encoding: "utf8" });
logger.info("Updated Cloudflared IPs");
return true;
} catch (err) {
logger.error(`Error updating Cloudflare IPs: ${err.message}`);
return false;
}
},
};
export default internalIpRanges;
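The two regular expressions above are what keep malformed lines out of the generated config; a self-contained illustration with a hand-written sample payload (the CIDRs are examples, not live Cloudflare data).

// Same filters as in fetch() above, applied to a fake response body.
const regIpV4 = /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$/;
const regIpV6 = /^([0-9a-fA-F:]+)\/[0-9]{1,3}$/;

const body = "173.245.48.0/20\nnot an ip\n2400:cb00::/32\n";
const lines = body.split("\n").map((line) => line.trim());
const ip_ranges = [
	...lines.filter((line) => regIpV4.test(line)), // ["173.245.48.0/20"]
	...lines.filter((line) => regIpV6.test(line)), // ["2400:cb00::/32"]
];
console.log(ip_ranges); // v4 ranges first, then v6, matching the order built above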


@@ -1,327 +0,0 @@
import { readFile, rename, rm, writeFile } from "node:fs/promises";
import { dirname } from "node:path";
import { domainToASCII, fileURLToPath } from "node:url";
import _ from "lodash";
import errs from "../lib/error.js";
import utils from "../lib/utils.js";
import { debug, nginx as logger } from "../logger.js";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const internalNginx = {
/**
* This will:
* - test the nginx config first to make sure it's OK
* - create / recreate the config for the host
* - test again
* - IF OK: update the meta with online status
* - IF BAD: update the meta with offline status and rename the config
* - then reload nginx
*
* @param {Object|String} model
* @param {String} host_type
* @param {Object} host
* @returns {Promise}
*/
configure: async (model, host_type, host) => {
let combined_meta = {};
await internalNginx.deleteConfig(host_type, host);
await internalNginx.generateConfig(host_type, host);
try {
await internalNginx.test();
combined_meta = _.assign({}, host.meta, {
nginx_online: true,
nginx_err: null,
});
await model.query().where("id", host.id).patch({
meta: combined_meta,
});
} catch (err) {
logger.error(err.message);
// config is bad, update meta and rename config
combined_meta = _.assign({}, host.meta, {
nginx_online: false,
nginx_err: err.message,
});
await model.query().where("id", host.id).patch({
meta: combined_meta,
});
await internalNginx.renameConfigAsError(host_type, host);
}
await internalNginx.reload();
return combined_meta;
},
/**
* @returns {Promise}
*/
test: async () => {
return utils.execFile("nginx", ["-tq"]);
},
/**
* @returns {Promise}
*/
reload: async () => {
if (process.env.ACME_OCSP_STAPLING === "true") {
try {
await utils.execFile("certbot-ocsp-fetcher.sh", [
"-c",
"/data/tls/certbot/live",
"-o",
"/data/tls/certbot/live",
"--no-reload-webserver",
"--quiet",
]);
} catch {}
}
if (process.env.CUSTOM_OCSP_STAPLING === "true") {
try {
await utils.execFile("certbot-ocsp-fetcher.sh", [
"-c",
"/data/tls/custom",
"-o",
"/data/tls/custom",
"--no-reload-webserver",
"--quiet",
]);
} catch {}
}
await internalNginx.test();
return utils.execFile("nginx", ["-s", "reload"]);
},
/**
* @param {String} host_type
* @param {Integer} host_id
* @returns {String}
*/
getConfigName: (host_type, host_id) => {
if (host_type === "default") {
return "/usr/local/nginx/conf/conf.d/default.conf";
}
return `/data/nginx/${internalNginx.getFileFriendlyHostType(host_type)}/${host_id}.conf`;
},
/**
* Generates custom locations
* @param {Object} host
* @returns {Promise}
*/
renderLocations: async (host) => {
let template;
try {
template = await readFile(`${__dirname}/../templates/_proxy_host_custom_location.conf`, {
encoding: "utf8",
});
} catch (err) {
throw new errs.ConfigurationError(err.message);
}
const renderEngine = utils.getRenderEngine();
let renderedLocations = "";
for (const location of host.locations) {
if (location.npmplus_enabled === false) {
continue;
}
if (
location.forward_host.indexOf("/") > -1 &&
!location.forward_host.startsWith("/") &&
!location.forward_host.startsWith("unix")
) {
const split = location.forward_host.split("/");
location.forward_host = split.shift();
location.forward_path = `/${split.join("/")}`;
}
renderedLocations += await renderEngine.parseAndRender(template, location);
}
return renderedLocations;
},
/**
* @param {String} host_type
* @param {Object} host_row
* @returns {Promise}
*/
generateConfig: async (host_type, host_row) => {
// Prevent modifying the original object:
const host = JSON.parse(JSON.stringify(host_row));
const nice_host_type = internalNginx.getFileFriendlyHostType(host_type);
const renderEngine = utils.getRenderEngine();
let template = null;
const filename = internalNginx.getConfigName(nice_host_type, host.id);
try {
template = await readFile(`${__dirname}/../templates/${nice_host_type}.conf`, { encoding: "utf8" });
} catch (err) {
throw new errs.ConfigurationError(err.message);
}
let origLocations;
// Manipulate the data a bit before sending it to the template
if (nice_host_type !== "default") {
host.use_default_location = true;
if (typeof host.advanced_config !== "undefined" && host.advanced_config) {
host.use_default_location = !internalNginx.advancedConfigHasDefaultLocation(host.advanced_config);
}
}
// For redirection hosts, if the scheme is not http or https, set it to $scheme
if (
nice_host_type === "redirection_host" &&
["http", "https"].indexOf(host.forward_scheme.toLowerCase()) === -1
) {
host.forward_scheme = "$scheme";
}
if (host.locations) {
_.map(host.locations, (location) => {
if (location.path === "/" && location.location_type !== "= " && location.npmplus_enabled !== false) {
host.use_default_location = false;
}
if (location.npmplus_auth_request === "anubis") {
host.create_anubis_locations = true;
}
if (location.npmplus_auth_request === "tinyauth") {
host.create_tinyauth_locations = true;
}
if (location.npmplus_auth_request === "authelia") {
host.create_authelia_locations = true;
}
if (
location.npmplus_auth_request === "authentik" ||
location.npmplus_auth_request === "authentik-send-basic-auth"
) {
host.create_authentik_locations = true;
}
});
host.locations = await internalNginx.renderLocations(host);
}
if (
host.forward_host &&
host.forward_host.indexOf("/") > -1 &&
!host.forward_host.startsWith("/") &&
!host.forward_host.startsWith("unix")
) {
const split = host.forward_host.split("/");
host.forward_host = split.shift();
host.forward_path = `/${split.join("/")}`;
}
if (host.domain_names) {
host.server_names = host.domain_names.map((domain_name) => domainToASCII(domain_name) || domain_name);
}
host.env = process.env;
try {
const config_text = await renderEngine.parseAndRender(template, host);
await writeFile(filename, config_text, { encoding: "utf8" });
debug(logger, "Wrote config:", filename);
if (process.env.DISABLE_NGINX_BEAUTIFIER === "false") {
await utils.execFile("nginxbeautifier", ["-s", "4", filename]).catch(() => {});
}
return true;
} catch (err) {
debug(logger, `Could not write ${filename}:`, err.message);
throw new errs.ConfigurationError(err.message);
}
},
/**
*
* @param {String} host_type
* @returns {String}
*/
getFileFriendlyHostType: (host_type) => {
return host_type.replace(/-/g, "_");
},
/**
* @param {String} host_type
* @param {Object} [host]
* @returns {Promise}
*/
deleteConfig: async (host_type, host) => {
const config_file = internalNginx.getConfigName(
internalNginx.getFileFriendlyHostType(host_type),
typeof host === "undefined" ? 0 : host.id,
);
const filesToDelete = [config_file, `${config_file}.err`];
for (const filename of filesToDelete) {
try {
debug(logger, `Deleting file: ${filename}`);
await rm(filename, { force: true });
} catch (err) {
debug(logger, "Could not delete file:", JSON.stringify(err, null, 2));
}
}
},
/**
* @param {String} host_type
* @param {Object} [host]
* @returns {Promise}
*/
renameConfigAsError: async (host_type, host) => {
const config_file = internalNginx.getConfigName(
internalNginx.getFileFriendlyHostType(host_type),
typeof host === "undefined" ? 0 : host.id,
);
try {
await rename(config_file, `${config_file}.err`);
} catch {}
},
/**
* @param {Object} model
* @param {String} hostType
* @param {Array} hosts
* @returns {Promise}
*/
bulkGenerateConfigs: async (model, hostType, hosts) => {
const results = [];
for (const host of hosts) {
const result = await internalNginx.configure(model, hostType, host);
results.push(result);
}
return results;
},
/**
* @param {string} config
* @returns {boolean}
*/
advancedConfigHasDefaultLocation: (cfg) => !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im),
};
export default internalNginx;
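The use_default_location logic in generateConfig above hinges on advancedConfigHasDefaultLocation; since it is a pure regex test, its behaviour can be shown directly (the import path is an assumption).

import internalNginx from "./internal/nginx.js";

// A root `location / { ... }` in advanced_config suppresses the template's default location...
console.log(internalNginx.advancedConfigHasDefaultLocation("location / {\n\treturn 404;\n}")); // true
// ...even when it follows other directives on the same line.
console.log(internalNginx.advancedConfigHasDefaultLocation("gzip on; location / { return 404; }")); // true
// Non-root locations do not count.
console.log(internalNginx.advancedConfigHasDefaultLocation("location /api/ { proxy_pass http://up; }")); // false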


@@ -1,474 +0,0 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import proxyHostModel from "../models/proxy_host.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const omissions = () => {
return ["is_deleted", "owner.is_deleted"];
};
const internalProxyHost = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: (access, data) => {
let thisData = data;
const createCertificate = thisData.certificate_id === "new";
if (createCertificate) {
delete thisData.certificate_id;
}
return access
.can("proxy_hosts:create", thisData)
.then(() => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
thisData.domain_names.map((domain_name) => {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name));
return true;
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
});
});
})
.then(() => {
// At this point the domains should have been checked
thisData.owner_user_id = access.token.getUserId(1);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData);
// Fix for db field not having a default value
// for this optional field.
if (typeof thisData.advanced_config === "undefined") {
thisData.advanced_config = "";
}
return proxyHostModel.query().insertAndFetch(thisData).then(utils.omitRow(omissions()));
})
.then((row) => {
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, thisData)
.then((cert) => {
// update host with cert id
return internalProxyHost.update(access, {
id: row.id,
certificate_id: cert.id,
});
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// re-fetch with cert
return internalProxyHost.get(access, {
id: row.id,
expand: ["certificate", "owner", "access_list.[clients,items]"],
});
})
.then((row) => {
// Configure nginx
return internalNginx.configure(proxyHostModel, "proxy_host", row).then(() => {
return row;
});
})
.then((row) => {
// Audit log
thisData.meta = _.assign({}, thisData.meta || {}, row.meta);
// Add to audit log
return internalAuditLog
.add(access, {
action: "created",
object_type: "proxy-host",
object_id: row.id,
meta: thisData,
})
.then(() => {
return row;
});
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @return {Promise}
*/
update: (access, data) => {
let thisData = data;
const createCertificate = thisData.certificate_id === "new";
if (createCertificate) {
delete thisData.certificate_id;
}
return access
.can("proxy_hosts:update", thisData.id)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
if (typeof thisData.domain_names !== "undefined") {
thisData.domain_names.map((domain_name) => {
return domain_name_check_promises.push(
internalHost.isHostnameTaken(domain_name, "proxy", thisData.id),
);
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
});
});
}
})
.then(() => {
return internalProxyHost.get(access, { id: thisData.id });
})
.then((row) => {
if (row.id !== thisData.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Proxy Host could not be updated, IDs do not match: ${row.id} !== ${thisData.id}`,
);
}
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, {
domain_names: thisData.domain_names || row.domain_names,
meta: _.assign({}, row.meta, thisData.meta),
})
.then((cert) => {
// update host with cert id
thisData.certificate_id = cert.id;
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
data,
);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData, row);
return proxyHostModel
.query()
.where({ id: thisData.id })
.patch(thisData)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "proxy-host",
object_id: row.id,
meta: thisData,
})
.then(() => {
return saved_row;
});
});
})
.then(() => {
return internalProxyHost
.get(access, {
id: thisData.id,
expand: ["owner", "certificate", "access_list.[clients,items]"],
})
.then((row) => {
if (!row.enabled) {
// No need to add nginx config if host is disabled
return row;
}
// Configure nginx
return internalNginx.configure(proxyHostModel, "proxy_host", row).then((new_meta) => {
row.meta = new_meta;
return _.omit(internalHost.cleanRowCertificateMeta(row), omissions());
});
});
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
return access
.can("proxy_hosts:get", thisData.id)
.then((access_data) => {
const query = proxyHostModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph(proxyHostModel.defaultAllowGraph)
.first();
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(thisData.id);
}
const thisRow = internalHost.cleanRowCertificateMeta(row);
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(row, thisData.omit);
}
return thisRow;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("proxy_hosts:delete", data.id)
.then(() => {
return internalProxyHost.get(access, { id: data.id });
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
return proxyHostModel
.query()
.where("id", row.id)
.patch({
is_deleted: 1,
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("proxy_host", row).then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "proxy-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
enable: (access, data) => {
return access
.can("proxy_hosts:update", data.id)
.then(() => {
return internalProxyHost.get(access, {
id: data.id,
expand: ["certificate", "owner", "access_list"],
});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Host is already enabled");
}
row.enabled = 1;
return proxyHostModel
.query()
.where("id", row.id)
.patch({
enabled: 1,
})
.then(() => {
// Configure nginx
return internalNginx.configure(proxyHostModel, "proxy_host", row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "enabled",
object_type: "proxy-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
disable: (access, data) => {
return access
.can("proxy_hosts:update", data.id)
.then(() => {
return internalProxyHost.get(access, { id: data.id });
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Host is already disabled");
}
row.enabled = 0;
return proxyHostModel
.query()
.where("id", row.id)
.patch({
enabled: 0,
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("proxy_host", row).then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "disabled",
object_type: "proxy-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* All Hosts
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("proxy_hosts:list");
const query = proxyHostModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(proxyHostModel.defaultAllowGraph)
.orderBy(castJsonIfNeed("domain_names"), "ASC");
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof searchQuery === "string" && searchQuery.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("domain_names"), "like", `%${searchQuery}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const rows = await query.then(utils.omitRows(omissions()));
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
},
/**
* Report use
*
* @param {Number} user_id
* @param {String} visibility
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = proxyHostModel.query().count("id as count").where("is_deleted", 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
}
return query.first().then((row) => {
return Number.parseInt(row.count, 10);
});
},
};
export default internalProxyHost;
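A minimal usage sketch for the create flow above. The module path and the forward_host value are assumptions; only domain_names and the certificate_id sentinel appear in this file.

import internalProxyHost from "./internal/proxy-host.js";

export const addProxyHost = (access) =>
	internalProxyHost.create(access, {
		domain_names: ["app.example.com"],
		forward_host: "10.0.0.5", // hypothetical upstream
		certificate_id: "new", // issue a certificate first, then re-link it via update()
	});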


@@ -1,477 +0,0 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import redirectionHostModel from "../models/redirection_host.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const omissions = () => {
return ["is_deleted"];
};
const internalRedirectionHost = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: (access, data) => {
let thisData = data || {};
const createCertificate = thisData.certificate_id === "new";
if (createCertificate) {
delete thisData.certificate_id;
}
return access
.can("redirection_hosts:create", thisData)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
thisData.domain_names.map((domain_name) => {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name));
return true;
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
});
});
})
.then(() => {
// At this point the domains should have been checked
thisData.owner_user_id = access.token.getUserId(1);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData);
// Fix for db field not having a default value
// for this optional field.
if (typeof data.advanced_config === "undefined") {
data.advanced_config = "";
}
return redirectionHostModel.query().insertAndFetch(thisData).then(utils.omitRow(omissions()));
})
.then((row) => {
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, thisData)
.then((cert) => {
// update host with cert id
return internalRedirectionHost.update(access, {
id: row.id,
certificate_id: cert.id,
});
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// re-fetch with cert
return internalRedirectionHost.get(access, {
id: row.id,
expand: ["certificate", "owner"],
});
})
.then((row) => {
// Configure nginx
return internalNginx.configure(redirectionHostModel, "redirection_host", row).then(() => {
return row;
});
})
.then((row) => {
thisData.meta = _.assign({}, thisData.meta || {}, row.meta);
// Add to audit log
return internalAuditLog
.add(access, {
action: "created",
object_type: "redirection-host",
object_id: row.id,
meta: thisData,
})
.then(() => {
return row;
});
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @return {Promise}
*/
update: (access, data) => {
let thisData = data || {};
const createCertificate = thisData.certificate_id === "new";
if (createCertificate) {
delete thisData.certificate_id;
}
return access
.can("redirection_hosts:update", thisData.id)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
if (typeof thisData.domain_names !== "undefined") {
thisData.domain_names.map((domain_name) => {
domain_name_check_promises.push(
internalHost.isHostnameTaken(domain_name, "redirection", thisData.id),
);
return true;
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
});
});
}
})
.then(() => {
return internalRedirectionHost.get(access, { id: thisData.id });
})
.then((row) => {
if (row.id !== thisData.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Redirection Host could not be updated, IDs do not match: ${row.id} !== ${thisData.id}`,
);
}
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, {
domain_names: thisData.domain_names || row.domain_names,
meta: _.assign({}, row.meta, thisData.meta),
})
.then((cert) => {
// update host with cert id
thisData.certificate_id = cert.id;
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
thisData,
);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData, row);
return redirectionHostModel
.query()
.where({ id: thisData.id })
.patch(thisData)
.then((saved_row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "redirection-host",
object_id: row.id,
meta: thisData,
})
.then(() => {
return _.omit(saved_row, omissions());
});
});
})
.then(() => {
return internalRedirectionHost
.get(access, {
id: thisData.id,
expand: ["owner", "certificate"],
})
.then((row) => {
// Configure nginx
return internalNginx
.configure(redirectionHostModel, "redirection_host", row)
.then((new_meta) => {
row.meta = new_meta;
return _.omit(internalHost.cleanRowCertificateMeta(row), omissions());
});
});
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
return access
.can("redirection_hosts:get", thisData.id)
.then((access_data) => {
const query = redirectionHostModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph(redirectionHostModel.defaultAllowGraph)
.first();
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
let thisRow = row;
if (!thisRow || !thisRow.id) {
throw new errs.ItemNotFoundError(thisData.id);
}
thisRow = internalHost.cleanRowCertificateMeta(thisRow);
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(thisRow, thisData.omit);
}
return thisRow;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("redirection_hosts:delete", data.id)
.then(() => {
return internalRedirectionHost.get(access, { id: data.id });
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
return redirectionHostModel
.query()
.where("id", row.id)
.patch({
is_deleted: 1,
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("redirection_host", row).then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "redirection-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
enable: (access, data) => {
return access
.can("redirection_hosts:update", data.id)
.then(() => {
return internalRedirectionHost.get(access, {
id: data.id,
expand: ["certificate", "owner"],
});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Host is already enabled");
}
row.enabled = 1;
return redirectionHostModel
.query()
.where("id", row.id)
.patch({
enabled: 1,
})
.then(() => {
// Configure nginx
return internalNginx.configure(redirectionHostModel, "redirection_host", row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "enabled",
object_type: "redirection-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
disable: (access, data) => {
return access
.can("redirection_hosts:update", data.id)
.then(() => {
return internalRedirectionHost.get(access, { id: data.id });
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Host is already disabled");
}
row.enabled = 0;
return redirectionHostModel
.query()
.where("id", row.id)
.patch({
enabled: 0,
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("redirection_host", row).then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "disabled",
object_type: "redirection-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* All Hosts
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: (access, expand, search_query) => {
return access
.can("redirection_hosts:list")
.then((access_data) => {
const query = redirectionHostModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(redirectionHostModel.defaultAllowGraph)
.orderBy(castJsonIfNeed("domain_names"), "ASC");
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof search_query === "string" && search_query.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("domain_names"), "like", `%${search_query}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
});
},
/**
* Report use
*
* @param {Number} user_id
* @param {String} visibility
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = redirectionHostModel.query().count("id as count").where("is_deleted", 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
}
return query.first().then((row) => {
return Number.parseInt(row.count, 10);
});
},
};
export default internalRedirectionHost;


@@ -1,56 +0,0 @@
import { remoteVersion as logger } from "../logger.js";
import pjson from "../package.json" with { type: "json" };
const internalRemoteVersion = {
cache_timeout: 1000 * 60 * 60, // 1 hour
last_result: null,
last_fetch_time: null,
/**
* Fetch the latest version info, using a cached result if within the cache timeout period.
* @return {Promise<{current: string, latest: string, update_available: boolean}>} Version info
*/
get: async () => {
try {
if (
!internalRemoteVersion.last_result ||
!internalRemoteVersion.last_fetch_time ||
Date.now() - internalRemoteVersion.last_fetch_time > internalRemoteVersion.cache_timeout
) {
const response = await fetch("https://api.github.com/repos/ZoeyVid/NPMplus/releases/latest", {
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
},
});
if (!response.ok) {
throw new Error(`Status code: ${response.status}`);
}
const data = await response.json();
internalRemoteVersion.last_result = data;
internalRemoteVersion.last_fetch_time = Date.now();
}
} catch (error) {
logger.error("Failed to fetch remote version:", error.message);
if (!internalRemoteVersion.last_result) {
return {
current: pjson.version,
latest: "unknown",
update_available: false,
};
}
}
const latestVersion = internalRemoteVersion.last_result?.tag_name || "unknown";
const currentVersion = pjson.version;
return {
current: currentVersion,
latest: latestVersion,
update_available: currentVersion < latestVersion && currentVersion.length >= 13,
};
},
};
export default internalRemoteVersion;
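Because results are memoised in last_result, repeated calls within the one-hour cache window hit GitHub only once; a short sketch assuming the module path.

import internalRemoteVersion from "./internal/remote-version.js";

const first = await internalRemoteVersion.get(); // fetches from the GitHub API
const second = await internalRemoteVersion.get(); // served from last_result, no network request
console.log(first.latest === second.latest); // true while the cache is warm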


@@ -1,37 +0,0 @@
import internalDeadHost from "./dead-host.js";
import internalProxyHost from "./proxy-host.js";
import internalRedirectionHost from "./redirection-host.js";
import internalStream from "./stream.js";
const internalReport = {
/**
* @param {Access} access
* @return {Promise}
*/
getHostsReport: (access) => {
return access
.can("reports:hosts", 1)
.then((access_data) => {
const userId = access.token.getUserId(1);
const promises = [
internalProxyHost.getCount(userId, access_data.permission_visibility),
internalRedirectionHost.getCount(userId, access_data.permission_visibility),
internalStream.getCount(userId, access_data.permission_visibility),
internalDeadHost.getCount(userId, access_data.permission_visibility),
];
return Promise.all(promises);
})
.then((counts) => {
return {
proxy: counts.shift(),
redirection: counts.shift(),
stream: counts.shift(),
dead: counts.shift(),
};
});
},
};
export default internalReport;
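getHostsReport fans the request out to the four getCount helpers and reassembles the counts by shifting them off in the same order they were queued; a usage sketch assuming the module path.

import internalReport from "./internal/report.js";

export const logHostCounts = async (access) => {
	const report = await internalReport.getHostsReport(access);
	console.log(report); // shape: { proxy: <n>, redirection: <n>, stream: <n>, dead: <n> }
};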


@@ -1,125 +0,0 @@
import fs from "node:fs";
import errs from "../lib/error.js";
import settingModel from "../models/setting.js";
import internalNginx from "./nginx.js";
const internalSetting = {
/**
* @param {Access} access
* @param {Object} data
* @param {String} data.id
* @return {Promise}
*/
update: (access, data) => {
return access
.can("settings:update", data.id)
.then((/*access_data*/) => {
return internalSetting.get(access, { id: data.id });
})
.then((row) => {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Setting could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
}
return settingModel.query().where({ id: data.id }).patch(data);
})
.then(() => {
return internalSetting.get(access, {
id: data.id,
});
})
.then((row) => {
if (row.id === "default-site") {
// write the html if we need to
if (row.value === "html") {
fs.writeFileSync("/data/html/index.html", row.meta.html, { encoding: "utf8" });
}
// Configure nginx
return internalNginx
.deleteConfig("default")
.then(() => {
return internalNginx.generateConfig("default", row);
})
.then(() => {
return internalNginx.test();
})
.then(() => {
return internalNginx.reload();
})
.then(() => {
return row;
})
.catch((/*err*/) => {
return internalNginx
.deleteConfig("default")
.then(() => {
return internalNginx.test();
})
.then(() => {
return internalNginx.reload();
})
.then(() => {
// I'm being slack here I know..
throw new errs.ValidationError("Could not reconfigure Nginx. Please check logs.");
});
});
}
return row;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {String} data.id
* @return {Promise}
*/
get: (access, data) => {
return access
.can("settings:get", data.id)
.then(() => {
return settingModel.query().where("id", data.id).first();
})
.then((row) => {
if (row) {
return row;
}
throw new errs.ItemNotFoundError(data.id);
});
},
/**
* This will only count the settings
*
* @param {Access} access
* @returns {*}
*/
getCount: (access) => {
return access
.can("settings:list")
.then(() => {
return settingModel.query().count("id as count").first();
})
.then((row) => {
return Number.parseInt(row.count, 10);
});
},
/**
* All settings
*
* @param {Access} access
* @returns {Promise}
*/
getAll: (access) => {
return access.can("settings:list").then(() => {
return settingModel.query().orderBy("description", "ASC");
});
},
};
export default internalSetting;
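For the default-site setting, update above writes meta.html to /data/html/index.html and regenerates the default nginx config; a hedged sketch of a caller, with the module path assumed and the payload shape inferred from the handler above.

import internalSetting from "./internal/setting.js";

export const setDefaultSiteHtml = (access, html) =>
	internalSetting.update(access, {
		id: "default-site",
		value: "html", // triggers the index.html write before nginx is reconfigured
		meta: { html },
	});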


@@ -1,426 +0,0 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import streamModel from "../models/stream.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const omissions = () => {
return ["is_deleted", "owner.is_deleted", "certificate.is_deleted"];
};
const internalStream = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: (access, data) => {
const create_certificate = data.certificate_id === "new";
if (create_certificate) {
delete data.certificate_id;
}
return access
.can("streams:create", data)
.then((/*access_data*/) => {
// TODO: At this point the existing ports should have been checked
data.owner_user_id = access.token.getUserId(1);
if (typeof data.meta === "undefined") {
data.meta = {};
}
// streams aren't routed by domain name so don't store domain names in the DB
const data_no_domains = structuredClone(data);
delete data_no_domains.domain_names;
return streamModel.query().insertAndFetch(data_no_domains).then(utils.omitRow(omissions()));
})
.then((row) => {
if (create_certificate) {
return internalCertificate
.createQuickCertificate(access, data)
.then((cert) => {
// update host with cert id
return internalStream.update(access, {
id: row.id,
certificate_id: cert.id,
});
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// re-fetch with cert
return internalStream.get(access, {
id: row.id,
expand: ["certificate", "owner"],
});
})
.then((row) => {
// Configure nginx
return internalNginx.configure(streamModel, "stream", row).then(() => {
return row;
});
})
.then((row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "created",
object_type: "stream",
object_id: row.id,
meta: data,
})
.then(() => {
return row;
});
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @return {Promise}
*/
update: (access, data) => {
let thisData = data;
const create_certificate = thisData.certificate_id === "new";
if (create_certificate) {
delete thisData.certificate_id;
}
return access
.can("streams:update", thisData.id)
.then((/*access_data*/) => {
// TODO: at this point the existing streams should have been checked
return internalStream.get(access, { id: thisData.id });
})
.then((row) => {
if (row.id !== thisData.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Stream could not be updated, IDs do not match: ${row.id} !== ${thisData.id}`,
);
}
if (create_certificate) {
return internalCertificate
.createQuickCertificate(access, {
domain_names: thisData.domain_names || row.domain_names,
meta: _.assign({}, row.meta, thisData.meta),
})
.then((cert) => {
// update host with cert id
thisData.certificate_id = cert.id;
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
thisData,
);
return streamModel
.query()
.patchAndFetchById(row.id, thisData)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "stream",
object_id: row.id,
meta: thisData,
})
.then(() => {
return saved_row;
});
});
})
.then(() => {
return internalStream.get(access, { id: thisData.id, expand: ["owner", "certificate"] }).then((row) => {
return internalNginx.configure(streamModel, "stream", row).then((new_meta) => {
row.meta = new_meta;
return _.omit(internalHost.cleanRowCertificateMeta(row), omissions());
});
});
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
return access
.can("streams:get", thisData.id)
.then((access_data) => {
const query = streamModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph(streamModel.defaultAllowGraph)
.first();
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
let thisRow = row;
if (!thisRow || !thisRow.id) {
throw new errs.ItemNotFoundError(thisData.id);
}
thisRow = internalHost.cleanRowCertificateMeta(thisRow);
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(thisRow, thisData.omit);
}
return thisRow;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("streams:delete", data.id)
.then(() => {
return internalStream.get(access, { id: data.id });
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
return streamModel
.query()
.where("id", row.id)
.patch({
is_deleted: 1,
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("stream", row).then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "stream",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
enable: (access, data) => {
return access
.can("streams:update", data.id)
.then(() => {
return internalStream.get(access, {
id: data.id,
expand: ["certificate", "owner"],
});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Stream is already enabled");
}
row.enabled = 1;
return streamModel
.query()
.where("id", row.id)
.patch({
enabled: 1,
})
.then(() => {
// Configure nginx
return internalNginx.configure(streamModel, "stream", row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "enabled",
object_type: "stream",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Number} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
disable: (access, data) => {
return access
.can("streams:update", data.id)
.then(() => {
return internalStream.get(access, { id: data.id });
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Stream is already disabled");
}
row.enabled = 0;
return streamModel
.query()
.where("id", row.id)
.patch({
enabled: 0,
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("stream", row).then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "disabled",
object_type: "stream",
object_id: row.id,
meta: _.omit(row, omissions()),
});
});
})
.then(() => {
return true;
});
},
/**
* All Streams
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: (access, expand, search_query) => {
return access
.can("streams:list")
.then((access_data) => {
const query = streamModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(streamModel.defaultAllowGraph)
.orderBy("incoming_port", "ASC");
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
// search_query is used to filter the results
if (typeof search_query === "string" && search_query.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("incoming_port"), "like", `%${search_query}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
});
},
/**
* Report use
*
* @param {Number} user_id
* @param {String} visibility
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = streamModel.query().count("id AS count").where("is_deleted", 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
}
return query.first().then((row) => {
return Number.parseInt(row.count, 10);
});
},
};
export default internalStream;
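
As a rough usage sketch (the Express-style router, route path, and the `access` object from auth middleware are assumptions, not shown in this diff), a route handler might drive these methods like so:

// Hypothetical route wiring for internalStream (sketch only).
router.get("/api/nginx/streams/:id", (req, res, next) => {
    internalStream
        .get(res.locals.access, {
            id: Number.parseInt(req.params.id, 10),
            expand: ["owner", "certificate"],
        })
        .then((row) => res.status(200).send(row))
        .catch(next); // ItemNotFoundError etc. fall through to the error handler
});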


@@ -1,270 +0,0 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { parseDatePeriod } from "../lib/helpers.js";
import authModel from "../models/auth.js";
import TokenModel from "../models/token.js";
import userModel from "../models/user.js";
import twoFactor from "./2fa.js";
const ERROR_MESSAGE_INVALID_AUTH = "Invalid email or password";
const ERROR_MESSAGE_INVALID_AUTH_I18N = "error.invalid-auth";
const ERROR_MESSAGE_INVALID_2FA = "Invalid verification code";
const ERROR_MESSAGE_INVALID_2FA_I18N = "error.invalid-2fa";
export default {
/**
* @param {Object} data
* @param {String} data.identity
* @param {String} data.secret
* @param {String} [data.scope]
* @param {String} [data.expiry]
* @param {String} [issuer]
* @returns {Promise}
*/
getTokenFromEmail: async (data, issuer) => {
const Token = TokenModel();
data.scope = data.scope || "user";
data.expiry = data.expiry || "1d";
const user = await userModel
.query()
.where("email", data.identity.toLowerCase().trim())
.andWhere("is_deleted", 0)
.andWhere("is_disabled", 0)
.first();
if (!user) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH);
}
const auth = await authModel.query().where("user_id", "=", user.id).where("type", "=", "password").first();
if (!auth) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH);
}
const valid = await auth.verifyPassword(data.secret);
if (!valid) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH, ERROR_MESSAGE_INVALID_AUTH_I18N);
}
if (data.scope !== "user" && _.indexOf(user.roles, data.scope) === -1) {
// The scope requested doesn't exist as a role against the user,
// you shall not pass.
throw new errs.AuthError(`Invalid scope: ${data.scope}`);
}
// Check if 2FA is enabled
const has2FA = await twoFactor.isEnabled(user.id);
if (has2FA) {
// Return challenge token instead of full token
const challengeToken = await Token.create({
iss: issuer || "api",
attrs: {
id: user.id,
},
scope: ["2fa-challenge"],
expiresIn: "5m",
});
return {
requires_2fa: true,
challenge_token: challengeToken.token,
};
}
// Create a dayjs of the expiry expression
const expiry = parseDatePeriod(data.expiry);
if (expiry === null) {
throw new errs.AuthError(`Invalid expiry time: ${data.expiry}`);
}
const signed = await Token.create({
iss: issuer || "api",
attrs: {
id: user.id,
},
scope: [data.scope],
expiresIn: data.expiry,
});
return {
token: signed.token,
expires: expiry.toISOString(),
};
},
/**
* @param {Object} data
* @param {String} data.identity
* @returns {Promise}
*/
getTokenFromOAuthClaim: async (data) => {
const Token = TokenModel();
data.scope = "user";
data.expiry = "1d";
const user = await userModel
.query()
.where("email", data.identity.toLowerCase().trim())
.andWhere("is_deleted", 0)
.andWhere("is_disabled", 0)
.first();
if (!user) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH);
}
// Create a dayjs of the expiry expression
const expiry = parseDatePeriod(data.expiry);
if (expiry === null) {
throw new errs.AuthError(`Invalid expiry time: ${data.expiry}`);
}
const signed = await Token.create({
iss: "api",
attrs: {
id: user.id,
},
scope: [data.scope],
expiresIn: data.expiry,
});
return {
token: signed.token,
expires: expiry.toISOString(),
};
},
/**
* @param {Access} access
* @param {Object} [data]
* @param {String} [data.expiry]
* @param {String} [data.scope] Only considered if existing token scope is admin
* @returns {Promise}
*/
getFreshToken: async (access, data) => {
const Token = TokenModel();
const thisData = data || {};
thisData.expiry = thisData.expiry || "1d";
if (access?.token.getUserId(0)) {
// Create a dayjs of the expiry expression
const expiry = parseDatePeriod(thisData.expiry);
if (expiry === null) {
throw new errs.AuthError(`Invalid expiry time: ${thisData.expiry}`);
}
const token_attrs = {
id: access.token.getUserId(0),
};
// Only admins can request otherwise scoped tokens
let scope = access.token.get("scope");
if (thisData.scope && access.token.hasScope("admin")) {
scope = [thisData.scope];
if (thisData.scope === "job-board" || thisData.scope === "worker") {
token_attrs.id = 0;
}
}
const signed = await Token.create({
iss: "api",
scope: scope,
attrs: token_attrs,
expiresIn: thisData.expiry,
});
return {
token: signed.token,
expires: expiry.toISOString(),
};
}
throw new errs.AssertionFailedError("Existing token contained invalid user data");
},
/**
* Verify 2FA code and return full token
* @param {string} challengeToken
* @param {string} code
* @param {string} [expiry]
* @returns {Promise}
*/
verify2FA: async (challengeToken, code, expiry) => {
const Token = TokenModel();
const tokenExpiry = expiry || "1d";
// Verify challenge token
let tokenData;
try {
tokenData = await Token.load(challengeToken);
} catch {
throw new errs.AuthError("Invalid or expired challenge token");
}
// Check scope
if (!tokenData.scope || tokenData.scope[0] !== "2fa-challenge") {
throw new errs.AuthError("Invalid challenge token");
}
const userId = tokenData.attrs?.id;
if (!userId) {
throw new errs.AuthError("Invalid challenge token");
}
// Verify 2FA code
const valid = await twoFactor.verifyForLogin(userId, code);
if (!valid) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_2FA, ERROR_MESSAGE_INVALID_2FA_I18N);
}
// Create full token
const expiryDate = parseDatePeriod(tokenExpiry);
if (expiryDate === null) {
throw new errs.AuthError(`Invalid expiry time: ${tokenExpiry}`);
}
const signed = await Token.create({
iss: "api",
attrs: {
id: userId,
},
scope: ["user"],
expiresIn: tokenExpiry,
});
return {
token: signed.token,
expires: expiryDate.toISOString(),
};
},
/**
* @param {Object} user
* @returns {Promise}
*/
getTokenFromUser: async (user) => {
const expire = "1d";
const Token = TokenModel();
const expiry = parseDatePeriod(expire);
const signed = await Token.create({
iss: "api",
attrs: {
id: user.id,
},
scope: ["user"],
expiresIn: expire,
});
return {
token: signed.token,
expires: expiry.toISOString(),
user: user,
};
},
};
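
Taken together, the email login and the optional 2FA challenge form a two-step flow. A minimal sketch (the import path, credentials, and TOTP code below are illustrative assumptions):

// Hypothetical two-step login built on the methods above (sketch only).
import internalToken from "./internal/token.js"; // assumed path

const step1 = await internalToken.getTokenFromEmail(
    { identity: "admin@example.com", secret: "changeme" }, // placeholder credentials
    "api",
);
if (step1.requires_2fa) {
    // Exchange the short-lived challenge token plus a TOTP code for a full token.
    const step2 = await internalToken.verify2FA(step1.challenge_token, "123456");
    console.log(step2.token, step2.expires);
} else {
    console.log(step1.token, step1.expires);
}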


@@ -1,592 +0,0 @@
import _ from "lodash";
import crypto from "node:crypto";
import fs from "node:fs";
import { pipeline } from "node:stream/promises";
import errs from "../lib/error.js";
import utils from "../lib/utils.js";
import { gravatar as logger } from "../logger.js";
import authModel from "../models/auth.js";
import userModel from "../models/user.js";
import userPermissionModel from "../models/user_permission.js";
import internalAuditLog from "./audit-log.js";
import internalToken from "./token.js";
import pjson from "../package.json" with { type: "json" };
const omissions = () => {
return ["is_deleted", "permissions.id", "permissions.user_id", "permissions.created_on", "permissions.modified_on"];
};
const internalUser = {
/**
* Creating a user can happen unauthenticated only once, and only when no active users exist.
* Otherwise, a valid auth method is required.
*
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
const auth = data.auth || null;
delete data.auth;
data.avatar = data.avatar || "";
data.roles = data.roles || [];
data.email = data.email.toLowerCase().trim();
// Await the availability check so the validation error actually propagates
// instead of becoming an unhandled rejection inside a dangling promise.
const available = await internalUser.isEmailAvailable(data.email);
if (!available) {
throw new errs.ValidationError(`Email address already in use - ${data.email}`);
}
if (typeof data.is_disabled !== "undefined") {
data.is_disabled = data.is_disabled ? 1 : 0;
}
await access.can("users:create", data);
if (process.env.DISABLE_GRAVATAR === "true") {
data.avatar = "/images/default-avatar.jpg";
} else {
try {
const hash = crypto.createHash("sha256").update(data.email.trim().toLowerCase()).digest("hex");
const response = await fetch(
`https://www.gravatar.com/avatar/${hash}?s=64&default=initials&name=${encodeURIComponent(
data.name
.split(" ")
.map((n) => n[0])
.join(""),
)}`,
{
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
},
},
);
if (!response.ok) throw new Error(`Status code: ${response.status}`);
let ext;
switch (response.headers.get("content-type")) {
case "image/png":
ext = "png";
break;
case "image/jpeg":
ext = "jpeg";
break;
case "image/gif":
ext = "gif";
break;
default:
throw new Error(`Unsupported content-type: ${response.headers.get("content-type")}`);
}
await pipeline(response.body, fs.createWriteStream(`/data/npmplus/gravatar/${hash}.${ext}`));
data.avatar = `/images/gravatar/${hash}.${ext}`;
} catch (err) {
logger.error(`Error downloading gravatar: ${err.message}`);
data.avatar = "/images/default-avatar.jpg";
}
}
let user = await userModel.query().insertAndFetch(data).then(utils.omitRow(omissions()));
if (auth) {
// Insert the credential row; do not reassign `user`, or the permission
// insert below would use the auth row's id as the user id.
await authModel.query().insert({
user_id: user.id,
type: auth.type,
secret: auth.secret,
meta: {},
});
}
// Create permissions row as well
const isAdmin = data.roles.indexOf("admin") !== -1;
await userPermissionModel.query().insert({
user_id: user.id,
visibility: isAdmin ? "all" : "user",
proxy_hosts: "manage",
redirection_hosts: "manage",
dead_hosts: "manage",
streams: "manage",
access_lists: "manage",
certificates: "manage",
});
user = await internalUser.get(access, { id: user.id, expand: ["permissions"] });
await internalAuditLog.add(access, {
action: "created",
object_type: "user",
object_id: user.id,
meta: user,
});
return user;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
* @param {String} [data.email]
* @param {String} [data.name]
* @return {Promise}
*/
update: (access, data) => {
if (typeof data.is_disabled !== "undefined") {
data.is_disabled = data.is_disabled ? 1 : 0;
}
return access
.can("users:update", data.id)
.then(() => {
// Make sure that the user being updated doesn't change their email to another user that is already using it
// 1. get user we want to update
return internalUser.get(access, { id: data.id }).then((user) => {
// 2. if email is to be changed, find other users with that email
if (typeof data.email !== "undefined") {
data.email = data.email.toLowerCase().trim();
if (user.email !== data.email) {
return internalUser.isEmailAvailable(data.email, data.id).then((available) => {
if (!available) {
throw new errs.ValidationError(`Email address already in use - ${data.email}`);
}
return user;
});
}
}
// No change to email:
return user;
});
})
.then(async (user) => {
if (user.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`User could not be updated, IDs do not match: ${user.id} !== ${data.id}`,
);
}
if (process.env.DISABLE_GRAVATAR === "true") {
data.avatar = "/images/default-avatar.jpg";
} else {
try {
const hash = crypto
.createHash("sha256")
.update((data.email || user.email).trim().toLowerCase())
.digest("hex");
const response = await fetch(
`https://www.gravatar.com/avatar/${hash}?s=64&default=initials&name=${encodeURIComponent(
(data.name || user.name)
.split(" ")
.map((n) => n[0])
.join(""),
)}`,
{
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
},
},
);
if (!response.ok) throw new Error(`Status code: ${response.status}`);
let ext;
switch (response.headers.get("content-type")) {
case "image/png":
ext = "png";
break;
case "image/jpeg":
ext = "jpg";
break;
case "image/gif":
ext = "gif";
break;
default:
throw new Error(`Unsupported content-type: ${response.headers.get("content-type")}`);
}
await pipeline(response.body, fs.createWriteStream(`/data/npmplus/gravatar/${hash}.${ext}`));
data.avatar = `/images/gravatar/${hash}.${ext}`;
} catch (err) {
logger.error(`Error downloading gravatar: ${err.message}`);
data.avatar = "/images/default-avatar.jpg";
}
}
return userModel.query().patchAndFetchById(user.id, data).then(utils.omitRow(omissions()));
})
.then(() => {
return internalUser.get(access, { id: data.id });
})
.then((user) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "user",
object_id: user.id,
meta: { ...data, id: user.id, name: user.name },
})
.then(() => {
return user;
});
});
},
/**
* @param {Access} access
* @param {Object} [data]
* @param {Integer} [data.id] Defaults to the token user
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
if (typeof thisData.id === "undefined" || !thisData.id) {
thisData.id = access.token.getUserId(0);
}
return access
.can("users:get", thisData.id)
.then(() => {
const query = userModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph("[permissions]")
.first();
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(thisData.id);
}
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(row, thisData.omit);
}
if (row.avatar === "") {
row.avatar = "/images/default-avatar.jpg";
}
return row;
});
},
/**
* Checks whether an email address is available. If a user_id is supplied,
* that user is excluded from the check (so a user can keep their own email).
*
* @param email
* @param user_id
*/
isEmailAvailable: (email, user_id) => {
const query = userModel.query().where("email", "=", email.toLowerCase().trim()).where("is_deleted", 0).first();
if (typeof user_id !== "undefined") {
query.where("id", "!=", user_id);
}
return query.then((user) => {
return !user;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("users:delete", data.id)
.then(() => {
return internalUser.get(access, { id: data.id });
})
.then((user) => {
if (!user) {
throw new errs.ItemNotFoundError(data.id);
}
// Make sure user can't delete themselves
if (user.id === access.token.getUserId(0)) {
throw new errs.PermissionError("You cannot delete yourself.");
}
return userModel
.query()
.where("id", user.id)
.patch({
is_deleted: 1,
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "user",
object_id: user.id,
meta: _.omit(user, omissions()),
});
});
})
.then(() => {
return true;
});
},
deleteAll: async () => {
await userModel.query().patch({
is_deleted: 1,
});
},
/**
* This will only count the users
*
* @param {Access} access
* @param {String} [search_query]
* @returns {*}
*/
getCount: (access, search_query) => {
return access
.can("users:list")
.then(() => {
const query = userModel.query().count("id as count").where("is_deleted", 0).first();
// search_query is used to filter the results
if (typeof search_query === "string") {
query.where(function () {
this.where("user.name", "like", `%${search_query}%`).orWhere(
"user.email",
"like",
`%${search_query}%`,
);
});
}
return query;
})
.then((row) => {
return Number.parseInt(row.count, 10);
});
},
/**
* All users
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: async (access, expand, search_query) => {
await access.can("users:list");
const query = userModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph("[permissions]")
.orderBy("name", "ASC");
// search_query is used to filter the results
if (typeof search_query === "string") {
query.where(function () {
this.where("name", "like", `%${search_query}%`).orWhere("email", "like", `%${search_query}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const res = await query;
return utils.omitRows(omissions())(res);
},
/**
* @param {Access} access
* @param {Integer} [idRequested]
* @returns {[String]}
*/
getUserOmisionsByAccess: (access, idRequested) => {
let response = []; // Admin response
if (!access.token.hasScope("admin") && access.token.getUserId(0) !== idRequested) {
response = ["is_deleted"]; // Restricted response
}
return response;
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
* @param {String} data.type
* @param {String} data.secret
* @return {Promise}
*/
setPassword: (access, data) => {
return access
.can("users:password", data.id)
.then(() => {
return internalUser.get(access, { id: data.id });
})
.then((user) => {
if (user.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`User could not be updated, IDs do not match: ${user.id} !== ${data.id}`,
);
}
if (user.id === access.token.getUserId(0)) {
// they're setting their own password. Make sure their current password is correct
if (typeof data.current === "undefined" || !data.current) {
throw new errs.ValidationError("Current password was not supplied");
}
return internalToken
.getTokenFromEmail({
identity: user.email,
secret: data.current,
})
.then(() => {
return user;
});
}
return user;
})
.then((user) => {
// Get auth, patch if it exists
return authModel
.query()
.where("user_id", user.id)
.andWhere("type", data.type)
.first()
.then((existing_auth) => {
if (existing_auth) {
// patch
return authModel.query().where("user_id", user.id).andWhere("type", data.type).patch({
type: data.type, // This is required for the model to encrypt on save
secret: data.secret,
});
}
// insert
return authModel.query().insert({
user_id: user.id,
type: data.type,
secret: data.secret,
meta: {},
});
})
.then(() => {
// Add to Audit Log
return internalAuditLog.add(access, {
action: "updated",
object_type: "user",
object_id: user.id,
meta: {
name: user.name,
password_changed: true,
auth_type: data.type,
},
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @return {Promise}
*/
setPermissions: (access, data) => {
return access
.can("users:permissions", data.id)
.then(() => {
return internalUser.get(access, { id: data.id });
})
.then((user) => {
if (user.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`User could not be updated, IDs do not match: ${user.id} !== ${data.id}`,
);
}
return user;
})
.then((user) => {
// Get perms row, patch if it exists
return userPermissionModel
.query()
.where("user_id", user.id)
.first()
.then((existing_auth) => {
if (existing_auth) {
// patch
return userPermissionModel
.query()
.where("user_id", user.id)
.patchAndFetchById(existing_auth.id, _.assign({ user_id: user.id }, data));
}
// insert
return userPermissionModel.query().insertAndFetch(_.assign({ user_id: user.id }, data));
})
.then((permissions) => {
// Add to Audit Log
return internalAuditLog.add(access, {
action: "updated",
object_type: "user",
object_id: user.id,
meta: {
name: user.name,
permissions: permissions,
},
});
});
})
.then(() => {
return true;
});
},
/**
* @param {Access} access
* @param {Object} data
* @param {Integer} data.id
*/
loginAs: (access, data) => {
return access
.can("users:loginas", data.id)
.then(() => {
return internalUser.get(access, data);
})
.then((user) => {
return internalToken.getTokenFromUser(user);
});
},
};
export default internalUser;
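
A minimal sketch of how these pieces compose (assuming `access` is an admin-scoped Access instance; the field values are placeholders):

// Hypothetical: create a user with password auth, then scope their permissions.
const user = await internalUser.create(access, {
    name: "Jane Doe",
    email: "jane@example.com",
    roles: [],
    auth: { type: "password", secret: "a-strong-password" },
});
await internalUser.setPermissions(access, {
    id: user.id,
    visibility: "user", // may only see their own objects
    streams: "view",
});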


@@ -1,19 +0,0 @@
module.exports = {
development: {
client: "mysql2",
migrations: {
tableName: "migrations",
stub: "lib/migrate_template.js",
directory: "migrations",
},
},
production: {
client: "mysql2",
migrations: {
tableName: "migrations",
stub: "lib/migrate_template.js",
directory: "migrations",
},
},
};
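
Both environments resolve to the same mysql2 client; the section is chosen by NODE_ENV. A programmatic sketch (the CJS/ESM import interop and the migration call are assumptions about the standard knex API):

// Hypothetical programmatic use of this knexfile (sketch only).
import knexFactory from "knex";
import knexConfig from "./knexfile.js"; // CJS default-export interop assumed

const db = knexFactory(knexConfig[process.env.NODE_ENV || "development"]);
await db.migrate.latest(); // runs migrations from the configured directory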


@@ -1,275 +0,0 @@
/**
* Some Notes: This is a friggin complicated piece of code.
*
* "scope" in this file means "where did this token come from and what is using it", so 99% of the time
* the "scope" is going to be "user" because it would be a user token. This is not to be confused with
* the "role" which could be "user" or "admin". The scope in fact, could be "worker" or anything else.
*/
import fs from "node:fs";
import { dirname } from "node:path";
import { fileURLToPath } from "node:url";
import Ajv from "ajv/dist/2020.js";
import _ from "lodash";
import { access as logger } from "../logger.js";
import proxyHostModel from "../models/proxy_host.js";
import TokenModel from "../models/token.js";
import userModel from "../models/user.js";
import permsSchema from "./access/permissions.json" with { type: "json" };
import roleSchema from "./access/roles.json" with { type: "json" };
import errs from "./error.js";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
export default function (tokenString) {
const Token = TokenModel();
let tokenData = null;
let initialised = false;
const objectCache = {};
let allowInternalAccess = false;
let userRoles = [];
let permissions = {};
/**
* Loads the Token object from the token string
*
* @returns {Promise}
*/
this.init = async () => {
if (initialised) {
return;
}
if (!tokenString) {
throw new errs.PermissionError("Permission Denied");
}
tokenData = await Token.load(tokenString);
// At this point we need to load the user from the DB and make sure they:
// - exist (and not soft deleted)
// - still have the appropriate scopes for this token
// This is only required when the User ID is supplied or if the token scope has `user`
if (
tokenData.attrs.id ||
(typeof tokenData.scope !== "undefined" && _.indexOf(tokenData.scope, "user") !== -1)
) {
// Has token user id or token user scope
const user = await userModel
.query()
.where("id", tokenData.attrs.id)
.andWhere("is_deleted", 0)
.andWhere("is_disabled", 0)
.allowGraph("[permissions]")
.withGraphFetched("[permissions]")
.first();
if (user) {
// make sure user has all scopes of the token
// The `user` role is not added against the user row, so we have to just add it here to get past this check.
user.roles.push("user");
let ok = true;
_.forEach(tokenData.scope, (scope_item) => {
if (_.indexOf(user.roles, scope_item) === -1) {
ok = false;
}
});
if (!ok) {
throw new errs.AuthError("Invalid token scope for User");
}
initialised = true;
userRoles = user.roles;
permissions = user.permissions;
} else {
throw new errs.AuthError("User cannot be loaded for Token");
}
}
initialised = true;
};
/**
* Fetches the object ids from the database, only once per object type, for this token.
* This only applies to USER token scopes, as all other tokens are not really bound
* by object scopes
*
* @param {String} objectType
* @returns {Promise}
*/
this.loadObjects = async (objectType) => {
let objects = null;
if (Token.hasScope("user")) {
if (typeof tokenData.attrs.id === "undefined" || !tokenData.attrs.id) {
throw new errs.AuthError("User Token supplied without a User ID");
}
const tokenUserId = tokenData.attrs.id ? tokenData.attrs.id : 0;
if (typeof objectCache[objectType] !== "undefined") {
objects = objectCache[objectType];
} else {
switch (objectType) {
// USERS - should only return yourself
case "users":
objects = tokenUserId ? [tokenUserId] : [];
break;
// Proxy Hosts
case "proxy_hosts": {
const query = proxyHostModel.query().select("id").andWhere("is_deleted", 0);
if (permissions.visibility === "user") {
query.andWhere("owner_user_id", tokenUserId);
}
const rows = await query;
objects = [];
_.forEach(rows, (ruleRow) => {
objects.push(ruleRow.id);
});
// the enum must contain at least one item, so push an id that can never match
if (!objects.length) {
objects.push(0);
}
break;
}
}
objectCache[objectType] = objects;
}
}
return objects;
};
/**
* Creates a schema object on the fly with the IDs and other values required to be checked against the permissionSchema
*
* @param {String} permissionLabel
* @returns {Object}
*/
this.getObjectSchema = async (permissionLabel) => {
const baseObjectType = permissionLabel.split(":").shift();
const schema = {
$id: "objects",
description: "Actor Properties",
type: "object",
additionalProperties: false,
properties: {
user_id: {
anyOf: [
{
type: "number",
enum: [Token.get("attrs").id],
},
],
},
scope: {
type: "string",
pattern: `^${Token.get("scope")}$`,
},
},
};
const result = await this.loadObjects(baseObjectType);
if (typeof result === "object" && result !== null) {
schema.properties[baseObjectType] = {
type: "number",
enum: result,
minimum: 1,
};
} else {
schema.properties[baseObjectType] = {
type: "number",
minimum: 1,
};
}
return schema;
};
// Public interface returned to consumers of this module:
return {
token: Token,
/**
*
* @param {Boolean} [allowInternal]
* @returns {Promise}
*/
load: async (allowInternal) => {
if (tokenString) {
return await Token.load(tokenString);
}
allowInternalAccess = allowInternal;
return allowInternal || null;
},
reloadObjects: this.loadObjects,
/**
*
* @param {String} permission
* @param {*} [data]
* @returns {Promise}
*/
can: async (permission, data) => {
if (allowInternalAccess === true) {
return true;
}
try {
await this.init();
const objectSchema = await this.getObjectSchema(permission);
const dataSchema = {
[permission]: {
data: data,
scope: Token.get("scope"),
roles: userRoles,
permission_visibility: permissions.visibility,
permission_proxy_hosts: permissions.proxy_hosts,
permission_redirection_hosts: permissions.redirection_hosts,
permission_dead_hosts: permissions.dead_hosts,
permission_streams: permissions.streams,
permission_access_lists: permissions.access_lists,
permission_certificates: permissions.certificates,
},
};
const permissionSchema = {
$async: true,
$id: "permissions",
type: "object",
additionalProperties: false,
properties: {},
};
const rawData = fs.readFileSync(`${__dirname}/access/${permission.replace(/:/gim, "-")}.json`, {
encoding: "utf8",
});
permissionSchema.properties[permission] = JSON.parse(rawData);
const ajv = new Ajv({
verbose: true,
allErrors: true,
breakOnError: true,
coerceTypes: true,
schemas: [roleSchema, permsSchema, objectSchema, permissionSchema],
});
const valid = await ajv.validate("permissions", dataSchema);
return valid && dataSchema[permission];
} catch (err) {
err.permission = permission;
err.permission_data = data;
logger.error(permission, data, err.message);
throw new errs.PermissionError("Permission Denied", err);
}
},
};
}
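
A usage sketch for the Access builder above (the import path and token source are assumptions; note that `can` resolves with the matched permission data, which callers such as internalStream read for `permission_visibility`):

// Hypothetical caller of the Access function above (sketch only).
import Access from "./lib/access.js"; // assumed path

const access = new Access(tokenStringFromAuthHeader); // e.g. a Bearer token
await access.load(false); // not an internal call, so a valid token is required
const accessData = await access.can("streams:list");
if (accessData.permission_visibility !== "all") {
    // restricted users: queries must be filtered by owner_user_id
}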


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,13 +0,0 @@
{
"$id": "perms",
"definitions": {
"view": {
"type": "string",
"pattern": "^(view|manage)$"
},
"manage": {
"type": "string",
"pattern": "^(manage)$"
}
}
}
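
Note the naming subtlety: the `view` definition matches either "view" or "manage" (view-or-better), while `manage` matches only "manage". The per-action schemas above and below rely on this: read endpoints reference `perms#/definitions/view`, write endpoints reference `perms#/definitions/manage`.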


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/user"
}
]
}


@@ -1,43 +0,0 @@
{
"$id": "roles",
"definitions": {
"admin": {
"type": "object",
"required": [
"scope",
"roles"
],
"properties": {
"scope": {
"type": "array",
"contains": {
"type": "string",
"pattern": "^user$"
}
},
"roles": {
"type": "array",
"contains": {
"type": "string",
"pattern": "^admin$"
}
}
}
},
"user": {
"type": "object",
"required": [
"scope"
],
"properties": {
"scope": {
"type": "array",
"contains": {
"type": "string",
"pattern": "^user$"
}
}
}
}
}
}
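
For orientation, an illustrative data object (not taken from the code) that satisfies the `admin` definition above would be:

{
    "scope": ["user"],
    "roles": ["admin"]
}

while the `user` definition only requires `"scope": ["user"]`.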


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/view"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,28 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/manage"
},
"roles": {
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
}
}
}
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,26 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"data",
"scope"
],
"properties": {
"data": {
"$ref": "objects#/properties/users"
},
"scope": {
"type": "array",
"contains": {
"type": "string",
"pattern": "^user$"
}
}
}
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,26 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"data",
"scope"
],
"properties": {
"data": {
"$ref": "objects#/properties/users"
},
"scope": {
"type": "array",
"contains": {
"type": "string",
"pattern": "^user$"
}
}
}
}
]
}


@@ -1,7 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
}
]
}


@@ -1,26 +0,0 @@
{
"anyOf": [
{
"$ref": "roles#/definitions/admin"
},
{
"type": "object",
"required": [
"data",
"scope"
],
"properties": {
"data": {
"$ref": "objects#/properties/users"
},
"scope": {
"type": "array",
"contains": {
"type": "string",
"pattern": "^user$"
}
}
}
}
]
}


@@ -1,58 +0,0 @@
import dnsPlugins from "../certbot/dns-plugins.json" with { type: "json" };
import { certbot as logger } from "../logger.js";
import errs from "./error.js";
import utils from "./utils.js";
/**
* Installs a certbot plugin given the key for the object from
* ../certbot/dns-plugins.json
*
* @param {string} pluginKey
* @returns {Object}
*/
const installPlugin = async (pluginKey) => {
if (typeof dnsPlugins[pluginKey] === "undefined") {
throw new errs.ItemNotFoundError(pluginKey);
}
const plugin = dnsPlugins[pluginKey];
logger.start(`Installing ${pluginKey}...`);
return utils
.execFile("pip", ["install", "--upgrade", "--no-cache-dir", plugin.package_name])
.then((result) => {
logger.complete(`Installed ${pluginKey}`);
return result;
});
};
/**
* @param {array} pluginKeys
*/
const installPlugins = async (pluginKeys) => {
if (pluginKeys.length === 0) {
return;
}
let hasErrors = false;
for (const pluginKey of pluginKeys) {
try {
await installPlugin(pluginKey);
} catch (err) {
hasErrors = true;
logger.error(err.message);
break;
}
}
if (hasErrors) {
throw new errs.CommandError("Some plugins failed to install. Please check the logs above", 1);
}
};
export { installPlugins, installPlugin };
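
A minimal usage sketch (the import path and plugin keys are assumptions; keys must exist in dns-plugins.json):

// Hypothetical: install the DNS plugins required before running certbot.
import { installPlugins } from "./lib/certbot.js"; // assumed path

try {
    await installPlugins(["cloudflare", "route53"]); // placeholder plugin keys
} catch (err) {
    // CommandError: at least one install failed; details were logged above.
    console.error(err.message);
}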

Some files were not shown because too many files have changed in this diff.