Compare commits


1 Commit

Author: renovate[bot]
SHA1: 5d7e6a83e3
Message: dep updates/enable ssl_dyn_rec_enable/fix nginx in background/remove tempwrite
Signed-off-by: Zoey <zoey@z0ey.de>
Date: 2023-07-08 17:02:08 +02:00
981 changed files with 32556 additions and 61217 deletions


@@ -1 +0,0 @@
*/node_modules

.gitattributes (vendored, 1 line changed)

@@ -1 +0,0 @@
* text=auto


@@ -1,4 +1,4 @@
exclude:
exclude:
- main
- stable
- develop


@@ -1,29 +1,29 @@
name: Format Caddyfile
name: caddy-fmt
on:
push:
branches:
- develop
paths:
- .github/workflows/caddy-fmt.yml
- caddy/Dockerfile
- caddy/Caddyfile
- Caddy.Dockerfile
- Caddyfile
workflow_dispatch:
jobs:
caddy-fmt:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@v3
- name: Read version
id: version
run: echo "version=$(cat caddy/Dockerfile | grep "^COPY --from=caddy:.*$" | head -1 | sed "s|COPY --from=caddy:\([0-9.]\+\).*|\1|g")" >> $GITHUB_OUTPUT
run: echo "version=$(cat Caddy.Dockerfile | grep -wE "FROM caddy:*" | head -1 | sed "s|FROM caddy:||g")" >> $GITHUB_OUTPUT
- name: caddy-fmt
run: |
docker run --rm -v ${{ github.workspace }}/caddy/Caddyfile:/etc/caddy/Caddyfile caddy:${{ steps.version.outputs.version }} caddy fmt --overwrite /etc/caddy/Caddyfile
docker run --rm -v ${{ github.workspace }}/Caddyfile:/etc/caddy/Caddyfile caddy:${{ steps.version.outputs.version }} caddy fmt --overwrite /etc/caddy/Caddyfile
- name: push changes
run: |
git add caddy/Caddyfile
git add -A
git config user.name "GitHub"
git config user.email "noreply@github.com"
git diff-index --quiet HEAD || git commit -sm "caddy fmt"
git diff-index --quiet HEAD || git commit -sm "caddy-fmt"
git push

.github/workflows/caddy-latest.yml (vendored, new file, 30 lines)

@@ -0,0 +1,30 @@
name: Docker push Caddy develop to latest
on:
workflow_dispatch:
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert Username
id: un
run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Login to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ steps.un.outputs.un }}
password: ${{ github.token }}
- name: Push develop to latest
run: |
docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy-${{ github.ref_name }}
docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy-${{ github.ref_name }}
- name: Show Caddy version
run: |
docker run --rm --entrypoint caddy ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy version
docker run --rm --entrypoint caddy ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy version


@@ -5,42 +5,46 @@ on:
- develop
paths:
- .github/workflows/caddy.yml
- caddy/Dockerfile
- caddy/Caddyfile
- Caddy.Dockerfile
- Caddyfile
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@ce360397dd3f832beb865e1373c09c0e9f86d70a # v4
uses: docker/setup-qemu-action@v2
with:
platforms: all
platforms: arm64 #all
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
uses: docker/setup-buildx-action@v2
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert Username
id: un
run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
uses: docker/login-action@v2
with:
registry: ghcr.io
username: zoeyvid
username: ${{ steps.un.outputs.un }}
password: ${{ github.token }}
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
uses: docker/build-push-action@v4
if: ${{ github.event_name != 'pull_request' }}
with:
context: caddy
platforms: linux/amd64,linux/arm64
push: true
context: .
file: ./Caddy.Dockerfile
platforms: linux/amd64,linux/arm64 #,linux/amd64/v2,linux/amd64/v3,linux/amd64/v4 #,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
push: ${{ github.event_name != 'pull_request' }}
tags: |
zoeyvid/npmplus:caddy
ghcr.io/zoeyvid/npmplus:caddy
zoeyvid/npmplus:caddy-${{ github.run_number }}
ghcr.io/zoeyvid/npmplus:caddy-${{ github.run_number }}
${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy-${{ github.ref_name }}
ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:caddy-${{ github.ref_name }}


@@ -1,563 +0,0 @@
name: dependency-updates
on:
push:
branches:
- develop
schedule:
- cron: "0 */3 * * *"
workflow_dispatch:
jobs:
nginx-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update nginx version
id: update
run: |
NGINX_VER="$(
git ls-remote --tags https://github.com/nginx/nginx \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NGINX_VER=.*|ARG NGINX_VER=$NGINX_VER|" Dockerfile
echo "version=$NGINX_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update nginx version to ${{ steps.update.outputs.version }}
branch: update-nginx-version
title: update nginx version to ${{ steps.update.outputs.version }}
body: update nginx version to ${{ steps.update.outputs.version }}
dynamic_tls_records-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update dynamic_tls_records version
id: update
run: |
git clone --depth 1 https://github.com/nginx-modules/ngx_http_tls_dyn_size ngx_http_tls_dyn_size
DTR_VER="$(
ls ngx_http_tls_dyn_size/nginx__dynamic_tls_records_*.patch \
| sed "s|ngx_http_tls_dyn_size/nginx__dynamic_tls_records_\([0-9.]\+\)+.patch|\1|g" \
| sort -V \
| tail -1
)"
rm -r ngx_http_tls_dyn_size
sed -i "s|ARG DTR_VER=.*|ARG DTR_VER=$DTR_VER|" Dockerfile
echo "version=$DTR_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update dynamic_tls_records version to ${{ steps.update.outputs.version }}
branch: update-dynamic_tls_records-version
title: update dynamic_tls_records version to ${{ steps.update.outputs.version }}
body: update dynamic_tls_records version to ${{ steps.update.outputs.version }}
resolver_conf_parsing-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update resolver_conf_parsing version
id: update
run: |
git clone --depth 1 https://github.com/openresty/openresty openresty
RCP_VER="$(
ls openresty/patches/nginx \
| sort -V \
| tail -1
)"
rm -r openresty
sed -i "s|ARG RCP_VER=.*|ARG RCP_VER=$RCP_VER|" Dockerfile
echo "version=$RCP_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update resolver_conf_parsing version to ${{ steps.update.outputs.version }}
branch: update-resolver_conf_parsing-version
title: update resolver_conf_parsing version to ${{ steps.update.outputs.version }}
body: update resolver_conf_parsing version to ${{ steps.update.outputs.version }}
zlib-ng-patch-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update zlib-ng-patch version
id: update
run: |
git clone --depth 1 https://github.com/zlib-ng/patches zlib-ng-patches
ZNP_VER="$(
ls zlib-ng-patches/nginx/*-zlib-ng.patch \
| sed "s|zlib-ng-patches/nginx/\([0-9.]\+\)-zlib-ng.patch|\1|g" \
| sort -V \
| tail -1
)"
rm -r zlib-ng-patches
sed -i "s|ARG ZNP_VER=.*|ARG ZNP_VER=$ZNP_VER|" Dockerfile
echo "version=$ZNP_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update zlib-ng-patch version to ${{ steps.update.outputs.version }}
branch: update-zlib-ng-patch-version
title: update zlib-ng-patch version to ${{ steps.update.outputs.version }}
body: update zlib-ng-patch version to ${{ steps.update.outputs.version }}
ngx_brotli-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_brotli version
id: update
run: |
NB_VER="$(
git ls-remote --tags https://github.com/google/ngx_brotli \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NB_VER=.*|ARG NB_VER=$NB_VER|" Dockerfile
echo "version=$NB_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx_brotli version to ${{ steps.update.outputs.version }}
branch: update-ngx_brotli-version
title: update ngx_brotli version to ${{ steps.update.outputs.version }}
body: update ngx_brotli version to ${{ steps.update.outputs.version }}
ngx_unbrotli-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_unbrotli version
id: update
run: |
NUB_VER="$(
git ls-remote --tags https://github.com/clyfish/ngx_unbrotli \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NUB_VER=.*|ARG NUB_VER=$NUB_VER|" Dockerfile
echo "version=$NUB_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx_unbrotli version to ${{ steps.update.outputs.version }}
branch: update-ngx_unbrotli-version
title: update ngx_unbrotli version to ${{ steps.update.outputs.version }}
body: update ngx_unbrotli version to ${{ steps.update.outputs.version }}
zstd-nginx-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update zstd-nginx-module version
id: update
run: |
ZNM_VER="$(
git ls-remote --tags https://github.com/tokers/zstd-nginx-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG ZNM_VER=.*|ARG ZNM_VER=$ZNM_VER|" Dockerfile
echo "version=$ZNM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '0.1.1' }}
with:
signoff: true
delete-branch: true
commit-message: update zstd-nginx-module version to ${{ steps.update.outputs.version }}
branch: update-zstd-nginx-module-version
title: update zstd-nginx-module version to ${{ steps.update.outputs.version }}
body: update zstd-nginx-module version to ${{ steps.update.outputs.version }}
ngx_http_unzstd_filter_module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_http_unzstd_filter_module version
id: update
run: |
NHUZFM_VER="$(
git ls-remote --tags https://github.com/HanadaLee/ngx_http_unzstd_filter_module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NHUZFM_VER=.*|ARG NHUZFM_VER=$NHUZFM_VER|" Dockerfile
echo "version=$NHUZFM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != '' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx_http_unzstd_filter_module version to ${{ steps.update.outputs.version }}
branch: update-ngx_http_unzstd_filter_module-version
title: update ngx_http_unzstd_filter_module version to ${{ steps.update.outputs.version }}
body: update ngx_http_unzstd_filter_module version to ${{ steps.update.outputs.version }}
ngx-fancyindex-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx-fancyindex version
id: update
run: |
NF_VER="$(
git ls-remote --tags https://github.com/aperezdc/ngx-fancyindex \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NF_VER=.*|ARG NF_VER=$NF_VER|" Dockerfile
echo "version=$NF_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != 'v0.5.2' }}
with:
signoff: true
delete-branch: true
commit-message: update ngx-fancyindex version to ${{ steps.update.outputs.version }}
branch: update-ngx-fancyindex-version
title: update ngx-fancyindex version to ${{ steps.update.outputs.version }}
body: update ngx-fancyindex version to ${{ steps.update.outputs.version }}
headers-more-nginx-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update headers-more-nginx-module version
id: update
run: |
HMNM_VER="$(
git ls-remote --tags https://github.com/openresty/headers-more-nginx-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG HMNM_VER=.*|ARG HMNM_VER=$HMNM_VER|" Dockerfile
echo "version=$HMNM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update headers-more-nginx-module version to ${{ steps.update.outputs.version }}
branch: update-headers-more-nginx-module-version
title: update headers-more-nginx-module version to ${{ steps.update.outputs.version }}
body: update headers-more-nginx-module version to ${{ steps.update.outputs.version }}
ngx_devel_kit-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_devel_kit version
id: update
run: |
NDK_VER="$(
git ls-remote --tags https://github.com/vision5/ngx_devel_kit \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NDK_VER=.*|ARG NDK_VER=$NDK_VER|" Dockerfile
echo "version=$NDK_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update ngx_devel_kit version to ${{ steps.update.outputs.version }}
branch: update-ngx_devel_kit-version
title: update ngx_devel_kit version to ${{ steps.update.outputs.version }}
body: update ngx_devel_kit version to ${{ steps.update.outputs.version }}
lua-nginx-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-nginx-module version
id: update
run: |
LNM_VER="$(
git ls-remote --tags https://github.com/openresty/lua-nginx-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG LNM_VER=.*|ARG LNM_VER=$LNM_VER|" Dockerfile
echo "version=$LNM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-nginx-module version to ${{ steps.update.outputs.version }}
branch: update-lua-nginx-module-version
title: update lua-nginx-module version to ${{ steps.update.outputs.version }}
body: update lua-nginx-module version to ${{ steps.update.outputs.version }}
njs-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update njs version
id: update
run: |
NJS_VER="$(
git ls-remote --tags https://github.com/nginx/njs \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NJS_VER=.*|ARG NJS_VER=$NJS_VER|" Dockerfile
echo "version=$NJS_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update njs version to ${{ steps.update.outputs.version }}
branch: update-njs-version
title: update njs version to ${{ steps.update.outputs.version }}
body: update njs version to ${{ steps.update.outputs.version }}
nginx-auth-ldap-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update nginx-auth-ldap version
id: update
run: |
NAL_VER="$(
git ls-remote --tags https://github.com/kvspb/nginx-auth-ldap \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NAL_VER=.*|ARG NAL_VER=$NAL_VER|" Dockerfile
echo "version=$NAL_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != 'v0.1' }}
with:
signoff: true
delete-branch: true
commit-message: update nginx-auth-ldap version to ${{ steps.update.outputs.version }}
branch: update-nginx-auth-ldap-version
title: update nginx-auth-ldap version to ${{ steps.update.outputs.version }}
body: update nginx-auth-ldap version to ${{ steps.update.outputs.version }}
nginx-module-vts-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update vts version
id: update
run: |
VTS_VER="$(
git ls-remote --tags https://github.com/vozlt/nginx-module-vts \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG VTS_VER=.*|ARG VTS_VER=$VTS_VER|" Dockerfile
echo "version=$VTS_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update vts version to ${{ steps.update.outputs.version }}
branch: update-vts-version
title: update vts version to ${{ steps.update.outputs.version }}
body: update vts version to ${{ steps.update.outputs.version }}
nginx-ntlm-module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update nginx-ntlm-module version
id: update
run: |
NNTLM_VER="$(
git ls-remote --tags https://github.com/gabihodoroaga/nginx-ntlm-module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NNTLM_VER=.*|ARG NNTLM_VER=$NNTLM_VER|" Dockerfile
echo "version=$NNTLM_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
if: ${{ steps.update.outputs.version != 'v1.19.3-beta.1' }}
with:
signoff: true
delete-branch: true
commit-message: update nginx-ntlm-module version to ${{ steps.update.outputs.version }}
branch: update-nginx-ntlm-module-version
title: update nginx-ntlm-module version to ${{ steps.update.outputs.version }}
body: update nginx-ntlm-module version to ${{ steps.update.outputs.version }}
ngx_http_geoip2_module-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update ngx_http_geoip2_module version
id: update
run: |
NHG2M_VER="$(
git ls-remote --tags https://github.com/leev/ngx_http_geoip2_module \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG NHG2M_VER=.*|ARG NHG2M_VER=$NHG2M_VER|" Dockerfile
echo "version=$NHG2M_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update ngx_http_geoip2_module version to ${{ steps.update.outputs.version }}
branch: update-ngx_http_geoip2_module-version
title: update ngx_http_geoip2_module version to ${{ steps.update.outputs.version }}
body: update ngx_http_geoip2_module version to ${{ steps.update.outputs.version }}
lua-resty-core-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-resty-core version
id: update
run: |
LRC_VER="$(
git ls-remote --tags https://github.com/openresty/lua-resty-core \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG LRC_VER=.*|ARG LRC_VER=$LRC_VER|" Dockerfile
echo "version=$LRC_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-resty-core version to ${{ steps.update.outputs.version }}
branch: update-lua-resty-core-version
title: update lua-resty-core version to ${{ steps.update.outputs.version }}
body: update lua-resty-core version to ${{ steps.update.outputs.version }}
lua-resty-lrucache-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-resty-lrucache version
id: update
run: |
LRL_VER="$(
git ls-remote --tags https://github.com/openresty/lua-resty-lrucache \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed "s|\^{}||g"
)"
sed -i "s|ARG LRL_VER=.*|ARG LRL_VER=$LRL_VER|" Dockerfile
echo "version=$LRL_VER" >> $GITHUB_OUTPUT
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-resty-lrucache version to ${{ steps.update.outputs.version }}
branch: update-lua-resty-lrucache-version
title: update lua-resty-lrucache version to ${{ steps.update.outputs.version }}
body: update lua-resty-lrucache version to ${{ steps.update.outputs.version }}
lua-cs-bouncer-update:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: update lua-cs-bouncer version
id: update
run: |
LCSB_VER="$(
git ls-remote --tags https://github.com/crowdsecurity/lua-cs-bouncer \
| cut -d/ -f3 \
| sort -V \
| grep -v rc \
| tail -1 \
| sed -E "s/\^\{\}//"
)"
sed -i "s|ARG LCSB_VER=.*|ARG LCSB_VER=$LCSB_VER|" Dockerfile
echo "version=$LCSB_VER" >> $GITHUB_OUTPUT
wget https://raw.githubusercontent.com/crowdsecurity/cs-nginx-bouncer/refs/heads/main/nginx/crowdsec_nginx.conf -O rootfs/usr/local/nginx/conf/conf.d/crowdsec.conf.original
wget https://raw.githubusercontent.com/crowdsecurity/lua-cs-bouncer/refs/tags/"$LCSB_VER"/config_example.conf -O rootfs/etc/crowdsec.conf.original
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0 # v8
with:
signoff: true
delete-branch: true
commit-message: update lua-cs-bouncer version to ${{ steps.update.outputs.version }}
branch: update-lua-cs-bouncer-version
title: update lua-cs-bouncer version to ${{ steps.update.outputs.version }}
body: update lua-cs-bouncer version to ${{ steps.update.outputs.version }}


@@ -1,91 +0,0 @@
name: Build beta Docker Image
on:
workflow_dispatch:
inputs:
tag:
description: 'name of the beta'
required: true
type: string
jobs:
build-x86_64:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:beta-x86_64
build-args: |
FLAGS=-march=x86-64-v2 -mtune=generic -fcf-protection=full
build-aarch64:
runs-on: ubuntu-24.04-arm
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:beta-aarch64
build-args: |
FLAGS=-mbranch-protection=standard
merge:
runs-on: ubuntu-latest
needs: [build-x86_64, build-aarch64]
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: create multiarch
run: |
docker buildx imagetools create --tag zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64
docker buildx imagetools create --tag zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:beta-x86_64 ghcr.io/zoeyvid/npmplus:beta-aarch64


@@ -1,95 +1,32 @@
name: Build latest Docker Image
name: Docker push develop to latest
on:
workflow_dispatch:
inputs:
tag:
description: 'name of the release'
required: true
type: string
jobs:
build-x86_64:
docker:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:latest-x86_64
build-args: |
FLAGS=-march=x86-64-v2 -mtune=generic -fcf-protection=full
build-aarch64:
runs-on: ubuntu-24.04-arm
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"${{ inputs.tag }}-$(git rev-parse --short HEAD)-$(cat .version)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:latest-aarch64
build-args: |
FLAGS=-mbranch-protection=standard
merge:
runs-on: ubuntu-latest
needs: [build-x86_64, build-aarch64]
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
if: github.event_name != 'pull_request'
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert Username
id: un
run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
uses: docker/login-action@v2
with:
registry: ghcr.io
username: zoeyvid
username: ${{ steps.un.outputs.un }}
password: ${{ github.token }}
- name: create multiarch
- name: Push develop to latest
run: |
docker buildx imagetools create --tag zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:beta ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag zoeyvid/npmplus:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:${{ inputs.tag }} ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag zoeyvid/nginx-proxy-manager:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/nginx-proxy-manager:latest ghcr.io/zoeyvid/npmplus:latest-x86_64 ghcr.io/zoeyvid/npmplus:latest-aarch64
docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:latest ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }}
docker buildx imagetools create --tag ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.run_number }} ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }}
docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:latest ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }}
docker buildx imagetools create --tag ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.run_number }} ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }}
- name: Show Nginx version
run: |
docker run --rm --entrypoint nginx ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:latest -V
docker run --rm --entrypoint nginx ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:latest -V


@@ -3,85 +3,96 @@ on:
push:
branches:
- develop
paths:
- .github/workflows/docker.yml
- Dockerfile
- frontend/**
- backend/**
- global/**
- rootfs/**
pull_request:
paths:
- .github/workflows/docker.yml
- Dockerfile
- frontend/**
- backend/**
- global/**
- rootfs/**
workflow_dispatch:
jobs:
build-x86_64:
build:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
with:
platforms: arm64 #all
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
uses: docker/setup-buildx-action@v2
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:develop-x86_64
build-args: |
FLAGS=-march=x86-64-v2 -mtune=generic -fcf-protection=full
build-aarch64:
runs-on: ubuntu-24.04-arm
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3
with:
driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=-1
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
with:
registry: ghcr.io
username: zoeyvid
password: ${{ github.token }}
- name: version
run: |
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"$(git rev-parse --short HEAD)\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6
with:
context: .
file: ./Dockerfile
push: true
tags: ghcr.io/zoeyvid/npmplus:develop-aarch64
build-args: |
FLAGS=-mbranch-protection=standard
merge:
runs-on: ubuntu-latest
needs: [build-x86_64, build-aarch64]
if: ${{ github.repository_owner == 'ZoeyVid' }}
steps:
- name: Login to DockerHub
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Convert Username
id: un
run: echo "un=$(echo "${{ github.repository_owner }}" | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Login to GitHub Container Registry
uses: docker/login-action@b45d80f862d83dbcd57f89517bcf500b2ab88fb2 # v4
uses: docker/login-action@v2
with:
registry: ghcr.io
username: zoeyvid
username: ${{ steps.un.outputs.un }}
password: ${{ github.token }}
- name: create multiarch
- name: version
run: |
docker buildx imagetools create --tag zoeyvid/npmplus:develop ghcr.io/zoeyvid/npmplus:develop-x86_64 ghcr.io/zoeyvid/npmplus:develop-aarch64
docker buildx imagetools create --tag ghcr.io/zoeyvid/npmplus:develop ghcr.io/zoeyvid/npmplus:develop-x86_64 ghcr.io/zoeyvid/npmplus:develop-aarch64
version="$(cat .version)+$(git rev-parse --short HEAD)"
sed -i "s|\"0.0.0\"|\"$version\"|g" frontend/js/i18n/messages.json
sed -i "s|\"0.0.0\"|\"$version\"|g" frontend/package.json
sed -i "s|\"0.0.0\"|\"$version\"|g" backend/package.json
- name: Build
uses: docker/build-push-action@v4
if: ${{ github.event_name != 'pull_request' }}
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64 #,linux/amd64/v2,linux/amd64/v3,linux/amd64/v4 #,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
push: ${{ github.event_name != 'pull_request' }}
tags: |
${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }}
ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }}
build-args: |
"BUILD=${{ github.event.repository.name }}"
- name: show version
if: ${{ github.event_name != 'pull_request' }}
run: |
docker run --rm --entrypoint nginx ${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }} -V
docker run --rm --entrypoint nginx ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ github.ref_name }} -V
- name: Set PR-Number (PR)
if: ${{ github.event_name == 'pull_request' }}
id: pr
run: echo "pr=$(echo pr-${{ github.ref_name }} | sed "s|refs/pull/:||g" | sed "s|/merge||g")" >> $GITHUB_OUTPUT
- name: Build (PR)
uses: docker/build-push-action@v4
if: ${{ github.event_name == 'pull_request' }}
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64 #,linux/amd64/v2,linux/amd64/v3,linux/amd64/v4 #,linux/ppc64le,linux/s390x,linux/386,linux/arm/v7,linux/arm/v6
push: ${{ github.event_name == 'pull_request' }}
tags: ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ steps.pr.outputs.pr }}
build-args: |
"BUILD=${{ github.event.repository.name }}"
- name: show version (PR)
if: ${{ github.event_name == 'pull_request' }}
run: docker run --rm --entrypoint nginx ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ steps.pr.outputs.pr }} -V
- name: add comment (PR)
uses: mshick/add-pr-comment@v2
if: ${{ github.event_name == 'pull_request' }}
with:
message: "The Docker Image can now be found here: `ghcr.io/${{ steps.un.outputs.un }}/${{ github.event.repository.name }}:${{ steps.pr.outputs.pr }}`"
repo-token: ${{ github.token }}


@@ -1,30 +0,0 @@
name: Dockerlint
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
docker-lint:
runs-on: ubuntu-latest
name: docker-lint
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Install hadolint
run: |
sudo wget https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -O /usr/bin/hadolint
sudo chmod +x /usr/bin/hadolint
- name: run lint
run: |
DOCKERFILES="$(find . -name "*Dockerfile*")"
for file in $(echo "$DOCKERFILES" | tr " " "\n"); do
# DL3003 warning: Use WORKDIR to switch to a directory
# DL3018 warning: Pin versions in apk add. Instead of `apk add <package>` use `apk add <package>=<version>`
# DL3013 warning: Pin versions in pip. Instead of `pip install <package>` use `pip install <package>==<version>` or `pip install --requirement <requirements file>`
hadolint "$file" --ignore DL3003 --ignore DL3013 --ignore DL3018 | tee -a hadolint.log
done
if grep -q "DL[0-9]\+\|SC[0-9]\+" hadolint.log; then
exit 1
fi

.github/workflows/js.yml (vendored, new file, 34 lines)

@@ -0,0 +1,34 @@
name: js
on:
push:
schedule:
- cron: "0 */6 * * *"
workflow_dispatch:
jobs:
js:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 19
- name: eslint
run: |
cd backend
yarn install --no-lockfile
yarn eslint . --fix
- name: update
run: |
curl -L https://unpkg.com/xregexp/xregexp-all.js -o rootfs/nftd/xregexp-all.js
curl -L https://unpkg.com/showdown/dist/showdown.min.js -o rootfs/nftd/showdown.min.js
curl -L https://code.jquery.com/jquery-"$(git ls-remote --tags https://github.com/jquery/jquery | cut -d/ -f3 | sort -V | tail -1 | sed -E "s/\^\{\}//")".min.js -o rootfs/nftd/jquery.min.js
curl -L https://cdn.jsdelivr.net/npm/bootstrap@"$(git ls-remote --tags https://github.com/twbs/bootstrap v3.3.* | cut -d/ -f3 | sort -V | tail -1 | sed -E "s/\^\{\}//")"/dist/css/bootstrap.min.css -o rootfs/html/404/bootstrap.min.css
curl -L https://cdn.jsdelivr.net/npm/bootstrap@"$(git ls-remote --tags https://github.com/twbs/bootstrap v3.3.* | cut -d/ -f3 | sort -V | tail -1 | sed -E "s/\^\{\}//")"/dist/css/bootstrap.min.css -o rootfs/html/default/bootstrap.min.css
- name: push changes
run: |
git add -A
git config user.name "GitHub"
git config user.email "noreply@github.com"
git diff-index --quiet HEAD || git commit -sm "js"
git push


@@ -1,16 +1,14 @@
name: JSON check
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
test-json:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/checkout@v3
- name: json-syntax-check
uses: limitusus/json-syntax-check@77d5756026b93886eaa3dc6ca1c4b17dd19dc703 # v2
uses: limitusus/json-syntax-check@v2
with:
pattern: "\\.json"
pattern: "\\.json$*"


@@ -1,47 +0,0 @@
name: lint-and-format
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
lint-and-format:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6
with:
node-version: lts/*
- uses: pnpm/action-setup@41ff72655975bd51cab0327fa583b6e92b6d3061 # v4
with:
version: latest
- name: install-sponge
run: sudo apt-get install -y moreutils
- name: biome-backend
run: |
cd backend
pnpm install --frozen-lockfile
pnpm biome lint --write
pnpm biome format --write
- name: biome-frontend
run: |
cd frontend
pnpm install --frozen-lockfile
pnpm biome lint --write
pnpm biome format --write
pnpm formatjs compile-folder src/locale/src src/locale/lang
pnpm vitest
./src/locale/scripts/locale-sort.sh
- name: nginxbeautifier
run: |
pnpm add -g nginxbeautifier
nginxbeautifier -s 4 -r rootfs/usr/local/nginx/conf
- name: push changes
run: |
git add -A
git config user.name "GitHub"
git config user.email "noreply@github.com"
git commit -sm "update and lint" || true
git push || true


@@ -1,8 +1,6 @@
name: Shellcheck
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
jobs:
@@ -10,10 +8,10 @@ jobs:
name: Check Shell
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- uses: actions/checkout@v3
- name: Run Shellcheck
uses: ludeeus/action-shellcheck@master
with:
check_together: 'yes'
# env:
# SHELLCHECK_OPTS: --shell sh -e SC1091 -e SC2153 -e SC2154
env:
SHELLCHECK_OPTS: --shell sh


@@ -1,21 +1,14 @@
name: spellcheck
on:
push:
branches:
- develop
pull_request:
workflow_dispatch:
name: reviewdog
on: [pull_request]
jobs:
spellcheck:
name: spellcheck
misspell:
name: runner / misspell
runs-on: ubuntu-latest
steps:
- name: Check out code.
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
- name: Check spelling
uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2
uses: actions/checkout@v3
- name: misspell
uses: reviewdog/action-misspell@v1
with:
check_filenames: true
check_hidden: true
skip: pnpm-lock.yaml,./frontend/src/locale/src
ignore_words_list: afterAll,alog
github_token: ${{ secrets.github_token }}
locale: "US"

.github/workflows/yq.yml (vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
name: yq
on:
workflow_dispatch:
jobs:
yq:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
token: ${{ secrets.YQ }}
- name: update workflows
run: for workflow in .github/workflows/*.yml; do yq "$workflow" | tee "$workflow".tmp && mv "$workflow".tmp "$workflow"; done
- name: push changes
run: |
git config user.name "GitHub"
git config user.email "noreply@github.com"
git add -A
git diff-index --quiet HEAD || git commit -sm "yq"
git push

.gitignore (vendored, 789 lines changed)

@@ -1,7 +1,786 @@
.DS_Store
backend/certbot-dns-plugins.js
frontend/certbot-dns-plugins.js
# User-specific stuff
.idea
.qodo
desktop.files.json
package-lock.json
yarn.lock
desktop.ini
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
composer.phar
/vendor/
!/jobs
!/.gitignore
!/*.xml
#ignore all files in jobs subdirectories except for folders
#note: git doesn't track folders, only file content
jobs/**
!jobs/**/
#uncomment the following line to save next build numbers with config
#!jobs/**/nextBuildNumber
#exclude only config.xml files in repository subdirectories
!config.xml
#don't track workspaces (when users build on the master)
jobs/**/*workspace
*.iml
*.ipr
*.iws
# IntelliJ
out
# Compiled class file
*.class
# Log file
*.log
# BlueJ files
*.ctxt
# Package Files #
*.jar
*.war
*.nar
*.ear
*.zip
*.tar.gz
*.rar
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
# Icon must end with two \r
Icon
# Thumbnails
._*
.vscode
certbot-help.txt
*/node_modules
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
.gradle
/build/
# Ignore Gradle GUI config
gradle-app.setting
# Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
!gradle-wrapper.jar
# Cache of project
.gradletasknamecache
target/
pom.xml.tag
pom.xml.releaseBackup
pom.xml.versionsBackup
pom.xml.next
release.properties
dependency-reduced-pom.xml
buildNumber.properties
.mvn/timing.properties
# https://github.com/takari/maven-wrapper#usage-without-binary-jar
.mvn/wrapper/maven-wrapper.jar
.flattened-pom.xml
# Common working directory
run
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
# Next.js build output
.next
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and *not* Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# TernJS port file
.tern-port
/vendor/
node_modules/
npm-debug.log
yarn-error.log
# Laravel 4 specific
bootstrap/compiled.php
app/storage/
# Laravel 5 & Lumen specific
public/storage
public/hot
# Laravel 5 & Lumen specific with changed public path
public_html/storage
public_html/hot
storage/*.key
.env
Homestead.yaml
Homestead.json
/.vagrant
.phpunit.result.cache
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Oo]bj/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary however generally it will be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# ignore everything in the root except the "wp-content" directory.
!wp-content/
# ignore everything in the "wp-content" directory, except:
# "mu-plugins", "plugins", "themes" directory
wp-content/*
!wp-content/mu-plugins/
!wp-content/plugins/
!wp-content/themes/
# ignore these plugins
wp-content/plugins/hello.php
# ignore specific themes
wp-content/themes/twenty*/
# ignore node dependency directories
node_modules/
# ignore log files and databases
*.log
*.sql
*.sqlite

.imgbotconfig (new file, 6 lines)

@@ -0,0 +1,6 @@
{
  "schedule": "daily",
  "aggressiveCompression": "true",
  "compressWiki": "true",
  "minKBReduced": 0
}

View File

@@ -1 +1 @@
2.14.0
2.10.3

13
.whitesource Normal file
View File

@@ -0,0 +1,13 @@
{
  "scanSettings": {
    "baseBranches": []
  },
  "checkRunSettings": {
    "vulnerableCheckRunConclusionLevel": "failure",
    "displayMode": "diff"
  },
  "issueSettings": {
    "minSeverityLevel": "LOW",
    "issueType": "DEPENDENCY"
  }
}

661
COPYING
View File

@@ -1,661 +0,0 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

3
Caddy.Dockerfile Normal file
View File

@@ -0,0 +1,3 @@
FROM caddy:2.6.4
RUN apk add --no-cache ca-certificates tzdata
COPY Caddyfile /etc/caddy/Caddyfile
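A minimal build-and-run sketch for this image (the tag npmplus-caddy and the port mapping are assumptions, not part of the repo); the official caddy base image already runs caddy with /etc/caddy/Caddyfile, so no CMD is needed.
docker build -f Caddy.Dockerfile -t npmplus-caddy .   # hypothetical tag
docker run --rm -p 80:80 npmplus-caddy                # serves the Caddyfile sites below on port 80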

29
Caddyfile Normal file
View File

@@ -0,0 +1,29 @@
{
	servers :80 {
		protocols h1 h2c
	}
}
http://captive.apple.com:80 {
	respond "Success"
}
http://clients3.google.com:80 {
	respond 204
}
http://connectivitycheck.gstatic.com:80 {
	respond 204
}
http://www.msftncsi.com:80 {
	respond "Microsoft NCSI"
}
http://www.msftconnecttest.com:80 {
	respond "Microsoft Connect Test"
}
http://ipv6.msftconnecttest.com:80 {
	respond "Microsoft Connect Test"
}
http://detectportal.firefox.com:80 {
	respond "<meta http-equiv=\"refresh\" content=\"0;url=https://support.mozilla.org/kb/captive-portal\"/>"
}
http://:80 {
	redir https://{host}{uri} permanent
}
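A hedged smoke test for these connectivity-check endpoints, assuming the container from Caddy.Dockerfile is listening on 127.0.0.1:80; curl's --resolve pins each hostname to the local instance instead of the real host.
curl --resolve captive.apple.com:80:127.0.0.1 http://captive.apple.com/   # expect body: Success
curl -s -o /dev/null -w "%{http_code}\n" --resolve clients3.google.com:80:127.0.0.1 http://clients3.google.com/   # expect: 204
curl -s -o /dev/null -w "%{http_code}\n" --resolve example.com:80:127.0.0.1 http://example.com/   # expect: 301 from the catch-all redir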

View File

@@ -1,246 +1,127 @@
# syntax=docker/dockerfile:1.22.0@sha256:4a43a54dd1fedceb30ba47e76cfcf2b47304f4161c0caeac2db1c61804ea3c91
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS nginx
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ARG LUAJIT_INC=/usr/include/luajit-2.1
ARG LUAJIT_LIB=/usr/lib
ARG NGINX_VER=release-1.29.5
ARG DTR_VER=1.29.2
ARG RCP_VER=1.29.5
ARG ZNP_VER=1.26.3
ARG NB_VER=master
ARG NUB_VER=main
ARG ZNM_VER=master
ARG NHUZFM_VER=main
ARG NF_VER=v0.6.0
ARG HMNM_VER=v0.39
ARG NDK_VER=v0.3.4
ARG LNM_VER=v0.10.29R2
ARG NJS_VER=0.9.6
ARG NAL_VER=master
ARG VTS_VER=v0.2.5
ARG NNTLM_VER=master
ARG NHG2M_VER=3.4
ARG FLAGS
ARG CC=clang
ARG CFLAGS="$FLAGS -m64 -O3 -pipe -flto=full -fstack-clash-protection -fstack-protector-strong -ftrivial-auto-var-init=zero -fno-delete-null-pointer-checks -fno-strict-overflow -fno-strict-aliasing -fno-plt -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3 -Wformat=2 -Werror=format-security -Wno-sign-compare"
ARG CXX=clang++
ARG CXXFLAGS="$FLAGS -m64 -O3 -pipe -flto=full -fstack-clash-protection -fstack-protector-strong -ftrivial-auto-var-init=zero -fno-delete-null-pointer-checks -fno-strict-overflow -fno-strict-aliasing -fno-plt -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=3 -D_GLIBCXX_ASSERTIONS -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS=1 -D_LIBCPP_HARDENING_MODE=_LIBCPP_HARDENING_MODE_FAST -Wformat=2 -Werror=format-security -Wno-sign-compare"
ARG LDFLAGS="-fuse-ld=lld -m64 -Wl,-s -Wl,-O2 -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--sort-common -Wl,--as-needed -Wl,-z,pack-relative-relocs"
WORKDIR /src
COPY patches/*.patch /src
RUN apk upgrade --no-cache -a && \
apk add --no-cache git make clang lld cmake ninja file \
linux-headers libatomic_ops-dev aws-lc aws-lc-dev pcre2-dev luajit-dev zlib-ng-dev brotli-dev zstd-dev libxslt-dev openldap-dev quickjs-ng-dev libmaxminddb-dev clang-dev
RUN git clone --depth 1 https://github.com/nginx/nginx --branch "$NGINX_VER" /src/nginx && \
cd /src/nginx && \
wget -q https://raw.githubusercontent.com/nginx-modules/ngx_http_tls_dyn_size/refs/heads/master/nginx__dynamic_tls_records_"$DTR_VER"%2B.patch -O /src/nginx/1.patch && \
git apply /src/nginx/1.patch && \
wget -q https://raw.githubusercontent.com/openresty/openresty/refs/heads/master/patches/nginx/"$RCP_VER"/nginx-"$RCP_VER"-resolver_conf_parsing.patch -O /src/nginx/2.patch && \
git apply /src/nginx/2.patch && \
wget -q https://patch-diff.githubusercontent.com/raw/nginx/nginx/pull/689.patch -O /src/nginx/3.patch && \
git apply /src/nginx/3.patch && \
wget -q https://raw.githubusercontent.com/zlib-ng/patches/refs/heads/master/nginx/"$ZNP_VER"-zlib-ng.patch -O /src/nginx/4.patch && \
git apply /src/nginx/4.patch && \
git apply /src/nginx.patch && \
\
git clone --depth 1 https://github.com/google/ngx_brotli --branch "$NB_VER" /src/ngx_brotli && \
cd /src/ngx_brotli && \
git apply /src/ngx_brotli.patch && \
git clone --depth 1 https://github.com/clyfish/ngx_unbrotli --branch "$NUB_VER" /src/ngx_unbrotli && \
cd /src/ngx_unbrotli && \
git apply /src/ngx_unbrotli.patch && \
git clone --depth 1 https://github.com/tokers/zstd-nginx-module --branch "$ZNM_VER" /src/zstd-nginx-module && \
cd /src/zstd-nginx-module && \
wget -q https://patch-diff.githubusercontent.com/raw/tokers/zstd-nginx-module/pull/44.patch -O /src/zstd-nginx-module/1.patch && \
wget -q https://patch-diff.githubusercontent.com/raw/tokers/zstd-nginx-module/pull/23.patch -O /src/zstd-nginx-module/2.patch && \
git apply /src/zstd-nginx-module.patch && \
git apply /src/zstd-nginx-module/1.patch && \
git apply /src/zstd-nginx-module/2.patch && \
git clone --depth 1 https://github.com/HanadaLee/ngx_http_unzstd_filter_module --branch "$NHUZFM_VER" /src/ngx_http_unzstd_filter_module && \
git clone --depth 1 https://github.com/aperezdc/ngx-fancyindex --branch "$NF_VER" /src/ngx-fancyindex && \
git clone --depth 1 https://github.com/openresty/headers-more-nginx-module --branch "$HMNM_VER" /src/headers-more-nginx-module && \
git clone --depth 1 https://github.com/vision5/ngx_devel_kit --branch "$NDK_VER" /src/ngx_devel_kit && \
git clone --depth 1 https://github.com/openresty/lua-nginx-module --branch "$LNM_VER" /src/lua-nginx-module && \
cd /src/lua-nginx-module && \
git apply /src/lua-nginx-module.patch && \
\
git clone --depth 1 https://github.com/nginx/njs --branch "$NJS_VER" /src/njs && \
git clone --depth 1 https://github.com/kvspb/nginx-auth-ldap --branch "$NAL_VER" /src/nginx-auth-ldap && \
git clone --depth 1 https://github.com/vozlt/nginx-module-vts --branch "$VTS_VER" /src/nginx-module-vts && \
git clone --depth 1 https://github.com/gabihodoroaga/nginx-ntlm-module --branch "$NNTLM_VER" /src/nginx-ntlm-module && \
git clone --depth 1 https://github.com/leev/ngx_http_geoip2_module --branch "$NHG2M_VER" /src/ngx_http_geoip2_module
RUN cd /src/nginx && \
/src/nginx/auto/configure \
--build=NPMplus \
--with-debug \
--with-compat \
--with-threads \
--with-file-aio \
--with-libatomic \
--with-pcre \
--with-pcre-jit \
--without-select_module \
--without-poll_module \
--with-stream \
--with-stream_ssl_module \
--with-stream_ssl_preread_module \
--with-stream_realip_module \
--with-http_v2_module \
--with-http_v3_module \
--with-http_ssl_module \
--with-http_realip_module \
--with-http_gunzip_module \
--with-http_gzip_static_module \
--with-http_sub_module \
--with-http_addition_module \
--with-http_stub_status_module \
--with-http_auth_request_module \
--add-module=/src/ngx_brotli \
--add-module=/src/ngx_unbrotli \
--add-module=/src/zstd-nginx-module \
--add-module=/src/ngx_http_unzstd_filter_module \
--add-module=/src/ngx-fancyindex \
--add-module=/src/headers-more-nginx-module \
--add-module=/src/ngx_devel_kit \
--add-module=/src/lua-nginx-module \
--add-dynamic-module=/src/njs/nginx \
--add-dynamic-module=/src/nginx-auth-ldap \
--add-dynamic-module=/src/nginx-module-vts \
--add-dynamic-module=/src/nginx-ntlm-module \
--add-dynamic-module=/src/ngx_http_geoip2_module \
--with-ld-opt="$LDFLAGS" && \
\
make -j "$(nproc)" install
RUN git clone --depth 1 https://github.com/openappsec/attachment /src/attachment && \
cd /src/attachment && \
git apply /src/attachment.patch && \
cmake /src/attachment -G Ninja && \
ninja && \
mv -v /src/attachment/attachments/nginx/ngx_module/libngx_module.so /usr/local/nginx/modules/libngx_module.so
RUN find /usr/local/nginx/modules -name "*.so" -exec strip -s {} \; && \
strip -s /usr/local/nginx/sbin/nginx && \
strip -s /src/attachment/core/shmem_ipc/libosrc_shmem_ipc.so && \
strip -s /src/attachment/core/compression/libosrc_compression_utils.so && \
strip -s /src/attachment/attachments/nginx/nginx_attachment_util/libosrc_nginx_attachment_util.so && \
\
find /usr/local/nginx/modules -name "*.so" -exec file {} \; && \
file /usr/local/nginx/sbin/nginx && \
file /src/attachment/core/shmem_ipc/libosrc_shmem_ipc.so && \
file /src/attachment/core/compression/libosrc_compression_utils.so && \
file /src/attachment/attachments/nginx/nginx_attachment_util/libosrc_nginx_attachment_util.so && \
/usr/local/nginx/sbin/nginx -V
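Not part of the Dockerfile: a hypothetical check inside this build stage. The --add-module entries are compiled into the binary, while the --add-dynamic-module ones (njs, ldap, vts, ntlm, geoip2) are emitted as .so files that still need load_module directives in nginx.conf at runtime.
ls /usr/local/nginx/modules/*.so                                     # dynamic modules built above
/usr/local/nginx/sbin/nginx -V 2>&1 | tr " " "\n" | grep "^--add"    # static vs. dynamic module flags
# e.g. (assumed config line, not from the repo): load_module modules/ngx_http_js_module.so;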
FROM --platform="$BUILDPLATFORM" alpine:3.18.2 as frontend
COPY frontend /build/frontend
COPY global/certbot-dns-plugins.js /build/frontend/certbot-dns-plugins.js
ARG NODE_ENV=production \
NODE_OPTIONS=--openssl-legacy-provider
RUN apk add --no-cache ca-certificates nodejs yarn git python3 build-base && \
cd /build/frontend && \
yarn --no-lockfile install && \
yarn --no-lockfile build && \
yarn cache clean --all
COPY darkmode.css /build/frontend/dist/css/darkmode.css
COPY security.txt /build/frontend/dist/.well-known/security.txt
FROM --platform="$BUILDPLATFORM" alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS frontend
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ARG NODE_ENV=production
COPY frontend /app
WORKDIR /app/frontend
RUN apk upgrade --no-cache -a && \
apk add --no-cache nodejs pnpm && \
pnpm install --frozen-lockfile && \
pnpm formatjs compile-folder src/locale/src src/locale/lang && \
pnpm tsc && \
pnpm vite build
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659 AS backend
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ARG NODE_ENV=production
COPY backend /app
WORKDIR /app
RUN apk upgrade --no-cache -a && \
apk add --no-cache nodejs pnpm binutils file && \
pnpm install --frozen-lockfile --prod && \
pnpm cache delete && \
find node_modules -name "*.map" -delete && \
rm -r node_modules/better-sqlite3/deps/sqlite3 && \
find /app/node_modules -name "*.node" -type f -exec strip -s {} \; && \
find /app/node_modules -name "*.node" -type f -exec file {} \;
FROM --platform="$BUILDPLATFORM" alpine:3.18.2 as backend
COPY backend /build/backend
COPY global/certbot-dns-plugins.js /build/backend/certbot-dns-plugins.js
ARG NODE_ENV=production \
TARGETARCH
RUN apk add --no-cache ca-certificates nodejs-current yarn && \
wget https://gobinaries.com/tj/node-prune -O - | sh && \
cd /build/backend && \
if [ "$TARGETARCH" = "amd64" ]; then \
npm_config_target_platform=linux npm_config_target_arch=x64 yarn install --no-lockfile; \
elif [ "$TARGETARCH" = "arm64" ]; then \
npm_config_target_platform=linux npm_config_target_arch=arm64 yarn install --no-lockfile; \
fi && \
node-prune && \
yarn cache clean --all
FROM alpine:3.23.3@sha256:25109184c71bdad752c8312a8623239686a9a2071e8825f20acb8f2198c3f659
SHELL ["/bin/ash", "-eo", "pipefail", "-c"]
ENV NODE_ENV=production
ARG LRC_VER=v0.1.32R1
ARG LRL_VER=v0.15
ARG LCSB_VER=v1.0.13
FROM python:3.11.4-alpine3.18 as certbot
RUN apk add --no-cache ca-certificates build-base libffi-dev && \
python3 -m venv /usr/local/certbot && \
. /usr/local/certbot/bin/activate && \
pip install --no-cache-dir certbot
COPY --from=nginx /usr/local/nginx /usr/local/nginx
COPY --from=nginx /src/attachment/core/shmem_ipc/libosrc_shmem_ipc.so /usr/local/lib/libosrc_shmem_ipc.so
COPY --from=nginx /src/attachment/core/compression/libosrc_compression_utils.so /usr/local/lib/libosrc_compression_utils.so
COPY --from=nginx /src/attachment/attachments/nginx/nginx_attachment_util/libosrc_nginx_attachment_util.so /usr/local/lib/libosrc_nginx_attachment_util.so
COPY --from=backend /app /app
FROM --platform="$BUILDPLATFORM" alpine:3.18.2 as crowdsec
RUN apk add --no-cache ca-certificates git build-base && \
git clone --recursive https://github.com/crowdsecurity/cs-nginx-bouncer /src && \
cd /src && \
make && \
tar xzf crowdsec-nginx-bouncer.tgz && \
mv crowdsec-nginx-bouncer-* crowdsec-nginx-bouncer && \
cd /src/crowdsec-nginx-bouncer && \
sed -i "/lua_package_path/d" nginx/crowdsec_nginx.conf && \
sed -i "s|/etc/crowdsec/bouncers/crowdsec-nginx-bouncer.conf|/data/etc/crowdsec/crowdsec.conf|g" nginx/crowdsec_nginx.conf && \
sed -i "s|API_KEY=.*|API_KEY=|g" lua-mod/config_example.conf && \
sed -i "s|ENABLED=.*|ENABLED=false|g" lua-mod/config_example.conf && \
sed -i "s|API_URL=.*|API_URL=http://127.0.0.1:8080|g" lua-mod/config_example.conf && \
sed -i "s|BAN_TEMPLATE_PATH=.*|BAN_TEMPLATE_PATH=/data/etc/crowdsec/ban.html|g" lua-mod/config_example.conf && \
sed -i "s|CAPTCHA_TEMPLATE_PATH=.*|CAPTCHA_TEMPLATE_PATH=/data/etc/crowdsec/crowdsec.conf|g" lua-mod/config_example.conf
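For illustration only (not in the Dockerfile): the sed calls above ship the bouncer disabled and point it at /data/etc/crowdsec/crowdsec.conf, so enabling it later could look like this; the API key value is a placeholder (crowdsec issues one via `cscli bouncers add`).
sed -i "s|ENABLED=.*|ENABLED=true|g" /data/etc/crowdsec/crowdsec.conf
sed -i "s|API_KEY=.*|API_KEY=YOUR_BOUNCER_KEY|g" /data/etc/crowdsec/crowdsec.conf   # placeholder value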
COPY rootfs /
COPY LICENSE /LICENSE
COPY COPYING /COPYING
WORKDIR /app
RUN apk upgrade --no-cache -a && \
apk add --no-cache tzdata tini \
aws-lc pcre2 luajit zlib-ng brotli zstd lua5.1-cjson libxml2 libldap quickjs-ng-libs libmaxminddb-libs \
curl coreutils findutils grep jq openssl shadow su-exec util-linux-misc \
bash bash-completion nano \
logrotate goaccess fcgi \
luarocks5.1 git make \
nodejs python3 && \
\
FROM zoeyvid/nginx-quic:176
COPY rootfs /
RUN apk add --no-cache ca-certificates tzdata \
lua5.1-lzlib \
nodejs-current \
openssl apache2-utils \
coreutils grep jq curl shadow sudo \
luarocks5.1 wget lua5.1-dev build-base git yarn && \
wget https://raw.githubusercontent.com/SpiderLabs/ModSecurity/v3/master/modsecurity.conf-recommended -O /usr/local/nginx/conf/conf.d/include/modsecurity.conf && \
wget https://raw.githubusercontent.com/SpiderLabs/ModSecurity/v3/master/unicode.mapping -O /usr/local/nginx/conf/conf.d/include/unicode.mapping && \
sed -i "s|SecRuleEngine .*|SecRuleEngine On|g" /usr/local/nginx/conf/conf.d/include/modsecurity.conf && \
echo "Include /data/etc/modsecurity/modsecurity.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity.conf && \
cp /usr/local/nginx/conf/conf.d/include/modsecurity.conf /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
echo "Include /data/etc/modsecurity/crs-setup.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
echo "Include /usr/local/nginx/conf/conf.d/include/coreruleset/crs-setup.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
echo "#Include /usr/local/nginx/conf/conf.d/include/coreruleset/plugins/*-config.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
echo "#Include /usr/local/nginx/conf/conf.d/include/coreruleset/plugins/*-before.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
echo "Include /usr/local/nginx/conf/conf.d/include/coreruleset/rules/*.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
echo "#Include /usr/local/nginx/conf/conf.d/include/coreruleset/plugins/*-after.conf" | tee -a /usr/local/nginx/conf/conf.d/include/modsecurity-crs.conf && \
git clone https://github.com/coreruleset/coreruleset /tmp/coreruleset && \
wget https://patch-diff.githubusercontent.com/raw/coreruleset/coreruleset/pull/3218.patch -O /tmp/coreruleset/http3.patch && \
cd /tmp/coreruleset && \
git apply /tmp/coreruleset/http3.patch && \
cd / && \
mkdir /usr/local/nginx/conf/conf.d/include/coreruleset && \
cp /tmp/coreruleset/crs-setup.conf.example /usr/local/nginx/conf/conf.d/include/coreruleset/crs-setup.conf.example && \
sed -i '/#/!d' /usr/local/nginx/conf/conf.d/include/coreruleset/crs-setup.conf.example && \
mv /tmp/coreruleset/crs-setup.conf.example /usr/local/nginx/conf/conf.d/include/coreruleset/crs-setup.conf && \
mv /tmp/coreruleset/rules /usr/local/nginx/conf/conf.d/include/coreruleset/rules && \
#git clone --recursive https://github.com/coreruleset/phpmyadmin-rule-exclusions-plugin /tmp/phpmyadmin-rule-exclusions-plugin && \
#git clone --recursive https://github.com/coreruleset/nextcloud-rule-exclusions-plugin /tmp/nextcloud-rule-exclusions-plugin && \
#git clone --recursive https://github.com/coreruleset/wordpress-rule-exclusions-plugin /tmp/wordpress-rule-exclusions-plugin && \
#git clone --recursive https://github.com/coreruleset/cpanel-rule-exclusions-plugin /tmp/cpanel-rule-exclusions-plugin && \
#git clone --recursive https://github.com/coreruleset/body-decompress-plugin /tmp/body-decompress-plugin && \
#git clone --recursive https://github.com/coreruleset/auto-decoding-plugin /tmp/auto-decoding-plugin && \
#git clone --recursive https://github.com/coreruleset/google-oauth2-plugin /tmp/google-oauth2-plugin && \
mv /tmp/coreruleset/plugins /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/phpmyadmin-rule-exclusions-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/nextcloud-rule-exclusions-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/wordpress-rule-exclusions-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/cpanel-rule-exclusions-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/body-decompress-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/auto-decoding-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
#mv /tmp/google-oauth2-plugin/plugins/* /usr/local/nginx/conf/conf.d/include/coreruleset/plugins && \
rm -r /tmp/* && \
luarocks-5.1 install lua-resty-http && \
luarocks-5.1 install lua-resty-string && \
luarocks-5.1 install lua-resty-openssl && \
luarocks-5.1 install lua-resty-openidc && \
luarocks-5.1 install lua-resty-session && \
\
git clone --depth 1 https://github.com/openresty/lua-resty-core --branch "$LRC_VER" /src/lua-resty-core && \
cd /src/lua-resty-core && \
make -j "$(nproc)" install LUA_LIB_DIR=/usr/local/share/lua/5.1 && \
\
git clone --depth 1 https://github.com/openresty/lua-resty-lrucache --branch "$LRL_VER" /src/lua-resty-lrucache && \
cd /src/lua-resty-lrucache && \
make -j "$(nproc)" install LUA_LIB_DIR=/usr/local/share/lua/5.1 && \
\
git clone --depth 1 https://github.com/crowdsecurity/lua-cs-bouncer --branch "$LCSB_VER" /src/lua-cs-bouncer && \
mv /src/lua-cs-bouncer/lib/* /usr/local/share/lua/5.1 && \
mv /src/lua-cs-bouncer/templates/captcha.html /etc/captcha.html.original && \
mv /src/lua-cs-bouncer/templates/ban.html /etc/ban.html.original && \
\
cd && \
rm -r /src /tmp/luarocks_local_cache-* && \
apk del --no-cache luarocks5.1 git make && \
\
sed -i "s|placeholder|$(cat /app/package.json | jq -r .version)|g" /usr/local/nginx/conf/conf.d/crowdsec.conf.disabled && \
\
python3 -m venv /usr/local && \
pip install --no-cache-dir --upgrade pip certbot && \
\
wget -q https://raw.githubusercontent.com/tomwassenberg/certbot-ocsp-fetcher/refs/heads/main/certbot-ocsp-fetcher -O - | sed "s|/live||g" > /usr/local/bin/certbot-ocsp-fetcher.sh && \
wget -q https://raw.githubusercontent.com/vasilevich/nginxbeautifier/5cee8db2a505f2a253e24691399c828c043071fc/index.js -O /usr/local/bin/nginxbeautifier && \
wget -q https://raw.githubusercontent.com/vasilevich/nginxbeautifier/5cee8db2a505f2a253e24691399c828c043071fc/nginxbeautifier.js -O /usr/local/bin/nginxbeautifier.js && \
\
ln -s /usr/local/nginx/sbin/nginx /usr/local/bin/nginx && \
ln -s /app/password-reset.js /usr/local/bin/password-reset.js && \
luarocks-5.1 install lua-cjson && \
yarn global add nginxbeautifier && \
apk del --no-cache luarocks5.1 wget lua5.1-dev build-base git yarn
COPY --from=backend /build/backend /app
COPY --from=frontend /build/frontend/dist /app/frontend
COPY --from=certbot /usr/local/certbot /usr/local/certbot
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/lib/plugins /usr/local/nginx/lib/lua/plugins
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/lib/crowdsec.lua /usr/local/nginx/lib/lua/crowdsec.lua
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/templates/ban.html /usr/local/nginx/conf/conf.d/include/ban.html
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/templates/captcha.html /usr/local/nginx/conf/conf.d/include/captcha.html
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/lua-mod/config_example.conf /usr/local/nginx/conf/conf.d/include/crowdsec.conf
COPY --from=crowdsec /src/crowdsec-nginx-bouncer/nginx/crowdsec_nginx.conf /usr/local/nginx/conf/conf.d/include/crowdsec_nginx.conf
RUN ln -s /app/password-reset.js /usr/local/bin/password-reset.js && \
ln -s /app/sqlite-vaccum.js /usr/local/bin/sqlite-vaccum.js && \
ln -s /app/index.js /usr/local/bin/index.js && \
\
chmod +x /usr/local/bin/*
ln -s /app/index.js /usr/local/bin/index.js
COPY --from=frontend /app/dist /app/frontend
ENV NODE_ENV=production \
NODE_CONFIG_DIR=/data/etc/npm \
PATH="/usr/local/certbot/bin:$PATH" \
DB_SQLITE_FILE=/data/etc/npm/database.sqlite
ENTRYPOINT ["tini", "--", "entrypoint.sh"]
WORKDIR /app
ENTRYPOINT ["start.sh"]
HEALTHCHECK CMD healthcheck.sh
LABEL com.centurylinklabs.watchtower.monitor-only="true"
LABEL wud.watch="false"
LABEL wud.watch.digest="false"

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2017
Copyright (c) 2017
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

546
README.md
View File

@@ -1,350 +1,234 @@
# NPMplus
<p align="center" class="items-center">
<img src="https://nginxproxymanager.com/github.png">
<!---
<br><br>
<img src="https://img.shields.io/badge/version-2.10.3-green.svg?style=for-the-badge">
<a href="https://hub.docker.com/r/zoeyvid/nginx-proxy-manager">
<img src="https://img.shields.io/docker/stars/zoeyvid/nginx-proxy-manager.svg?style=for-the-badge">
</a>
<a href="https://hub.docker.com/r/zoeyvid/nginx-proxy-manager">
<img src="https://img.shields.io/docker/pulls/zoeyvid/nginx-proxy-manager.svg?style=for-the-badge">
</a>
--->
</p>
If you don't need the web GUI of NPMplus, you may also have a look at Caddy: https://caddyserver.com
- [Compatibility (to Upstream)](#compatibility-to-upstream)
This project comes as a pre-built docker image that enables you to easily forward to your websites
running at home or otherwise, including free TLS, without having to know too much about Nginx or Let's Encrypt.
- [Quick Setup](#quick-setup)
- [Migration from upstream/vanilla nginx-proxy-manager](#migration-from-upstreamvanilla-nginx-proxy-manager)
- [Screenshots](https://nginxproxymanager.com/screenshots)
**Note: this fork is distributed under the GNU Affero General Public License version 3. It is based on the MIT licensed [nginx-proxy-manager](https://github.com/NginxProxyManager/nginx-proxy-manager).** <br>
**Note: by running NPMplus you agree to the TOS of Let's Encrypt/your custom CA.** <br>
**Note: remember to expose udp/quic for the https port (443/udp).** <br>
**Note: remember to add your domain to the [hsts preload list](https://hstspreload.org) if you enabled hsts for your domain.** <br>
**Note: please report issues first to this fork before reporting them to the upstream repository.** <br>
## List of some changes
# Note: To fix [this issue](https://github.com/SpiderLabs/ModSecurity/issues/2848), instead of running `nginx -s reload`, this fork kills nginx and starts it again. This will result in a 502 error when you update your hosts. See https://github.com/ZoeyVid/nginx-proxy-manager/issues/296 and https://github.com/ZoeyVid/nginx-proxy-manager/issues/283.
- Supports HTTP/3 (QUIC), requires you to expose https with udp
- Support for crowdsec and openappsec
- Support for acme profiles (letsencrypt shortlived is used by default)
- Improved support for different acme servers (like ocsp/must-staple)
- OIDC support
- smaller image based on alpine
- ML-KEM support (also hardened TLS settings enforced)
- https for the NPMplus interface
- Goaccess included
- punycode domain support
- zstd and brotli
- basic security headers always send
- allow empty ports to support loadbalancing
- proxy protocol support
- improved nginx build and nginx templates
- file and php server support (and fancyindex)
- option to edit custom certs
- gravatars are cached locally and fetched by the backend (better privacy by not exposing you directly to gravatar)
- qrcodes for totp are generated locally in your browser instead of using a third-party api (better privacy/security by not exposing you and the secret to the third-party api)
- re-added some things that were removed with upstream's new frontend
- uses secure cookies instead of local storage to save the token
- Password reset (only sqlite) using `docker exec -it npmplus password-reset.js USER_EMAIL PASSWORD`
- many other things, see this README.md and the compose.yaml
## Compatibility (to Upstream)
- Supported architectures: x86_64-v2/amd64v2 (check with `/lib/ld-linux-x86-64.so.2 --help`; plain x86-64 is not supported, only v2 and up) and aarch64/arm64. Other architectures (including other 64-bit ones and any 32-bit arch like armhf/armv7 (dropped) or armel/armv6) are not supported because of the time it takes to compile.
- I test NPMplus with docker, but podman should also work (I advise against running the NPMplus container inside an LXC container: it will work, but please don't do it, it works better without; install docker/podman on the host or in a KVM and run NPMplus there)
- MariaDB(/MySQL)/PostgreSQL may work as databases for NPMplus (configuration like in upstream), but they are unsupported, have no advantage over SQLite (at least with NPMplus) and are not recommended. Please note that you can't migrate from any of these to SQLite without making a fresh install and/or copying everything yourself.
- NPMplus uses https instead of http for the admin interface
- NPMplus won't trust cloudflare until you set the env TRUST_CLOUDFLARE to true, but please read [this](#notes-on-cloudflare) first before setting the env to true.
- route53 is not supported as dns-challenge provider and Amazon CloudFront IPs can't be automatically trusted in NPMplus, even if you set TRUST_CLOUDFLARE env to true.
- The following certbot dns plugins have been replaced, which means that certs using one of these providers will not renew and need to be recreated: `certbot-dns-he`, `certbot-dns-dnspod`, `certbot-dns-online`, `certbot-dns-powerdns` and `certbot-dns-do` (`certbot-dns-do` was replaced in upstream with v2.12.4 and then merged into NPMplus)
- There are many changes and improvements to the nginx config, so please don't follow guides on the internet about custom/advanced config; they are either redundant or should not be used at all with NPMplus
- Many forms have changed behavior, see [Comments on some buttons](#comments-on-some-buttons)
## Project Goal
## Quick Setup
1. Install Docker and Docker Compose (podman or docker rootless may also work)
- [Docker Install documentation](https://docs.docker.com/engine/install)
- [Docker Compose Install documentation](https://docs.docker.com/compose/install/linux)
2. Download this [compose.yaml](https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml) (or use its content as a portainer stack)
3. Adjust TZ and ACME_EMAIL to your values and maybe adjust other env options to your needs
4. Start NPMplus by running (or deploy your portainer stack)
```bash
docker compose up -d
```
5. Log in to the Admin UI: When your docker container is running, connect to the admin interface using `https://` on port `81`.
I created this project to fill a personal need to provide users with an easy way to accomplish reverse
proxying hosts with TLS termination, and it had to be so easy that a monkey could do it. This goal hasn't changed.
While there might be advanced options, they are optional and the project should be as simple as possible
so that the barrier for entry here is low.
## Migration from upstream/vanilla nginx-proxy-manager
- **NOTE: Migrating back to the original version is not possible.** Please make a **backup** before migrating, so you have the option to revert if needed
1. Please read [this](#compatibility-to-upstream) first
2. make a backup of your data and letsencrypt folders (creating a copy using `cp -a` should be enough)
3. download the latest compose.yaml of NPMplus
4. adjust your paths (of /etc/letsencrypt and /data) to the ones you used with nginx-proxy-manager
5. adjust TZ and ACME_EMAIL to your values and maybe adjust other env options to your needs
6. stop nginx-proxy-manager
7. deploy the NPMplus compose.yaml
8. You should now remove the `/etc/letsencrypt` mount, since it was moved to `/data` during migration, then redeploy the compose file
9. Since many forms have changed, please check if they are still correct for every host you have.
10. If you proxy NPM(plus) through NPM(plus) make sure to change the scheme from http to https
11. Because of added CSP rules gravatar images will not load; to fix this you need to open the form to edit a user's name and save it without changes
12. Maybe setup crowdsec (see below)
13. Please report all (migration) issues you may have
<!---
### Sponsor the original creator (not us):
<a href="https://www.buymeacoffee.com/jc21" target="_blank"><img src="http://public.jc21.com/github/by-me-a-coffee.png" alt="Buy Me A Coffee" style="height: 51px !important;width: 217px !important;" ></a>
--->
# Crowdsec
<!--Note: Using Immich behind NPMplus with enabled appsec causes issues, see here: [#1241](https://github.com/ZoeyVid/NPMplus/discussions/1241) <br>-->
Note: If you don't [disable sharing in crowdsec](https://docs.crowdsec.net/docs/next/configuration/crowdsec_configuration/#sharing), you may need to mention that [this](https://docs.crowdsec.net/docs/central_api/intro/#signal-meta-data) is sent to crowdsec in your privacy policy.
1. Install crowdsec and the ZoeyVid/npmplus collection, for example by using the crowdsec container at the end of the compose.yaml; you may also want to install [this](https://app.crowdsec.net/hub/author/crowdsecurity/collections/http-dos), but be warned of false positives
2. Set LOGROTATE to `true` in your `compose.yaml` and redeploy
3. Open `/opt/crowdsec/conf/acquis.d/npmplus.yaml` (path may be different depending how you installed crowdsec) and fill it with:
```yaml
filenames:
- /opt/npmplus/nginx/logs/*.log
labels:
type: npmplus
---
listen_addr: 0.0.0.0:7422
appsec_config: crowdsecurity/appsec-default
name: appsec
source: appsec
labels:
type: appsec
#---
# If you use open-appsec, uncomment the section below.
# If connecting to open-appsec cloud, you must edit the default 'log trigger'
# in the cloud dashboard: check "Log to > gateway / agent" and click 'enforce'.
# Otherwise, no intrusion events will be logged to the local agent
# for CrowdSec to process.
#source: file
#filenames:
# - /opt/openappsec/logs/cp-nano-http-transaction-handler.log*
#labels:
# type: openappsec
```
4. Make sure to use `network_mode: host` in your compose file for the NPMplus container
5. Run `docker exec crowdsec cscli bouncers add npmplus` and save the api key of the output
6. Open `/opt/npmplus/crowdsec/crowdsec.conf`
7. Set `ENABLED` to `true`
8. Use the output of step 5 as `API_KEY` (see the sketch after this list)
9. Save the file
10. Redeploy the `compose.yaml`
11. It is recommended to block at the earliest possible point, so if possible set up a firewall bouncer: https://docs.crowdsec.net/u/bouncers/firewall, make sure to also include the docker iptables in the firewall bouncer config
12. Note that when using crowdsec requests will always be buffered, so setting `proxy_(request_)buffering` to off will not work
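After steps 6 to 9, the relevant part of `/opt/npmplus/crowdsec/crowdsec.conf` might look like this (a sketch; the real file contains more options and the key is a placeholder):
```
ENABLED=true
API_KEY=<output of step 5>
```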
## Use of external php-fpm (recommended)
1. Create a new Proxy Host with some dummy data in the details tab (since these get fully ignored)
2. Make other settings (like TLS)
3. Create a custom location `/`, set the scheme to `path`, put in the path, then press the gear button and fill this in (edit the last line):
```
location ~* [^/]\.php(?:$|/) {
fastcgi_split_path_info ^(.*\.php)(/.*)$;
try_files $fastcgi_script_name =404;
fastcgi_pass ...; # set this to the address of your php-fpm (socket/tcp): https://nginx.org/en/docs/http/ngx_http_fastcgi_module.html#fastcgi_pass
}
```
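The `fastcgi_pass` line must point at your actual php-fpm; two hedged examples (the socket path and address are assumptions, use whatever your php-fpm actually listens on):
```
fastcgi_pass unix:/run/php/php-fpm.sock; # php-fpm listening on a unix socket
fastcgi_pass 192.0.2.10:9000; # php-fpm listening on tcp
```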
## Features
## Use of inbuilt php-fpm (not recommended)
1. First enable php inside your compose file (you can add more php extensions using envs in the compose file; see the sketch below)
2. Set the forwarding port to the php version you want to use and is supported by NPMplus (like 83/84/85)
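As a minimal sketch, assuming the `PHP82`/`PHP82_APKS` env names shown in the legacy compose example further down this page, enabling the inbuilt php could look like this:
```yaml
environment:
  - "PHP82=true" # enable the inbuilt php-fpm
  - "PHP82_APKS=php82-curl" # optional: add php extensions
```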
## Comments on some buttons
- Forward Hostname / IP / Path: if the scheme is set to `path` you can just put in a path and nginx works as a file server; otherwise you need to input an ip/domain, and you can also append a path to the ip/domain like `127.0.0.1/path` to proxy to a subpath.
- For custom locations with a set path, dns will only be refreshed on nginx reloads and the path of the location will be stripped. So a request `GET /cdf/abc` to a custom location `/cdf` which proxies to `127.0.0.1/abc` will proxy to `127.0.0.1/abc/abc`; a custom location `/cdf/` which proxies to `127.0.0.1/` will proxy to `127.0.0.1/abc`; and a custom location `/cdf` which proxies to `127.0.0.1` will proxy to `127.0.0.1/cdf/abc`
- If the scheme is set to `path`, a path ending with a `/` will be searched relative to the custom location (it uses nginx alias) and a path without a trailing `/` will be searched relative to the main `/` location (it uses nginx root); see the sketch after this list
- Forward Port (optional): port of upstream or php version if scheme is `path`
- Send noindex header and block some user agents: This does what it says; it appends a header to all responses which says that the site should not be indexed, while blocking requests of crawlers based on the user agent sent with the request
- Disable Crowdsec Appsec: this will disable crowdsec appsec only for one host/one location, this will only do something if appsec is configured
- Disable Response Buffering: Most of the time you want to keep buffering enabled; you may want to disable this if you, for example, want to stream videos and you have a fast and stable connection to the upstream server. This affects the connection from the upstream server to NPMplus
- Disable Request Buffering: Most of the time you want to keep buffering enabled; request buffering will always be enabled if crowdsec appsec is enabled. You may want to disable this if you, for example, want to upload huge files and have a fast and stable connection to the upstream server. This affects the connection from NPMplus to the upstream server
- Enable compression by upstream: this will allow the backend to compress files. I recommend keeping this disabled; there may be cases where this is needed since otherwise the upstream misbehaves for some reason (like collabora in nextcloud all-in-one)
- Enable fancyindex: this will enable fancyindex, which shows an index of all files in the folder if there is no index file; only enable this if you know what you are doing and you need the index
- Websockets: this button was removed, websockets are now always enabled
- Reuse Key: this will make the new cert always keep its key unless you force renew it. I recommend keeping this disabled (not keeping the key); a reason to keep the key would be TLSA/pubkey pinning
- TLS to upstream (for Streams): This can be used if your stream target already uses tls but you want to override it with a NPMplus cert; do not enable it if you don't set a new cert, since this would downgrade the connection to be unencrypted
- X-Frame-Options: will control the X-Frame-Options header, none will remove the header, SAMEORIGIN/DENY will set it to these values and upstream will keep what upstream sends
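A hedged sketch of the alias/root distinction described above (the paths are hypothetical; the actual generated config contains more directives):
```
location /cdf/ {
    alias /srv/files/; # path ends with "/": resolved relative to the custom location (nginx alias)
}
location /cdf {
    root /srv/files; # path without trailing "/": the location is appended to the path (nginx root)
}
```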
## Examples of implementing some services using auth_request
### Anubis
1. Deploy an anubis container (see the compose.yaml for an example and information)
2. In the mounted anubis bot policy file the "status_codes" should be set to 401 and 403, like this:
```yaml
status_codes:
CHALLENGE: 401
DENY: 403
```
3. Set the AUTH_REQUEST_ANUBIS_UPSTREAM env in the NPMplus compose.yaml and select anubis in the Auth Request selection, no custom/advanced config/locations needed
4. You can override the "allow", "checking" and "blocked" images used by default by setting the `AUTH_REQUEST_ANUBIS_USE_CUSTOM_IMAGES` env to true and putting your custom images as happy.webp, pensive.webp and reject.webp into /opt/npmplus/anubis
### Tinyauth
1. Set the AUTH_REQUEST_TINYAUTH_UPSTREAM and AUTH_REQUEST_TINYAUTH_DOMAIM env in the NPMplus compose.yaml and select tinyauth in the Auth Request selection, no custom/advanced config/locations needed
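For illustration, a hedged compose excerpt for the tinyauth case (the address is a placeholder for wherever your tinyauth container is reachable; the other services below follow the same env pattern):
```yaml
environment:
  - "AUTH_REQUEST_TINYAUTH_UPSTREAM=http://tinyauth:3000" # hypothetical address
```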
### Authelia (modern)
1. Set the AUTH_REQUEST_AUTHELIA_UPSTREAM env in the NPMplus compose.yaml and select authelia (modern) in the Auth Request selection, no custom/advanced config/locations needed
### Authentik
1. Set the AUTH_REQUEST_AUTHENTIK_UPSTREAM env (and optional AUTH_REQUEST_AUTHENTIK_DOMAIN env if you use the "domain level" variant in authentik, do not set this env if you use the "single application" variant) in the NPMplus compose.yaml and select authentik/authentik-send-basic-auth in the Auth Request selection, no custom/advanced config/locations needed
## Load Balancing
1. Open and edit this file: `/opt/npmplus/custom_nginx/http_top.conf` (or `/opt/npmplus/custom_nginx/stream_top.conf` for streams), if you changed /opt/npmplus to a different path make sure to change the path to fit
2. Set the upstream directive(s) with your servers which should be load balanced (https://nginx.org/en/docs/http/ngx_http_upstream_module.html / https://nginx.org/en/docs/stream/ngx_stream_upstream_module.html); they need to run the same protocol (either http(s) or grpc(s) for proxy hosts, or tcp/udp/proxy protocol for streams), for example:
```
upstream server1 {
server 127.0.0.1:44;
server 127.0.0.1:33;
server 127.0.0.1:22;
server 192.158.168.11:44 backup;
}
```
3. Configure your proxy host/stream as always in the UI, but set the hostname to server1 (or server2 or however you named it) and keep the forward port field empty (since you set the ports within the upstream directive)
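Standard nginx balancing options also work inside the upstream block; a hedged variant of the example above (method, addresses and weights are assumptions, not part of the original example):
```
upstream server2 {
    least_conn; # send new connections to the server with the fewest active ones
    server 10.0.0.11:8080 weight=2; # receives roughly twice as many connections
    server 10.0.0.12:8080;
}
```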
## Geoblocking example (mainly community support)
1. set the `NGINX_LOAD_GEOIP2_MODULE` env to true and redeploy NPMplus
2. deploy a geoipupdate container (see the compose.yaml for an example, create credentials [here](https://www.maxmind.com/en/geolite2/signup))
3. open and edit this file: `/opt/npmplus/custom_nginx/http_top.conf`, if you changed /opt/npmplus to a different path make sure to change the path to fit
```
geoip2 /data/goaccess/geoip/GeoLite2-Country.mmdb {
auto_reload 60m;
$geoip2_country_iso_code country iso_code;
}
# whitelist example, you can add as many country codes as you want, country code list: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#XY
#map $geoip2_country_iso_code $geoip2_country_rule {
# default no;
# AA yes;
# XY yes;
# '' yes; # if you want to allow IPs with unknown country codes, if you don't do this make sure to allow private IPs
#}
# blacklist example, you can add as many country codes as you want, country code list: https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#XY
#map $geoip2_country_iso_code $geoip2_country_rule {
# default yes;
# AA no;
# XY no;
# '' no; # if you want to block IPs with unknown country codes, if you do this make sure to allow private IPs
#}
# uncomment if you block/don't allow IPs with unknown country codes
#geo $is_private_ip {
# default no;
# 127.0.0.0/8 yes;
# 10.0.0.0/8 yes;
# 172.16.0.0/12 yes;
# 192.168.0.0/16 yes;
# 169.254.0.0/16 yes;
# ::1/128 yes;
# fc00::/7 yes;
# fec0::/10 yes;
#}
```
4a. to set it per location: create a custom location / (or the location you want to use), set your proxy settings, then press the gear button and paste the following in the new text field, you may want to adjust the last lines (do not use the advanced tab with this example as it may break cert renewals):
```
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($geoip2_country_rule = no) {
return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
4b. to set it for an entire host: put this in the advanced tab:
```
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($request_uri ~* "^/\.well-known/acme-challenge/") {
set $geoip2_country_rule yes;
}
if ($geoip2_country_rule = no) {
return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
4c. to set it for all http hosts of the same type: put this in the `custom_nginx/server_proxy.conf` / `custom_nginx/server_redirect.conf` / `custom_nginx/server_dead.conf` file(s):
```
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($request_uri ~* "^/\.well-known/acme-challenge/") {
set $geoip2_country_rule yes;
}
if ($geoip2_country_rule = no) {
return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
4d. to set it for all http hosts: put this in the `custom_nginx/server_http.conf` file:
```
# uncomment if you block/don't allow IPs with unknown country codes
#if ($is_private_ip = yes) {
# set $geoip2_country_rule yes;
#}
if ($request_uri ~* "^/\.well-known/acme-challenge/") {
set $geoip2_country_rule yes;
}
if ($geoip2_country_rule = no) {
return 444; # this rejects the connection, but you can also return 403 to tell the client that it was denied
}
```
5. you can create multiple rule lists by adding multiple map directives, but you need to use a unique name instead of `$geoip2_country_rule` for each rule list (you also need the unique name in the custom locations); see the sketch below
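As a sketch, a second rule list with a unique variable name could look like this (name and country code are placeholders):
```
map $geoip2_country_iso_code $geoip2_country_rule_second {
    default yes;
    XY no;
}
```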
## Prerun scripts (EXPERT option) - if you don't know what this is, ignore it
If you need to run scripts before NPMplus launches, put them under `/opt/npmplus/prerun/*.sh` (please add `#!/usr/bin/env sh` / `#!/usr/bin/env bash` to the top of the script); you need to create this folder yourself. Also set the `ENABLE_PRERUN` env to `true`. A minimal sketch follows below.
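A minimal sketch of such a script (file name and contents are hypothetical):
```bash
#!/usr/bin/env sh
# /opt/npmplus/prerun/10-example.sh (hypothetical)
# runs before NPMplus launches
echo "prerun executed at $(date)" >> /data/prerun.log
```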
## Notes on Cloudflare
- I strongly advise against using cloudflare proxy/tunnel before NPMplus (so between the users and NPMplus `users <=> cloudflare <=> NPMplus`)
- Why?
- cloudflare acts like a "man in the middle" (if you want you can also call it a "wanted man-in-the-middle attack"); this means all traffic going from your users to you and from you to your users will be decrypted by cloudflare before being encrypted again and forwarded to you/your users; whether you want this is your decision (security, privacy, etc.)
- because of this, many optimizations done by NPMplus will only apply between cloudflare and NPMplus, so your users won't notice them
- cloudflare overrides many things done/configured by NPMplus (like headers (including HSTS), HTTP/3 (QUIC), TLS settings and more), so you might need to configure them again in Cloudflare, but this is not always possible
- cloudflare has a limit of 100MB per connection, so uploading/downloading big files may cause problems if no chunking is used
- because the data does not take the direct way between your users and you, the connection time will increase
- cloudflare only forwards/protects http(s) traffic on port 80/443 to you, services running on other ports/different protocols are not forwarded/protected (STUN/TURN/SSH)
- cloudflare can't protect you if the attacker knows your real ip, as cloudflare only rewrites your dns entries to itself and then acts as a reverse proxy; direct ip connections to you are not protected (use a firewall like ufw, make sure to allow 80/tcp and 443/tcp+udp for NPMplus, and if possible don't open SSH and the NPMplus GUI to the internet, but secure them behind a VPN like Wireguard)
- if you need a WAF => use [crowdsec](#crowdsec)
- if you want to use the "I'm under attack mode" to protect you from (ai) web scrapers => use [anubis](#anubis)
- What are reasons for using cloudflare?
- The points above don't matter to you (enough) and:
- you depend on a feature of cloudflare that is not mentioned above and that you can't replace
- or you are under (a) DDoS-attack(s) which you can't handle yourself, and the attacker does not know your real ip/does not use it to attack you but instead your domain: you could use cloudflare as the dns nameserver for your domain with the proxy disabled and only enable it if you are under attack (this only works if the attacker did not cache your real ip)
- or you want to hide your IP and only expose http(s) services, but then: don't use NPMplus at all, install cloudflared and use cloudflare tunnels and point it directly to your upstreams, this way you can still manage everything in a GUI and you don't even need to expose any ports
- If you still want to use cloudflare proxy make sure to set `your domain => SSL/TLS => SSL/TLS encryption => Current encryption mode => Configure` to "Full (strict)"
- Just using cloudflare as a dns nameserver provider for your domain is fine
- If you use cloudflare to forward mails to your inbox, note that cloudflare also acts as man-in-the-middle in this case
## Hints for Your Privacy Policy
**Note: This is not legal advice. The following points are intended to give you hints and help you identify areas that may be relevant to your privacy policy. This list may not be complete or correct.**
1. NPMplus **always** writes the nginx error logs to your Docker logs; it uses the error level “warn” (so every error nginx and the nginx modules mark as error level “warn” or higher will be logged), as it contains user information (like IPs) you should mention it in your privacy policy. With the default installation no user data should leave your system because of NPMplus (except for data sent to your backends, as this is the task of a reverse proxy), this should be the only data created by NPMplus containing user information by default.
2. If you enable `LOGROTATE` the access and error logs (error also at level “warn”) will be written to your disk, rotated every 25 hours, and deleted based on your configured number of rotations. The access logs use these formats: [http](https://github.com/ZoeyVid/NPMplus/blob/c6a2df722390eb3f4377c603e16587fe8c74e54f/rootfs/usr/local/nginx/conf/nginx.conf#L30) and [stream](https://github.com/ZoeyVid/NPMplus/blob/c6a2df722390eb3f4377c603e16587fe8c74e54f/rootfs/usr/local/nginx/conf/nginx.conf#L249). These include user information (like IPs), so make sure to also mention that these exist and what you are doing with them.
3. If you use crowdsec, and you do **not** [disable sharing in crowdsec](https://docs.crowdsec.net/docs/next/configuration/crowdsec_configuration/#sharing), you need to mention that [this](https://docs.crowdsec.net/docs/central_api/intro/#signal-meta-data) is sent to crowdsec in your privacy policy.
4. If you're blocking IPs — for example, using access lists, GeoIP filtering, or CrowdSec block lists — make sure to mention this as well.
5. If GoAccess is enabled, it processes access logs to generate statistics, which are saved on disk for a time you can configure. These statistics include user information (like IPs), so make sure to also mention this.
6. If you use the PHP-FPM option, error logs from PHP-FPM will also be written to Docker logs. These include user information (like IPs), so make sure to also mention this.
7. If you use open-appsec (`NGINX_LOAD_OPENAPPSEC_ATTACHMENT_MODULE`), you should also include information about it; since I don't use it myself, I can't give you any further hints.
8. If you collect any user information (like through other custom nginx modules, modules you can load via env, lua scripts, etc.), also mention it.
9. If you use the caddy http to https redirect container, you should also mention the data collected by it, since it will also collect (error) logs.
10. If you use anubis, see here: https://anubis.techaro.lol/docs/admin/configuration/impressum
11. If you do any extra custom/advanced configuration/modification which is in some way related to user data, keep in mind to also mention this.
12. Anything else you do with user data should also be mentioned. (Like what your backend does or any other proxies in front of NPMplus (like cloudflare, still not recommended), how data is stored, duration, ads, analytic tools, how data is handled if they contact you, by who/which provider, etc.)
13. I don't think this needs to be mentioned, but you can include it if you want to be thorough (note: this does not apply if you're using Let's Encrypt, as they no longer support OCSP): Some clients (like Firefox) send OCSP requests to the certificate authority (CA) by default if the CA includes OCSP URLs in the certificate. This behavior can be disabled by users in Firefox. In my opinion, it doesn't need to be mentioned, as no data is sent to you — the client communicates directly with the CA. The check is initiated by the client itself; it's neither requested nor required by you. Your certificate simply indicates that the client can perform this check if it chooses to.
14. Also optional and, in my opinion, not required: some information about the data stored by the nameservers running your domain. I don't think this should be required, since in most cases there's a provider between the users and your nameserver acting as a proxy. This means the DNS requests of your users are hidden behind their provider. It's the provider who should explain to their users how they handle data in their role as a "DNS proxy."
## What connections can be expected from the NPMplus container?
- to your clients
- to your upstreams
- to your acme/ocsp server
- to github for a daily update check
- if not disabled, to gravatar for profile pictures
- if used, to your OIDC provider
- if used, to pypi to download certbot plugins
- if used, to your dns provider for acme dns challenges
- if used, to www.site24x7.com for the reachability check
- if enabled, to cloudflare to download their IPs
- if enabled, to the crowdsec (container) lapi
- if you see more/others please report them
## Features and Project Goal of Upstream
I created this project to fill a personal need to provide users with an easy way to accomplish reverse proxying hosts with TLS termination and it had to be so easy that a monkey could do it. This goal hasn't changed. While advanced configuration options are available, they remain entirely optional. The core idea is to keep things as simple as possible, lowering the barrier to entry for everyone.
- Beautiful and Secure Admin Interface based on [Tabler](https://tabler.github.io)
- Easily create forwarding domains, redirections, streams and 404 hosts without knowing anything about Nginx
- Free trusted TLS certificates using Certbot (Let's Encrypt/other CAs) or provide your own custom TLS certificates
- Free trusted TLS certificates using Certbot (Let's Encrypt) or provide your own custom TLS certificates
- Access Lists and basic HTTP Authentication for your hosts
- Advanced Nginx configuration available for super users
- User management, permissions and audit log
## Contributing
All are welcome to create pull requests for this project, but this does not mean that they will be merged; better ask (via a Discussion) whether your PR would be merged before creating one. Typos and translations are excluded from this.
# Please report issues first to this fork before reporting them to the upstream repository
## Getting Help
1. [Support/Questions](https://github.com/ZoeyVid/NPMplus/discussions) (preferred)
2. [Discord](https://discord.gg/y8DhYhv427) (only in the #support-npmplus forum channel, keep other channels free from NPMplus)
3. [Reddit](https://reddit.com/r/NPMplus) (not recommended)
4. [Bugs](https://github.com/ZoeyVid/NPMplus/issues) (only for feature requests and reproducible bugs)
# List of new features
- Supports HTTP/3 (QUIC) protocol.
- Supports CrowdSec IPS. Please see [here](https://github.com/ZoeyVid/nginx-proxy-manager#crowdsec) to enable it.
- Supports ModSecurity, with coreruleset as an option. You can configure ModSecurity/coreruleset by editing the files in the `/opt/npm/etc/modsecurity` folder.
- If the core ruleset blocks valid requests, please check the `/data/etc/modsecurity/crs-setup.conf` file.
- Try to whitelist the Content-Type you are sending (for example, `application/activity+json` for Mastodon and `application/dns-message` for DoH).
- Try to whitelist the HTTP request method you are using (for example, `PUT` is blocked by default, which also affects NPM).
- Note: To fix [this issue](https://github.com/SpiderLabs/ModSecurity/issues/2848), instead of running `nginx -s reload`, this fork kills nginx and relaunches it. This can result in a 502 error when you update your hosts
- Darkmode button in the footer for comfortable viewing (CSS done by [@theraw](https://github.com/theraw))
- Fixes proxy to https origin when the origin only accepts TLSv1.3
- Only enables TLSv1.2 and TLSv1.3 protocols
- Faster creation of TLS certificates can be achieved by eliminating unnecessary Nginx reloads and configuration creations.
- Uses OCSP Stapling for enhanced security
- If using custom certificates, upload the CA/Intermediate Certificate (file name: `chain.pem`) in the `/opt/npm/tls/custom/npm-[certificate-id]` folder (manual migration may be needed)
- Resolved dnspod plugin issue
- To migrate manually, delete all dnspod certs and recreate them OR change the credentials file as per the template given [here](https://github.com/ZoeyVid/nginx-proxy-manager/blob/develop/global/certbot-dns-plugins.js)
- Smaller docker image with alpine-based distribution
- Admin backend interface runs with https
- Default page also runs with https
- Uses [fancyindex](https://gitHub.com/Naereen/Nginx-Fancyindex-Theme) if used as webserver
- Exposes INTERNAL backend api only to localhost
- Easy application of security headers using [ngx_security_headers](https://github.com/GetPageSpeed/ngx_security_headers)
- Access Log disabled
- Error Log written to console
- `Server` response header hidden
- PHP optional, with option to add extensions; available packages can be found [here](https://pkgs.alpinelinux.org/packages?branch=v3.17&repo=community&arch=x86_64&name=php81-*) and [here](https://pkgs.alpinelinux.org/packages?branch=v3.17&repo=community&arch=x86_64&name=php82-*)
- Allows different acme servers/certbot config file (/opt/npm/tls/certbot/config.ini)
- Supports up to 99 domains per cert
- Brotli compression can be enabled
- HTTP/2 always enabled with fixed upload
- Allows infinite upload size
- Automatic database vacuum (only sqlite)
- Automatic cleaning of old certbot certs (set FULLCLEAN to true)
- Password reset (only sqlite) using `docker exec -it nginx-proxy-manager password-reset.js USER_EMAIL PASSWORD`
- Supports TLS for MariaDB/MySQL; set `DB_MYSQL_TLS` env to true. Self-signed certificates can be uploaded to `/data/etc/npm/ca.crt` and `DB_MYSQL_CA` set to `/data/etc/npm/ca.crt` (not tested)
- Supports PUID/PGID in network mode host; add `net.ipv4.ip_unprivileged_port_start=0` at the end of `/etc/sysctl.conf`
- Option to set IP bindings for multiple instances in network mode host
- Option to change backend port
- See the composefile for all available options
- If you want to redirect all HTTP traffic to HTTPS, you can use the `compose.override.yaml` file. This will also enable `h2c` (unencrypted `HTTP/2`), while keeping `HTTP/1.0` and `HTTP/1.1`.
## Soon
- maybe redis and/or sql databases built in
- more
## migration
- **NOTE: migrating back to the original is not possible**, so first make a **backup** before migrating, so you can use the backup to switch back
- if you use custom certificates, you need to upload the CA/Intermediate Certificate (file name: `chain.pem`) in the `/opt/npm/tls/custom/npm-[certificate-id]` folder
- some buttons have changed, check if they are still correct
- please delete all dnspod certs and recreate them OR manually change the credentials file (see [here](https://github.com/ZoeyVid/nginx-proxy-manager/blob/develop/global/certbot-dns-plugins.js) for the template)
- since this fork depends on `network_mode: host`, please don't forget to open port 80 and 443 (and maybe 81) in your firewall
# Crowdsec
1. Install crowdsec: https://doc.crowdsec.net/docs/getting_started/install_crowdsec
2. make sure to use `network_mode: host` in your compose file
3. run `cscli bouncers add npm -o raw` and save the output
4. run `cscli config show --key "Config.API.Client.Credentials.URL"` and save the output
5. open `/data/etc/crowdsec/crowdsec.conf`
6. set `ENABLED` to `true`
7. use the output of step 3 as `API_KEY` (see the sketch after this list)
8. use the output of step 4 as `API_URL` - But remove the `/` at the end (correct: `http://127.0.0.1:8080` - incorrect: `http://127.0.0.1:8080/`)
9. make your changes
10. save the file
11. restart the npm container
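After steps 6 to 8 the file might contain (a sketch with placeholders; the real file may contain more options):
```
ENABLED=true
API_URL=http://127.0.0.1:8080
API_KEY=<output of step 3>
```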
# Use as webserver
1. Create a new Proxy Host
2. Set `Scheme` to `https`, `Forward Hostname / IP` to `0.0.0.0`, `Forward Port` to `1` and enable `Websockets Support` (you can also use other values, since these get fully ignored)
3. Maybe set an Access List
4. Make your TLS Settings
5.
a) Custom Nginx Configuration (advanced tab), which looks like the following for a file server:
- Note: the slash at the end of the file path is important
```
location / {
alias /var/www/<your-html-site-folder-name>/;
}
```
b) Custom Nginx Configuration (advanced tab), which looks like the following for a file server and **php**:
- Note: the slash at the end of the file path is important
- Note: first enable `PHP81` and/or `PHP82` inside your compose file
- Note: you can use `php81` or `php82` as the `fastcgi_pass` target, depending on which PHP version you enabled
- Note: to add more php extensions use the packages from [here](https://pkgs.alpinelinux.org/packages?branch=v3.17&repo=community&arch=x86_64&name=php8*-*) and add them using the `PHP_APKS` env (see compose file)
```
location / {
alias /var/www/<your-php-site-folder-name>/;
location ~ [^/]\.php(/|$) {
fastcgi_pass php82;
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
if (!-f $document_root$fastcgi_script_name) {return 404;}
}}
```
# custom acme server
1. Open this file: `nano /opt/npm/ssl/certbot/config.ini`
2. uncomment the server line and change it to your acme server (see the sketch after this list)
3. maybe set eab keys
4. create your cert using the npm web ui
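As a sketch, the uncommented lines could look like this (the directory URL is a placeholder; the eab options are only needed if your CA requires them):
```
server = https://acme.example-ca.com/directory
# eab-kid = <your-eab-key-id>
# eab-hmac-key = <your-eab-hmac-key>
```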
# Quick Setup
1. Install Docker and Docker Compose (or portainer)
- [Docker Install documentation](https://docs.docker.com/engine)
- [Docker Compose Install documentation](https://docs.docker.com/compose/install/linux)
2. Create a compose.yaml file similar to this (or use it as a portainer stack):
```yml
version: "3"
services:
nginx-proxy-manager:
container_name: nginx-proxy-manager
image: zoeyvid/nginx-proxy-manager
restart: always
network_mode: host
volumes:
- "/opt/npm:/data"
# - "/var/www:/var/www" # optional, if you want to use it as webserver for html/php
# - "/opt/npm-letsencrypt:/etc/letsencrypt" # Only needed for first time migration from original nginx-proxy-manager to this fork
environment:
- "TZ=Europe/Berlin" # set timezone, default UTC
# - "PUID=1000" # set group id, default 0 (root)
# - "PGID=1000" # set user id, default 0 (root)
# - "NIBEP=48693" # internal port, always bound to 127.0.0.1, default 48693, you need to change it, if you want to run multiple npm instances in network mode host
# - "NPM_PORT=81" # Port the NPM backend should be bound to, default 81, you need to change it, if you want to run multiple npm instances in network mode host
# - "IPV4_BINDING=127.0.0.1" # IPv4 address to bind, defaults to all
# - "NPM_IPV4_BINDING=127.0.0.1" # IPv4 address to bind for the NPM backend, defaults to all
# - "IPV6_BINDING=[::1]" # IPv6 address to bind, defaults to all
# - "NPM_IPV6_BINDING=[::1]" # IPv6 address to bind for the NPM backend, defaults to all
# - "DISABLE_IPV6=true" # disable IPv6, incompatible with IPV6_BINDING, default false
# - "NPM_DISABLE_IPV6=true" # disable IPv6 for the NPM backend, incompatible with NPM_IPV6_BINDING, default false
# - "NPM_LISTEN_LOCALHOST=true" # Bind the NPM Dashboard on Port 81 only to localhost, incompatible with NPM_IPV4_BINDING/NPM_IPV6_BINDING/NPM_DISABLE_IPV6, default false
# - "NPM_CERT_ID=1" # ID of cert, which should be used instead of dummycerts, default unset/dummycerts
# - "DISABLE_HTTP=true" # disables nginx to listen on port 80, default false
# - "NGINX_LOG_NOT_FOUND=true" # Allow logging of 404 errors, default false
# - "CLEAN=false" # Clean folders, default true
# - "FULLCLEAN=true" # Clean unused config folders, default false
# - "PHP81=true" # Activate PHP81, default false
# - "PHP81_APKS=php81-curl php-81-curl" # Add php extensions, see available packages here: https://pkgs.alpinelinux.org/packages?branch=v3.17&repo=community&arch=x86_64&name=php81-*, default none
# - "PHP82=true" # Activate PHP82, default false
# - "PHP82_APKS=php82-curl php-82-curl" # Add php extensions, see available packages here: https://pkgs.alpinelinux.org/packages?branch=v3.17&repo=community&arch=x86_64&name=php82-*, default none
```
3. Bring up your stack by running (or deploy your portainer stack)
```bash
docker compose up -d
```
4. Log in to the Admin UI
When your docker container is running, connect to it on port `81` for the admin interface.
Sometimes this can take a little bit because of the entropy of keys.
You may need to open port 81 in your firewall.
You may need to use another IP-Address.
[https://127.0.0.1:81](https://127.0.0.1:81)
Default Admin User:
```
Email: admin@example.com
Password: iArhP1j7p1P6TA92FA2FMbbUGYqwcYzxC4AVEe12Wbi94FY9gNN62aKyF1shrvG4NycjjX9KfmDQiwkLZH1ZDR9xMjiG2QmoHXi
```
Immediately after logging in with this default user you will be asked to modify your details and change your password.
## Contributors/Sponsor original NPM
Special thanks to [all of our contributors](https://github.com/NginxProxyManager/nginx-proxy-manager/graphs/contributors).
If you want to sponsor them, please see [here](https://github.com/NginxProxyManager/nginx-proxy-manager/blob/master/README.md).
# Please report Bugs first to this fork before reporting them to the original Repository
## Getting Support
1. [Found a bug?](https://github.com/ZoeyVid/nginx-proxy-manager/issues)
2. [Discussions](https://github.com/ZoeyVid/nginx-proxy-manager/discussions)
<!---
3. [Development Gitter](https://gitter.im/nginx-proxy-manager/community)
4. [Reddit](https://reddit.com/r/nginxproxymanager)
--->

73
backend/.eslintrc.json Normal file
View File

@@ -0,0 +1,73 @@
{
"env": {
"node": true,
"es6": true
},
"extends": [
"eslint:recommended"
],
"globals": {
"Atomics": "readonly",
"SharedArrayBuffer": "readonly"
},
"parserOptions": {
"ecmaVersion": 2018,
"sourceType": "module"
},
"plugins": [
"align-assignments"
],
"rules": {
"arrow-parens": [
"error",
"always"
],
"indent": [
"error",
"tab"
],
"linebreak-style": [
"error",
"unix"
],
"quotes": [
"error",
"single"
],
"semi": [
"error",
"always"
],
"key-spacing": [
"error",
{
"align": "value"
}
],
"comma-spacing": [
"error",
{
"before": false,
"after": true
}
],
"func-call-spacing": [
"error",
"never"
],
"keyword-spacing": [
"error",
{
"before": true
}
],
"no-irregular-whitespace": "error",
"no-unused-expressions": 0,
"align-assignments/align-assignments": [
2,
{
"requiresOnly": false
}
]
}
}

View File

@@ -1,84 +1,90 @@
import cookieParser from "cookie-parser";
import express from "express";
import fileUpload from "express-fileupload";
import { debug, express as logger } from "./logger.js";
import mainRoutes from "./routes/main.js";
const express = require('express');
const bodyParser = require('body-parser');
const fileUpload = require('express-fileupload');
const compression = require('compression');
const config = require('./lib/config');
const log = require('./logger').express;
/**
* App
*/
const app = express();
app.use(
fileUpload({
limits: { fileSize: 1024 * 1024 },
}),
);
app.use(cookieParser());
app.use(express.json());
app.use(express.urlencoded({ extended: true }));
app.use(fileUpload());
app.use(bodyParser.json());
app.use(bodyParser.urlencoded({extended: true}));
// Gzip
app.use(compression());
/**
* General Logging, BEFORE routes
*/
app.disable("x-powered-by");
app.enable("trust proxy", ["loopback", "linklocal", "uniquelocal"]);
app.enable("strict routing");
app.use((req, res, next) => {
if (["same-origin", undefined, "none"].includes(req.get("sec-fetch-site"))) {
return next();
}
if (
req.method === "GET" &&
req.path === "/api/oidc/callback" &&
req.get("sec-fetch-mode") === "navigate" &&
req.get("sec-fetch-dest") === "document"
) {
return next();
}
res.status(403).json({
error: { message: "Rejected Sec-Fetch-Site Value." },
});
});
app.disable('x-powered-by');
app.enable('trust proxy', ['loopback', 'linklocal', 'uniquelocal']);
app.enable('strict routing');
// pretty print JSON when not live
app.set("json spaces", 2);
if (config.debug()) {
app.set('json spaces', 2);
}
app.use("/", mainRoutes);
// CORS for everything
app.use(require('./lib/express/cors'));
// General security/cache related headers + server header
app.use(function (req, res, next) {
let x_frame_options = 'DENY';
if (typeof process.env.X_FRAME_OPTIONS !== 'undefined' && process.env.X_FRAME_OPTIONS) {
x_frame_options = process.env.X_FRAME_OPTIONS;
}
res.set({
'X-XSS-Protection': '1; mode=block',
'X-Content-Type-Options': 'nosniff',
'X-Frame-Options': x_frame_options,
'Cache-Control': 'no-cache, no-store, max-age=0, must-revalidate',
Pragma: 'no-cache',
Expires: 0
});
next();
});
app.use(require('./lib/express/jwt')());
app.use('/', require('./routes/api/main'));
// production error handler
// no stacktraces leaked to user
app.use((err, req, res, _) => {
const payload = {
// eslint-disable-next-line
app.use(function (err, req, res, next) {
let payload = {
error: {
code: err.status,
message: err.public ? err.message : "Internal Error",
},
code: err.status,
message: err.public ? err.message : 'Internal Error'
}
};
if (typeof err.message_i18n !== "undefined") {
payload.error.message_i18n = err.message_i18n;
}
if ((req.baseUrl + req.originalUrl).includes("nginx/certificates")) {
if (config.debug() || (req.baseUrl + req.path).includes('nginx/certificates')) {
payload.debug = {
stack: typeof err.stack !== "undefined" && err.stack ? err.stack.split("\n") : null,
previous: err.previous,
stack: typeof err.stack !== 'undefined' && err.stack ? err.stack.split('\n') : null,
previous: err.previous
};
}
// Not every error is worth logging - but this is good for now until it gets annoying.
if (typeof err.stack !== "undefined" && err.stack) {
debug(logger, err.stack);
if (typeof err.public === "undefined" || !err.public) {
logger.warn(`${req.method.toUpperCase()} ${req.originalUrl}: ${err}`);
if (typeof err.stack !== 'undefined' && err.stack) {
if (config.debug()) {
log.debug(err.stack);
} else if (typeof err.public == 'undefined' || !err.public) {
log.warn(err.message);
}
}
res.status(err.status || 500).send(payload);
res
.status(err.status || 500)
.send(payload);
});
export default app;
module.exports = app;

View File

@@ -1,74 +0,0 @@
{
"$schema": "https://biomejs.dev/schemas/2.4.5/schema.json",
"vcs": {
"enabled": true,
"clientKind": "git",
"useIgnoreFile": true
},
"files": {
"ignoreUnknown": false,
"includes": ["**/*.ts", "**/*.tsx", "**/*.js", "**/*.jsx", "!**/dist/**/*"]
},
"formatter": {
"enabled": true,
"indentStyle": "tab",
"indentWidth": 4,
"lineWidth": 120,
"formatWithErrors": true
},
"assist": {
"actions": {
"source": {
"organizeImports": {
"level": "on",
"options": {
"groups": [
":BUN:",
":NODE:",
["npm:*", "npm:*/**"],
":PACKAGE_WITH_PROTOCOL:",
":URL:",
":PACKAGE:",
["/src/*", "/src/**"],
["/**"],
["#*", "#*/**"],
":PATH:"
]
}
}
}
}
},
"linter": {
"enabled": true,
"rules": {
"recommended": true,
"correctness": {
"useUniqueElementIds": "off"
},
"suspicious": {
"noExplicitAny": "off"
},
"performance": {
"noDelete": "off"
},
"nursery": "off",
"a11y": {
"useSemanticElements": "off",
"useValidAnchor": "off"
},
"style": {
"noParameterAssign": "error",
"useAsConstAssertion": "error",
"useDefaultParameterLast": "error",
"useEnumInitializers": "error",
"useSelfClosingElements": "error",
"useSingleVarDeclarator": "error",
"noUnusedTemplateLiteral": "error",
"useNumberNamespace": "error",
"noInferrableTypes": "error",
"noUselessElse": "error"
}
}
}
}

View File

@@ -1,19 +0,0 @@
# Certbot dns-plugins
This file contains info about available Certbot DNS plugins.
This only works for plugins which use the standard argument structure, so:
`--authenticator <plugin-name> --<plugin-name>-credentials <FILE> --<plugin-name>-propagation-seconds <number>`
File Structure:
```json
{
"cloudflare": {
"name": "Name displayed to the user",
"package_name": "Package name in PyPi repo",
"credentials": "Template of the credentials file",
"full_plugin_name": "The full plugin name as used in the commandline with certbot, e.g. 'dns-njalla'"
},
...
}
```

View File

@@ -1,494 +0,0 @@
{
"acmedns": {
"name": "ACME-DNS",
"package_name": "certbot-dns-acmedns",
"credentials": "dns_acmedns_api_url = http://acmedns-server/\ndns_acmedns_registration_file = /data/tls/certbot/acme-registration.json",
"full_plugin_name": "dns-acmedns"
},
"active24": {
"name": "Active24",
"package_name": "certbot-dns-active24",
"credentials": "dns_active24_api_key = <identifier>\ndns_active24_secret = <secret>",
"full_plugin_name": "dns-active24"
},
"aliyun": {
"name": "Aliyun",
"package_name": "certbot-dns-aliyun",
"credentials": "dns_aliyun_access_key = 12345678\ndns_aliyun_access_key_secret = 1234567890abcdef1234567890abcdef",
"full_plugin_name": "dns-aliyun"
},
"arvan": {
"name": "ArvanCloud",
"package_name": "certbot-dns-arvan",
"credentials": "dns_arvan_key = Apikey xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
"full_plugin_name": "dns-arvan"
},
"azure": {
"name": "Azure",
"package_name": "certbot-dns-azure",
"credentials": "# This plugin supported API authentication using either Service Principals or utilizing a Managed Identity assigned to the virtual machine.\n# Regardless which authentication method used, the identity will need the “DNS Zone Contributor” role assigned to it.\n# As multiple Azure DNS Zones in multiple resource groups can exist, the config file needs a mapping of zone to resource group ID. Multiple zones -> ID mappings can be listed by using the key dns_azure_zoneX where X is a unique number. At least 1 zone mapping is required.\n\n# Using a service principal (option 1)\ndns_azure_sp_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\ndns_azure_sp_client_secret = E-xqXU83Y-jzTI6xe9fs2YC~mck3ZzUih9\ndns_azure_tenant_id = ed1090f3-ab18-4b12-816c-599af8a88cf7\n\n# Using used assigned MSI (option 2)\n# dns_azure_msi_client_id = 912ce44a-0156-4669-ae22-c16a17d34ca5\n\n# Using system assigned MSI (option 3)\n# dns_azure_msi_system_assigned = true\n\n# Zones (at least one always required)\ndns_azure_zone1 = example.com:/subscriptions/c135abce-d87d-48df-936c-15596c6968a5/resourceGroups/dns1\ndns_azure_zone2 = example.org:/subscriptions/99800903-fb14-4992-9aff-12eaf2744622/resourceGroups/dns2",
"full_plugin_name": "dns-azure"
},
"baidu": {
"name": "baidu",
"package_name": "certbot-dns-baidu",
"credentials": "dns_baidu_access_key = 12345678\ndns_baidu_secret_key = 1234567890abcdef1234567890abcdef",
"full_plugin_name": "dns-baidu"
},
"beget": {
"name": "Beget",
"package_name": "certbot-beget-plugin",
"credentials": "# Beget API credentials used by Certbot\nbeget_plugin_username = username\nbeget_plugin_password = password",
"full_plugin_name": "beget-plugin"
},
"bunny": {
"name": "bunny.net",
"package_name": "certbot-dns-bunny",
"credentials": "# Bunny API token used by Certbot (see https://dash.bunny.net/account/settings)\ndns_bunny_api_key = xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx",
"full_plugin_name": "dns-bunny"
},
"cdmon": {
"name": "cdmon",
"package_name": "certbot-dns-cdmon",
"credentials": "dns_cdmon_api_key=your-cdmon-api-token\ndns_cdmon_domain=your_domain_is_optional",
"full_plugin_name": "dns-cdmon"
},
"cloudflare": {
"name": "Cloudflare",
"package_name": "certbot-dns-cloudflare",
"credentials": "# Cloudflare restricted API token (recommended)\ndns_cloudflare_api_token=123\n\n# OR Cloudflare Global API Key (not recommended)\n#dns_cloudflare_email=cloudflare@example.org\n#dns_cloudflare_api_key=123",
"full_plugin_name": "dns-cloudflare"
},
"cloudns": {
"name": "ClouDNS",
"package_name": "certbot-dns-cloudns",
"credentials": "# Target user ID (see https://www.cloudns.net/api-settings/)\n\tdns_cloudns_auth_id=1234\n\t# Alternatively, one of the following two options can be set:\n\t# dns_cloudns_sub_auth_id=1234\n\t# dns_cloudns_sub_auth_user=foobar\n\n\t# API password\n\tdns_cloudns_auth_password=password1",
"full_plugin_name": "dns-cloudns"
},
"cloudxns": {
"name": "CloudXNS",
"package_name": "certbot-dns-cloudxns",
"credentials": "dns_cloudxns_api_key = 1234567890abcdef1234567890abcdef\ndns_cloudxns_secret_key = 1122334455667788",
"full_plugin_name": "dns-cloudxns"
},
"constellix": {
"name": "Constellix",
"package_name": "certbot-dns-constellix",
"credentials": "dns_constellix_apikey = 5fb4e76f-ac91-43e5-f982458bc595\ndns_constellix_secretkey = 47d99fd0-32e7-4e07-85b46d08e70b\ndns_constellix_endpoint = https://api.dns.constellix.com/v1",
"full_plugin_name": "dns-constellix"
},
"corenetworks": {
"name": "Core Networks",
"package_name": "certbot-dns-corenetworks",
"credentials": "dns_corenetworks_username = asaHB12r\ndns_corenetworks_password = secure_password",
"full_plugin_name": "dns-corenetworks"
},
"cpanel": {
"name": "cPanel",
"package_name": "certbot-dns-cpanel",
"credentials": "cpanel_url = https://cpanel.example.com:2083\ncpanel_username = your_username\ncpanel_password = your_password\ncpanel_token = your_api_token",
"full_plugin_name": "cpanel"
},
"ddnss": {
"name": "DDNSS",
"package_name": "certbot-dns-ddnss",
"credentials": "dns_ddnss_token = YOUR_DDNSS_API_TOKEN",
"full_plugin_name": "dns-ddnss"
},
"desec": {
"name": "deSEC",
"package_name": "certbot-dns-desec",
"credentials": "dns_desec_token = YOUR_DESEC_API_TOKEN\ndns_desec_endpoint = https://desec.io/api/v1/",
"full_plugin_name": "dns-desec"
},
"duckdns": {
"name": "DuckDNS",
"package_name": "certbot-dns-duckdns",
"credentials": "dns_duckdns_token=your-duckdns-token",
"full_plugin_name": "dns-duckdns"
},
"digitalocean": {
"name": "DigitalOcean",
"package_name": "certbot-dns-digitalocean",
"credentials": "dns_digitalocean_token = 0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff",
"full_plugin_name": "dns-digitalocean"
},
"directadmin": {
"name": "DirectAdmin",
"package_name": "certbot-dns-directadmin",
"credentials": "directadmin_url = https://my.directadminserver.com:2222\ndirectadmin_username = username\ndirectadmin_password = aSuperStrongPassword",
"full_plugin_name": "directadmin"
},
"dnsimple": {
"name": "DNSimple",
"package_name": "certbot-dns-dnsimple",
"credentials": "dns_dnsimple_token = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw",
"full_plugin_name": "dns-dnsimple"
},
"dnsmadeeasy": {
"name": "DNS Made Easy",
"package_name": "certbot-dns-dnsmadeeasy",
"credentials": "dns_dnsmadeeasy_api_key = 1c1a3c91-4770-4ce7-96f4-54c0eb0e457a\ndns_dnsmadeeasy_secret_key = c9b5625f-9834-4ff8-baba-4ed5f32cae55",
"full_plugin_name": "dns-dnsmadeeasy"
},
"dnspod": {
"name": "DNSPod",
"package_name": "certbot-dnspod",
"credentials": "certbot_dnspod_token = <your token>\ncertbot_dnspod_token_id = <your token id>",
"full_plugin_name": "certbot-dnspod"
},
"domainoffensive": {
"name": "DomainOffensive (do.de)",
"package_name": "certbot-dns-domainoffensive",
"credentials": "dns_domainoffensive_api_token = YOUR_DO_DE_AUTH_TOKEN",
"full_plugin_name": "dns-domainoffensive"
},
"domeneshop": {
"name": "Domeneshop",
"package_name": "certbot-dns-domeneshop",
"credentials": "dns_domeneshop_client_token=YOUR_DOMENESHOP_CLIENT_TOKEN\ndns_domeneshop_client_secret=YOUR_DOMENESHOP_CLIENT_SECRET",
"full_plugin_name": "dns-domeneshop"
},
"dreamhost": {
"name": "Dreamhost",
"package_name": "certbot-dns-dreamhost",
"credentials": "dns_dreamhost_baseurl=API_BASE_URL\ndns_dreamhost_api_key=API_KEY",
"full_plugin_name": "dns-dreamhost"
},
"dynu": {
"name": "Dynu",
"package_name": "certbot-dns-dynu",
"credentials": "dns_dynu_auth_token = YOUR_DYNU_AUTH_TOKEN",
"full_plugin_name": "dns-dynu"
},
"easydns": {
"name": "easyDNS",
"package_name": "certbot-dns-easydns",
"credentials": "dns_easydns_usertoken = YOUR_EASYDNS_USERTOKEN\ndns_easydns_userkey = YOUR_EASYDNS_USERKEY\ndns_easydns_endpoint = https://rest.easydns.net",
"full_plugin_name": "dns-easydns"
},
"edgedns": {
"name": "Akamai Edge DNS",
"package_name": "certbot-plugin-edgedns",
"credentials": "edgedns_client_secret = as3d1asd5d1a32sdfsdfs2d1asd5=\nedgedns_host = sdflskjdf-dfsdfsdf-sdfsdfsdf.luna.akamaiapis.net\nedgedns_access_token = kjdsi3-34rfsdfsdf-234234fsdfsdf\nedgedns_client_token = dkfjdf-342fsdfsd-23fsdfsdfsdf",
"full_plugin_name": "edgedns"
},
"eurodns": {
"name": "EuroDNS",
"package_name": "certbot-dns-eurodns",
"credentials": "dns_eurodns_applicationId = myuser\ndns_eurodns_apiKey = mysecretpassword\ndns_eurodns_endpoint = https://rest-api.eurodns.com/user-api-gateway/proxy",
"full_plugin_name": "dns-eurodns"
},
"firstdomains": {
"name": "First Domains",
"package_name": "certbot-dns-firstdomains",
"credentials": "dns_firstdomains_username = myremoteuser\ndns_firstdomains_password = verysecureremoteuserpassword",
"full_plugin_name": "dns-firstdomains"
},
"freedns": {
"name": "FreeDNS",
"package_name": "certbot-dns-freedns",
"credentials": "dns_freedns_username = myremoteuser\ndns_freedns_password = verysecureremoteuserpassword",
"full_plugin_name": "dns-freedns"
},
"gandi": {
"name": "Gandi Live DNS",
"package_name": "certbot-dns-gandi",
"credentials": "# Gandi personal access token\ndns_gandi_token=PERSONAL_ACCESS_TOKEN",
"full_plugin_name": "dns-gandi"
},
"gcore": {
"name": "Gcore DNS",
"package_name": "certbot-dns-gcore",
"credentials": "dns_gcore_apitoken = 0123456789abcdef0123456789abcdef01234567",
"full_plugin_name": "dns-gcore"
},
"glesys": {
"name": "Glesys",
"package_name": "certbot-dns-glesys",
"credentials": "dns_glesys_user = CL00000\ndns_glesys_password = apikeyvalue",
"full_plugin_name": "dns-glesys"
},
"godaddy": {
"name": "GoDaddy",
"package_name": "certbot-dns-godaddy",
"credentials": "dns_godaddy_secret = 0123456789abcdef0123456789abcdef01234567\ndns_godaddy_key = abcdef0123456789abcdef01234567abcdef0123",
"full_plugin_name": "dns-godaddy"
},
"google": {
"name": "Google",
"package_name": "certbot-dns-google",
"credentials": "{\n\"type\": \"service_account\",\n...\n}",
"full_plugin_name": "dns-google"
},
"googledomains": {
"name": "GoogleDomainsDNS",
"package_name": "certbot-dns-google-domains",
"credentials": "dns_google_domains_access_token = 0123456789abcdef0123456789abcdef01234567\ndns_google_domains_zone = \"example.com\"",
"full_plugin_name": "dns-google-domains"
},
"he": {
"name": "Hurricane Electric",
"package_name": "certbot-dns-hurricane-electric",
"credentials": "dns_hurricane_electric_user = Me\ndns_hurricane_electric_pass = my HE password",
"full_plugin_name": "dns-hurricane_electric"
},
"he-ddns": {
"name": "Hurricane Electric - DDNS",
"package_name": "certbot-dns-he-ddns",
"credentials": "dns_he_ddns_password = verysecurepassword",
"full_plugin_name": "dns-he-ddns"
},
"hetzner": {
"name": "Hetzner",
"package_name": "certbot-dns-hetzner",
"credentials": "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hetzner"
},
"hetzner-cloud": {
"name": "Hetzner Cloud",
"package_name": "certbot-dns-hetzner-cloud",
"credentials": "dns_hetzner_cloud_api_token = your_api_token_here",
"full_plugin_name": "dns-hetzner-cloud"
},
"hostingnl": {
"name": "Hosting.nl",
"package_name": "certbot-dns-hostingnl",
"credentials": "dns_hostingnl_api_key = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-hostingnl"
},
"hover": {
"name": "Hover",
"package_name": "certbot-dns-hover",
"credentials": "dns_hover_hoverurl = https://www.hover.com\ndns_hover_username = hover-admin-username\ndns_hover_password = hover-admin-password\ndns_hover_totpsecret = 2fa-totp-secret",
"full_plugin_name": "dns-hover"
},
"infomaniak": {
"name": "Infomaniak",
"package_name": "certbot-dns-infomaniak",
"credentials": "dns_infomaniak_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"full_plugin_name": "dns-infomaniak"
},
"inwx": {
"name": "INWX",
"package_name": "certbot-dns-inwx",
"credentials": "dns_inwx_url = https://api.domrobot.com/xmlrpc/\ndns_inwx_username = your_username\ndns_inwx_password = your_password\ndns_inwx_shared_secret = your_shared_secret optional",
"full_plugin_name": "dns-inwx"
},
"ionos": {
"name": "IONOS",
"package_name": "certbot-dns-ionos",
"credentials": "dns_ionos_prefix = myapikeyprefix\ndns_ionos_secret = verysecureapikeysecret\ndns_ionos_endpoint = https://api.hosting.ionos.com",
"full_plugin_name": "dns-ionos"
},
"ispconfig": {
"name": "ISPConfig",
"package_name": "certbot-dns-ispconfig",
"credentials": "certbot_dns_ispconfig:dns_ispconfig_username = myremoteuser\ncertbot_dns_ispconfig:dns_ispconfig_password = verysecureremoteuserpassword\ncertbot_dns_ispconfig:dns_ispconfig_endpoint = https://you.ipsconfig.host:8080/remote/json.php",
"full_plugin_name": "certbot-dns-ispconfig:dns-ispconfig"
},
"isset": {
"name": "Isset",
"package_name": "certbot-dns-isset",
"credentials": "dns_isset_endpoint=\"https://customer.isset.net/api\"\ndns_isset_token=\"<token>\"",
"full_plugin_name": "dns-isset"
},
"joker": {
"name": "Joker",
"package_name": "certbot-dns-joker",
"credentials": "dns_joker_username = <Dynamic DNS Authentication Username>\ndns_joker_password = <Dynamic DNS Authentication Password>\ndns_joker_domain = <Dynamic DNS Domain>",
"full_plugin_name": "dns-joker"
},
"kas": {
"name": "All-Inkl",
"package_name": "certbot-dns-kas",
"credentials": "dns_kas_user = your_kas_user\ndns_kas_password = your_kas_password",
"full_plugin_name": "dns-kas"
},
"leaseweb": {
"name": "LeaseWeb",
"package_name": "certbot-dns-leaseweb",
"credentials": "dns_leaseweb_api_token = 01234556789",
"full_plugin_name": "dns-leaseweb"
},
"linode": {
"name": "Linode",
"package_name": "certbot-dns-linode",
"credentials": "dns_linode_key = 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ64\ndns_linode_version = [<blank>|3|4]",
"full_plugin_name": "dns-linode"
},
"loopia": {
"name": "Loopia",
"package_name": "certbot-dns-loopia",
"credentials": "dns_loopia_user = user@loopiaapi\ndns_loopia_password = abcdef0123456789abcdef01234567abcdef0123",
"full_plugin_name": "dns-loopia"
},
"luadns": {
"name": "LuaDNS",
"package_name": "certbot-dns-luadns",
"credentials": "dns_luadns_email = user@example.com\ndns_luadns_token = 0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-luadns"
},
"mchost24": {
"name": "MC-HOST24",
"package_name": "certbot-dns-mchost24",
"credentials": "# Obtain API token using https://github.com/JoeJoeTV/mchost24-api-python\ndns_mchost24_api_token=<insert obtained API token here>",
"full_plugin_name": "dns-mchost24"
},
"mijnhost": {
"name": "mijn.host",
"package_name": "certbot-dns-mijn-host",
"credentials": "dns_mijn_host_api_key=0123456789abcdef0123456789abcdef",
"full_plugin_name": "dns-mijn-host"
},
"namecheap": {
"name": "Namecheap",
"package_name": "certbot-dns-namecheap",
"credentials": "dns_namecheap_username = 123456\ndns_namecheap_api_key = 0123456789abcdef0123456789abcdef01234567",
"full_plugin_name": "dns-namecheap"
},
"netcup": {
"name": "netcup",
"package_name": "certbot-dns-netcup",
"credentials": "dns_netcup_customer_id = 123456\ndns_netcup_api_key = 0123456789abcdef0123456789abcdef01234567\ndns_netcup_api_password = abcdef0123456789abcdef01234567abcdef0123",
"full_plugin_name": "dns-netcup"
},
"nicru": {
"name": "nic.ru",
"package_name": "certbot-dns-nicru",
"credentials": "dns_nicru_client_id = application-id\ndns_nicru_client_secret = application-token\ndns_nicru_username = 0001110/NIC-D\ndns_nicru_password = password\ndns_nicru_scope = .+:.+/zones/example.com(/.+)?\ndns_nicru_service = DNS_SERVICE_NAME\ndns_nicru_zone = example.com",
"full_plugin_name": "dns-nicru"
},
"njalla": {
"name": "Njalla",
"package_name": "certbot-dns-njalla",
"credentials": "dns_njalla_token = 0123456789abcdef0123456789abcdef01234567",
"full_plugin_name": "dns-njalla"
},
"nsone": {
"name": "NS1",
"package_name": "certbot-dns-nsone",
"credentials": "dns_nsone_api_key = MDAwMDAwMDAwMDAwMDAw",
"full_plugin_name": "dns-nsone"
},
"oci": {
"name": "Oracle Cloud Infrastructure DNS",
"package_name": "certbot-dns-oci",
"credentials": "[DEFAULT]\nuser = ocid1.user.oc1...\nfingerprint = xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx:xx\ntenancy = ocid1.tenancy.oc1...\nregion = us-ashburn-1\nkey_file = ~/.oci/oci_api_key.pem",
"full_plugin_name": "dns-oci"
},
"ovh": {
"name": "OVH",
"package_name": "certbot-dns-ovh",
"credentials": "dns_ovh_endpoint = ovh-eu\ndns_ovh_application_key = MDAwMDAwMDAwMDAw\ndns_ovh_application_secret = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw\ndns_ovh_consumer_key = MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw",
"full_plugin_name": "dns-ovh"
},
"plesk": {
"name": "Plesk",
"package_name": "certbot-dns-plesk",
"credentials": "dns_plesk_username = your-username\ndns_plesk_password = secret\ndns_plesk_api_url = https://plesk-api-host:8443",
"full_plugin_name": "dns-plesk"
},
"porkbun": {
"name": "Porkbun",
"package_name": "certbot-dns-porkbun",
"credentials": "dns_porkbun_key=your-porkbun-api-key\ndns_porkbun_secret=your-porkbun-api-secret",
"full_plugin_name": "dns-porkbun"
},
"powerdns": {
"name": "PowerDNS",
"package_name": "certbot-dns-pdns",
"credentials": "dns_pdns_endpoint = https://pdns-api.example.com\ndns_pdns_api_key = <Your API Key>\ndns_pdns_server_id = localhost # see https://doc.powerdns.com/authoritative/http-api/server.html\ndns_pdns_disable_notify = false # Disable notification of secondaries after record changes",
"full_plugin_name": "dns-pdns"
},
"regru": {
"name": "reg.ru",
"package_name": "certbot-regru",
"credentials": "dns_username=username\ndns_password=password",
"full_plugin_name": "dns"
},
"rfc2136": {
"name": "RFC 2136",
"package_name": "certbot-dns-rfc2136",
"credentials": "# Target DNS server\ndns_rfc2136_server = 192.0.2.1\n# Target DNS port\ndns_rfc2136_port = 53\n# TSIG key name\ndns_rfc2136_name = keyname.\n# TSIG key secret\ndns_rfc2136_secret = 4q4wM/2I180UXoMyN4INVhJNi8V9BCV+jMw2mXgZw/CSuxUT8C7NKKFs AmKd7ak51vWKgSl12ib86oQRPkpDjg==\n# TSIG key algorithm\ndns_rfc2136_algorithm = HMAC-SHA512",
"full_plugin_name": "dns-rfc2136"
},
"rockenstein": {
"name": "rockenstein AG",
"package_name": "certbot-dns-rockenstein",
"credentials": "dns_rockenstein_token=<token>",
"full_plugin_name": "dns-rockenstein"
},
"scaleway": {
"name": "Scaleway",
"package_name": "certbot-dns-scaleway",
"credentials": "dns_scaleway_application_token = b3a0b9a9-3814-4f12-95b0-a7473bf8b306",
"full_plugin_name": "dns-scaleway"
},
"selectelv2": {
"name": "Selectel api v2",
"package_name": "certbot-dns-selectel-api-v2",
"credentials": "dns_selectel_api_v2_account_id = your_account_id\ndns_selectel_api_v2_project_name = your_project\ndns_selectel_api_v2_username = your_username\ndns_selectel_api_v2_password = your_password",
"full_plugin_name": "dns-selectel-api-v2"
},
"simply": {
"name": "Simply",
"package_name": "certbot-dns-simply",
"credentials": "dns_simply_account_name = UExxxxxx\ndns_simply_api_key = DsHJdsjh2812872sahj",
"full_plugin_name": "dns-simply"
},
"spaceship": {
"name": "Spaceship",
"package_name": "certbot-dns-spaceship",
"credentials": "[spaceship]\napi_key=your_api_key\napi_secret=your_api_secret",
"full_plugin_name": "dns-spaceship"
},
"strato": {
"name": "Strato",
"package_name": "certbot-dns-strato",
"credentials": "dns_strato_username = user\ndns_strato_password = pass\n# uncomment if you are using two factor authentication:\n# dns_strato_totp_devicename = 2fa_device\n# dns_strato_totp_secret = 2fa_secret\n#\n# uncomment if domain name contains special characters\n# insert domain display name as seen on your account page here\n# dns_strato_domain_display_name = my-punicode-url.de\n#\n# if you are not using strato.de or another special endpoint you can customise it below\n# you will probably only need to adjust the host, but you can also change the complete endpoint url\n# dns_strato_custom_api_scheme = https\n# dns_strato_custom_api_host = www.strato.de\n# dns_strato_custom_api_port = 443\n# dns_strato_custom_api_path = \"/apps/CustomerService\"",
"full_plugin_name": "dns-strato"
},
"timeweb": {
"name": "Timeweb Cloud",
"package_name": "certbot-dns-timeweb",
"credentials": "dns_timeweb_api_key = XXXXXXXXXXXXXXXXXXX",
"full_plugin_name": "dns-timeweb"
},
"transip": {
"name": "TransIP",
"package_name": "certbot-dns-transip",
"credentials": "dns_transip_username = my_username\ndns_transip_key_file = /data/tls/certbot/transip-rsa.key",
"full_plugin_name": "dns-transip"
},
"tencentcloud": {
"name": "Tencent Cloud",
"package_name": "certbot-dns-tencentcloud",
"credentials": "dns_tencentcloud_secret_id = TENCENT_CLOUD_SECRET_ID\ndns_tencentcloud_secret_key = TENCENT_CLOUD_SECRET_KEY",
"full_plugin_name": "dns-tencentcloud"
},
"vultr": {
"name": "Vultr",
"package_name": "certbot-dns-vultr",
"credentials": "dns_vultr_key = YOUR_VULTR_API_KEY",
"full_plugin_name": "dns-vultr"
},
"websupport": {
"name": "Websupport.sk",
"package_name": "certbot-dns-websupport",
"credentials": "dns_websupport_identifier = <api_key>\ndns_websupport_secret_key = <secret>",
"full_plugin_name": "dns-websupport"
},
"wedos": {
"name": "Wedos",
"package_name": "certbot-dns-wedos",
"credentials": "dns_wedos_user = <wedos_registration>\ndns_wedos_auth = <wapi_password>",
"full_plugin_name": "dns-wedos"
},
"zoneedit": {
"name": "ZoneEdit",
"package_name": "certbot-dns-zoneedit",
"credentials": "dns_zoneedit_user = <login-user-id>\ndns_zoneedit_token = <dyn-authentication-token>",
"full_plugin_name": "dns-zoneedit"
}
}
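
Every entry above follows the same shape: a display name, the pip package to install, a sample credentials file, and the plugin name certbot expects. A minimal sketch of how one entry could drive a certbot run, using the standard --authenticator / --<plugin>-credentials convention of certbot DNS plugins; the requestCert helper, the paths, and the domain are illustrative, not the project's actual certbot wrapper:

import fs from "node:fs";
import { execFile } from "node:child_process";

// One entry from the table above; any other entry slots in the same way.
// Assumes its pip package (package_name) is already installed next to certbot.
const plugin = {
    package_name: "certbot-dns-hetzner",
    credentials: "dns_hetzner_api_token = 0123456789abcdef0123456789abcdef",
    full_plugin_name: "dns-hetzner",
};

// Hypothetical helper: write the credentials file, then run certbot once.
const requestCert = (domain, email) => {
    const credFile = `/tmp/credentials-${plugin.full_plugin_name}.ini`;
    // Credentials files must not be world-readable, hence mode 0600.
    fs.writeFileSync(credFile, `${plugin.credentials}\n`, { mode: 0o600 });
    execFile(
        "certbot",
        [
            "certonly",
            "--non-interactive",
            "--agree-tos",
            "--email", email,
            "--authenticator", plugin.full_plugin_name,
            `--${plugin.full_plugin_name}-credentials`, credFile,
            "-d", domain,
        ],
        (err, stdout) => {
            if (err) throw err;
            console.log(stdout);
        },
    );
};

requestCert("example.com", "admin@example.com");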

View File

@@ -1,42 +1,28 @@
import knex from "knex";
import { configGet, configHas } from "./lib/config.js";
const config = require('./lib/config');
let instance = null;
if (!config.has('database')) {
throw new Error('Database config does not exist! Please read the instructions: https://nginxproxymanager.com/setup');
}
const generateDbConfig = () => {
if (!configHas("database")) {
throw new Error(
"Database config does not exist! Please read the instructions: https://github.com/ZoeyVid/NPMplus",
);
}
const cfg = configGet("database");
if (cfg.engine === "knex-native") {
function generateDbConfig() {
const cfg = config.get('database');
if (cfg.engine === 'knex-native') {
return cfg.knex;
}
return {
client: cfg.engine,
client: cfg.engine,
connection: {
host: cfg.host,
user: cfg.user,
host: cfg.host,
user: cfg.user,
password: cfg.password,
database: cfg.name,
port: cfg.port,
...(cfg.ssl ? { ssl: cfg.ssl } : {}),
port: cfg.port,
ssl: cfg.tls,
},
migrations: {
tableName: "migrations",
},
tableName: 'migrations'
}
};
};
}
const getInstance = () => {
if (!instance) {
instance = knex(generateDbConfig());
}
return instance;
};
export default getInstance;
module.exports = require('knex')(generateDbConfig());
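
The two sides of this hunk differ in how the knex instance is created (an eager module.exports versus a lazy getInstance() singleton) and in how TLS is passed (ssl: cfg.tls versus conditionally spreading cfg.ssl), but both read the same two shapes from the database config. A sketch of those shapes, with field names taken from the code above and values purely illustrative:

// Shape 1: engine "knex-native" hands cfg.knex to knex() unchanged.
const nativeConfig = {
    database: {
        engine: "knex-native",
        knex: {
            client: "better-sqlite3",
            connection: { filename: "/data/database.sqlite" },
            useNullAsDefault: true,
        },
    },
};

// Shape 2: any other engine is mapped field-by-field onto a knex client config.
const mappedConfig = {
    database: {
        engine: "mysql2",
        host: "db",
        user: "npm",
        password: "secret",
        name: "npm", // becomes connection.database
        port: 3306,
        ssl: { rejectUnauthorized: false }, // only forwarded when set
    },
};

console.log(nativeConfig.database.engine, mappedConfig.database.engine);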

backend/doc/api.swagger.json (new file, 1456 lines)

File diff suppressed because it is too large

View File

@@ -1,45 +1,40 @@
#!/usr/bin/env node
import app from "./app.js";
import internalNginx from "./internal/nginx.js";
import internalCertificate from "./internal/certificate.js";
import internalIpRanges from "./internal/ip_ranges.js";
import { global as logger } from "./logger.js";
import { migrateUp } from "./migrate.js";
import { getCompiledSchema } from "./schema/index.js";
import setup from "./setup.js";
const logger = require('./logger').global;
async function appStart() {
return migrateUp()
async function appStart () {
const migrate = require('./migrate');
const setup = require('./setup');
const app = require('./app');
const apiValidator = require('./lib/validator/api');
const internalCertificate = require('./internal/certificate');
const internalIpRanges = require('./internal/ip_ranges');
return migrate.latest()
.then(setup)
.then(getCompiledSchema)
.then(() => {
if (process.env.TRUST_CLOUDFLARE === "false") {
logger.info("Cloudflares IPs are NOT trusted");
return;
}
logger.info("Cloudflares IPs are trusted");
internalIpRanges.initTimer();
return internalIpRanges.fetch();
return apiValidator.loadSchemas;
})
.then(internalIpRanges.fetch)
.then(() => {
internalCertificate.initTimer();
internalNginx.reload();
internalIpRanges.initTimer();
const server = app.listen("/run/npmplus.sock", () => {
logger.info(`Backend PID ${process.pid} listening on unix socket...`);
const server = app.listen(48693, '127.0.0.1', () => {
logger.info('Backend PID ' + process.pid + ' listening on port 48693 ...');
process.on("SIGTERM", () => {
logger.info(`PID ${process.pid} received SIGTERM`);
process.on('SIGTERM', () => {
logger.info('PID ' + process.pid + ' received SIGTERM');
server.close(() => {
logger.info("Stopping.");
logger.info('Stopping.');
process.exit(0);
});
});
});
})
.catch((err) => {
logger.error(`Startup Error: ${err.message}`, err);
logger.error(err.message);
setTimeout(appStart, 1000);
});
}
@@ -47,6 +42,6 @@ async function appStart() {
try {
appStart();
} catch (err) {
logger.fatal(err);
logger.error(err.message, err);
process.exit(1);
}
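
One side of this hunk listens on 127.0.0.1:48693, the other on the unix socket /run/npmplus.sock; both close the server and exit cleanly on SIGTERM, and appStart() retries itself a second after any startup error. A standalone sketch of the unix-socket variant, using plain node:http in place of the express app (the request handler is illustrative):

import fs from "node:fs";
import http from "node:http";

const SOCKET = "/run/npmplus.sock";

// A stale socket file survives a crash and would make listen() fail,
// so remove any leftover before binding.
fs.rmSync(SOCKET, { force: true });

const server = http.createServer((_req, res) => res.end("ok\n"));

server.listen(SOCKET, () => {
    console.log(`PID ${process.pid} listening on ${SOCKET}`);
    process.on("SIGTERM", () => {
        console.log(`PID ${process.pid} received SIGTERM`);
        server.close(() => process.exit(0));
    });
});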

View File

@@ -1,388 +0,0 @@
import crypto from "node:crypto";
import bcrypt from "bcryptjs";
import { createGuardrails, generateSecret, generateURI, verify } from "otplib";
import errs from "../lib/error.js";
import authModel from "../models/auth.js";
import internalUser from "./user.js";
const APP_NAME = "NPMplus";
const BACKUP_CODE_COUNT = 8;
/**
* Generate backup codes
* @returns {Promise<{plain: string[], hashed: string[]}>}
*/
const generateBackupCodes = async () => {
const plain = [];
const hashed = [];
for (let i = 0; i < BACKUP_CODE_COUNT; i++) {
const code = crypto.randomBytes(4).toString("hex").toUpperCase();
plain.push(code);
const hash = await bcrypt.hash(code, 10);
hashed.push(hash);
}
return { plain, hashed };
};
const internal2fa = {
/**
* Check if user has 2FA enabled
* @param {number} userId
* @returns {Promise<boolean>}
*/
isEnabled: async (userId) => {
const auth = await internal2fa.getUserPasswordAuth(userId);
return auth?.meta?.totp_enabled === true;
},
/**
* Get 2FA status for user
* @param {Access} access
* @param {number} userId
* @returns {Promise<{enabled: boolean, backup_codes_remaining: number}>}
*/
getStatus: async (access, userId) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const enabled = auth?.meta?.totp_enabled === true;
let backup_codes_remaining = 0;
if (enabled) {
const backupCodes = auth.meta.backup_codes || [];
backup_codes_remaining = backupCodes.length;
}
return {
enabled,
backup_codes_remaining,
};
},
/**
* Start 2FA setup - store pending secret
*
* @param {Access} access
* @param {number} userId
* @returns {Promise<{secret: string, otpauth_url: string}>}
*/
startSetup: async (access, userId) => {
await access.can("users:password", userId);
const user = await internalUser.get(access, { id: userId });
const secret = generateSecret();
const otpauth_url = generateURI({
issuer: APP_NAME,
label: user.email,
secret: secret,
});
const auth = await internal2fa.getUserPasswordAuth(userId);
// ensure user isn't already setup for 2fa
const enabled = auth?.meta?.totp_enabled === true;
if (enabled) {
throw new errs.ValidationError("2FA is already enabled");
}
const meta = auth.meta || {};
meta.totp_pending_secret = secret;
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return { secret, otpauth_url };
},
/**
* Enable 2FA after verifying code
*
* @param {Access} access
* @param {number} userId
* @param {string} code
* @returns {Promise<{backup_codes: string[]}>}
*/
enable: async (access, userId, code) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const secret = auth?.meta?.totp_pending_secret || false;
if (!secret) {
throw new errs.ValidationError("No pending 2FA setup found");
}
const codeTrim = code.trim();
const result = await verify({ token: codeTrim, secret });
if (!result.valid) {
throw new errs.ValidationError("Invalid verification code");
}
const { plain, hashed } = await generateBackupCodes();
const meta = {
...auth.meta,
totp_secret: secret,
totp_enabled: true,
totp_enabled_at: new Date().toISOString(),
backup_codes: hashed,
};
delete meta.totp_pending_secret;
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return { backup_codes: plain };
},
/**
* Disable 2FA
*
* @param {Access} access
* @param {number} userId
* @param {string} code
* @returns {Promise<void>}
*/
disable: async (access, userId, code) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const enabled = auth?.meta?.totp_enabled === true;
if (!enabled) {
throw new errs.ValidationError("2FA is not enabled");
}
const codeTrim = code.trim();
if (codeTrim.length !== 6 && codeTrim.length !== 8) {
throw new errs.ValidationError("Invalid verification code");
}
// Try the TOTP code first if it's 6 chars; verify() will throw if the
// token isn't 6 chars, and backup codes are 8 chars.
if (codeTrim.length === 6) {
const result = await verify({
token: codeTrim,
secret: auth.meta.totp_secret,
// These guardrails lower the minimum length requirement for secrets.
// In v12 of otplib the default minimum length is 10 and in v13 it is 16.
// Since there are 2fa secrets in the wild generated with v12 we need to allow shorter secrets
// so people won't be locked out when upgrading.
guardrails: createGuardrails({
MIN_SECRET_BYTES: 10,
}),
});
if (!result.valid) {
throw new errs.ValidationError("Invalid verification code");
}
}
// Try backup codes
if (codeTrim.length === 8) {
const backupCodes = auth?.meta?.backup_codes || [];
let invalid = true;
for (let i = 0; i < backupCodes.length; i++) {
const match = await bcrypt.compare(codeTrim.toUpperCase(), backupCodes[i]);
if (match) {
// Remove used backup code
const updatedCodes = [...backupCodes];
updatedCodes.splice(i, 1);
const meta = { ...auth.meta, backup_codes: updatedCodes };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
invalid = false;
}
}
if (invalid) {
throw new errs.ValidationError("Invalid verification code");
}
}
const meta = { ...auth.meta };
delete meta.totp_secret;
delete meta.totp_enabled;
delete meta.totp_enabled_at;
delete meta.backup_codes;
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
},
/**
* Verify 2FA code for login
*
* @param {number} userId
* @param {string} token
* @returns {Promise<boolean>}
*/
verifyForLogin: async (userId, token) => {
const auth = await internal2fa.getUserPasswordAuth(userId);
const secret = auth?.meta?.totp_secret || false;
if (!secret) {
return false;
}
const tokenTrim = token.trim();
// Try the TOTP code first if it's 6 chars; verify() will throw if the
// token isn't 6 chars, and backup codes are 8 chars.
if (tokenTrim.length === 6) {
const result = await verify({
token: tokenTrim,
secret,
// These guardrails lower the minimum length requirement for secrets.
// In v12 of otplib the default minimum length is 10 and in v13 it is 16.
// Since there are 2fa secrets in the wild generated with v12 we need to allow shorter secrets
// so people won't be locked out when upgrading.
guardrails: createGuardrails({
MIN_SECRET_BYTES: 10,
}),
});
return result.valid;
}
// Try backup codes
if (tokenTrim.length === 8) {
const backupCodes = auth?.meta?.backup_codes || [];
for (let i = 0; i < backupCodes.length; i++) {
const match = await bcrypt.compare(tokenTrim.toUpperCase(), backupCodes[i]);
if (match) {
// Remove used backup code
const updatedCodes = [...backupCodes];
updatedCodes.splice(i, 1);
const meta = { ...auth.meta, backup_codes: updatedCodes };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return true;
}
}
}
return false;
},
/**
* Regenerate backup codes
*
* @param {Access} access
* @param {number} userId
* @param {string} token
* @returns {Promise<{backup_codes: string[]}>}
*/
regenerateBackupCodes: async (access, userId, token) => {
await access.can("users:password", userId);
await internalUser.get(access, { id: userId });
const auth = await internal2fa.getUserPasswordAuth(userId);
const enabled = auth?.meta?.totp_enabled === true;
const secret = auth?.meta?.totp_secret || false;
if (!enabled) {
throw new errs.ValidationError("2FA is not enabled");
}
if (!secret) {
throw new errs.ValidationError("No 2FA secret found");
}
const tokenTrim = token.trim();
if (tokenTrim.length !== 6 && tokenTrim.length !== 8) {
throw new errs.ValidationError("Invalid verification code");
}
// Try the TOTP code first if it's 6 chars; verify() will throw if the
// token isn't 6 chars, and backup codes are 8 chars.
if (tokenTrim.length === 6) {
const result = await verify({
token: tokenTrim,
secret,
// These guardrails lower the minimum length requirement for secrets.
// In v12 of otplib the default minimum length is 10 and in v13 it is 16.
// Since there are 2fa secrets in the wild generated with v12 we need to allow shorter secrets
// so people won't be locked out when upgrading.
guardrails: createGuardrails({
MIN_SECRET_BYTES: 10,
}),
});
if (!result.valid) {
throw new errs.ValidationError("Invalid verification code");
}
}
// Try backup codes
if (tokenTrim.length === 8) {
const backupCodes = auth?.meta?.backup_codes || [];
let invalid = true;
for (let i = 0; i < backupCodes.length; i++) {
const match = await bcrypt.compare(tokenTrim.toUpperCase(), backupCodes[i]);
if (match) {
// Remove used backup code
const updatedCodes = [...backupCodes];
updatedCodes.splice(i, 1);
const meta = { ...auth.meta, backup_codes: updatedCodes };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
invalid = false;
}
}
if (invalid) {
throw new errs.ValidationError("Invalid verification code");
}
}
const { plain, hashed } = await generateBackupCodes();
const meta = { ...auth.meta, backup_codes: hashed };
await authModel
.query()
.where("id", auth.id)
.andWhere("user_id", userId)
.andWhere("type", "password")
.patch({ meta });
return { backup_codes: plain };
},
getUserPasswordAuth: async (userId) => {
const auth = await authModel.query().where("user_id", userId).andWhere("type", "password").first();
if (!auth) {
throw new errs.ItemNotFoundError("Auth not found");
}
return auth;
},
};
export default internal2fa;
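
The deleted module above keeps two factors per user inside the password auth row's meta: the TOTP secret, and eight single-use backup codes stored only as bcrypt hashes. A self-contained sketch of the backup-code half, mirroring generateBackupCodes and the compare-then-splice consumption in disable/verifyForLogin; the function names are illustrative, and it must run as an ES module (top-level await):

import crypto from "node:crypto";
import bcrypt from "bcryptjs";

// Generate n single-use codes; return plaintext once, persist only the hashes.
const generateBackupCodes = async (n = 8) => {
    const plain = [];
    const hashed = [];
    for (let i = 0; i < n; i++) {
        const code = crypto.randomBytes(4).toString("hex").toUpperCase(); // 8 hex chars
        plain.push(code);
        hashed.push(await bcrypt.hash(code, 10));
    }
    return { plain, hashed };
};

// Consume a code: on match, drop its hash so the code can never be replayed.
const consumeBackupCode = async (code, hashes) => {
    for (let i = 0; i < hashes.length; i++) {
        if (await bcrypt.compare(code.toUpperCase(), hashes[i])) {
            hashes.splice(i, 1);
            return true;
        }
    }
    return false;
};

const { plain, hashed } = await generateBackupCodes();
console.log(await consumeBackupCode(plain[0], hashed)); // true
console.log(await consumeBackupCode(plain[0], hashed)); // false: already used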

View File

@@ -1,94 +1,103 @@
import fs from "node:fs";
import bcrypt from "bcryptjs";
import _ from "lodash";
import errs from "../lib/error.js";
import utils from "../lib/utils.js";
import { access as logger } from "../logger.js";
import accessListModel from "../models/access_list.js";
import accessListAuthModel from "../models/access_list_auth.js";
import accessListClientModel from "../models/access_list_client.js";
import proxyHostModel from "../models/proxy_host.js";
import internalAuditLog from "./audit-log.js";
import internalNginx from "./nginx.js";
const _ = require('lodash');
const fs = require('fs');
const batchflow = require('batchflow');
const logger = require('../logger').access;
const error = require('../lib/error');
const utils = require('../lib/utils');
const accessListModel = require('../models/access_list');
const accessListAuthModel = require('../models/access_list_auth');
const accessListClientModel = require('../models/access_list_client');
const proxyHostModel = require('../models/proxy_host');
const internalAuditLog = require('./audit-log');
const internalNginx = require('./nginx');
const omissions = () => {
return ["is_deleted"];
};
function omissions () {
return ['is_deleted'];
}
const internalAccessList = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
await access.can("access_lists:create", data);
const row = await accessListModel
.query()
.insertAndFetch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
owner_user_id: access.token.getUserId(1),
create: (access, data) => {
return access.can('access_lists:create', data)
.then((/*access_data*/) => {
return accessListModel
.query()
.insertAndFetch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
owner_user_id: access.token.getUserId(1)
})
.then(utils.omitRow(omissions()));
})
.then(utils.omitRow(omissions()));
.then((row) => {
data.id = row.id;
data.id = row.id;
let promises = [];
const promises = [];
// Items
data.items.map((item) => {
promises.push(
accessListAuthModel.query().insert({
access_list_id: row.id,
username: item.username,
password: bcrypt.hashSync(item.password, 6),
}),
);
return true;
});
// Now add the items
data.items.map((item) => {
promises.push(accessListAuthModel
.query()
.insert({
access_list_id: row.id,
username: item.username,
password: item.password
})
);
});
// Clients
data.clients?.map((client) => {
promises.push(
accessListClientModel.query().insert({
access_list_id: row.id,
address: client.address,
directive: client.directive,
}),
);
return true;
});
// Now add the clients
if (typeof data.clients !== 'undefined' && data.clients) {
data.clients.map((client) => {
promises.push(accessListClientModel
.query()
.insert({
access_list_id: row.id,
address: client.address,
directive: client.directive
})
);
});
}
await Promise.all(promises);
return Promise.all(promises);
})
.then(() => {
// re-fetch with expansions
return internalAccessList.get(access, {
id: data.id,
expand: ['owner', 'items', 'clients', 'proxy_hosts.access_list.[clients,items]']
}, true /* <- skip masking */);
})
.then((row) => {
// Audit log
data.meta = _.assign({}, data.meta || {}, row.meta);
// re-fetch with expansions
const freshRow = await internalAccessList.get(
access,
{
id: data.id,
expand: ["owner", "items", "clients", "proxy_hosts.access_list.[clients,items]"],
},
true, // skip masking
);
// Audit log
data.meta = _.assign({}, data.meta || {}, freshRow.meta);
await internalAccessList.build(freshRow);
if (Number.parseInt(freshRow.proxy_host_count, 10)) {
await internalNginx.bulkGenerateConfigs(proxyHostModel, "proxy_host", freshRow.proxy_hosts);
}
// Add to audit log
await internalAuditLog.add(access, {
action: "created",
object_type: "access-list",
object_id: freshRow.id,
meta: internalAccessList.maskItems(data),
});
return internalAccessList.maskItems(freshRow);
return internalAccessList.build(row)
.then(() => {
if (row.proxy_host_count) {
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
}
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'created',
object_type: 'access-list',
object_id: row.id,
meta: internalAccessList.maskItems(data)
});
})
.then(() => {
return internalAccessList.maskItems(row);
});
});
},
/**
@@ -99,107 +108,130 @@ const internalAccessList = {
* @param {String} [data.items]
* @return {Promise}
*/
update: async (access, data) => {
await access.can("access_lists:update", data.id);
const row = await internalAccessList.get(access, { id: data.id });
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Access List could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
}
// patch name if specified
if (typeof data.name !== "undefined" && data.name) {
await accessListModel.query().where({ id: data.id }).patch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
});
}
// Check for items and add/update/remove them
if (typeof data.items !== "undefined" && data.items) {
const promises = [];
const itemsToKeep = [];
data.items.map((item) => {
if (item.password) {
promises.push(
accessListAuthModel.query().insert({
access_list_id: data.id,
username: item.username,
password: bcrypt.hashSync(item.password, 6),
}),
);
} else {
// This was supplied with an empty password, which means keep it but don't change the password
itemsToKeep.push(item.username);
update: (access, data) => {
return access.can('access_lists:update', data.id)
.then((/*access_data*/) => {
return internalAccessList.get(access, {id: data.id});
})
.then((row) => {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new error.InternalValidationError('Access List could not be updated, IDs do not match: ' + row.id + ' !== ' + data.id);
}
return true;
});
const query = accessListAuthModel.query().delete().where("access_list_id", data.id);
if (itemsToKeep.length) {
query.andWhere("username", "NOT IN", itemsToKeep);
}
await query;
// Add new items
if (promises.length) {
await Promise.all(promises);
}
}
// Check for clients and add/update/remove them
if (typeof data.clients !== "undefined" && data.clients) {
const clientPromises = [];
data.clients.map((client) => {
if (client.address) {
clientPromises.push(
accessListClientModel.query().insert({
access_list_id: data.id,
address: client.address,
directive: client.directive,
}),
);
})
.then(() => {
// patch name if specified
if (typeof data.name !== 'undefined' && data.name) {
return accessListModel
.query()
.where({id: data.id})
.patch({
name: data.name,
satisfy_any: data.satisfy_any,
pass_auth: data.pass_auth,
});
}
return true;
})
.then(() => {
// Check for items and add/update/remove them
if (typeof data.items !== 'undefined' && data.items) {
let promises = [];
let items_to_keep = [];
data.items.map(function (item) {
if (item.password) {
promises.push(accessListAuthModel
.query()
.insert({
access_list_id: data.id,
username: item.username,
password: item.password
})
);
} else {
// This was supplied with an empty password, which means keep it but don't change the password
items_to_keep.push(item.username);
}
});
let query = accessListAuthModel
.query()
.delete()
.where('access_list_id', data.id);
if (items_to_keep.length) {
query.andWhere('username', 'NOT IN', items_to_keep);
}
return query
.then(() => {
// Add new items
if (promises.length) {
return Promise.all(promises);
}
});
}
})
.then(() => {
// Check for clients and add/update/remove them
if (typeof data.clients !== 'undefined' && data.clients) {
let promises = [];
data.clients.map(function (client) {
if (client.address) {
promises.push(accessListClientModel
.query()
.insert({
access_list_id: data.id,
address: client.address,
directive: client.directive
})
);
}
});
let query = accessListClientModel
.query()
.delete()
.where('access_list_id', data.id);
return query
.then(() => {
// Add new items
if (promises.length) {
return Promise.all(promises);
}
});
}
})
.then(internalNginx.reload)
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'updated',
object_type: 'access-list',
object_id: data.id,
meta: internalAccessList.maskItems(data)
});
})
.then(() => {
// re-fetch with expansions
return internalAccessList.get(access, {
id: data.id,
expand: ['owner', 'items', 'clients', 'proxy_hosts.[certificate,access_list.[clients,items]]']
}, true /* <- skip masking */);
})
.then((row) => {
return internalAccessList.build(row)
.then(() => {
if (row.proxy_host_count) {
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
}
})
.then(() => {
return internalAccessList.maskItems(row);
});
});
const query = accessListClientModel.query().delete().where("access_list_id", data.id);
await query;
// Add new clients
if (clientPromises.length) {
await Promise.all(clientPromises);
}
}
// Add to audit log
await internalAuditLog.add(access, {
action: "updated",
object_type: "access-list",
object_id: data.id,
meta: internalAccessList.maskItems(data),
});
// re-fetch with expansions
const freshRow = await internalAccessList.get(
access,
{
id: data.id,
expand: ["owner", "items", "clients", "proxy_hosts.[certificate,access_list.[clients,items]]"],
},
true, // skip masking
);
await internalAccessList.build(freshRow);
if (Number.parseInt(freshRow.proxy_host_count, 10)) {
await internalNginx.bulkGenerateConfigs(proxyHostModel, "proxy_host", freshRow.proxy_hosts);
}
await internalNginx.reload();
return internalAccessList.maskItems(freshRow);
},
/**
@@ -208,46 +240,48 @@ const internalAccessList = {
* @param {Integer} data.id
* @param {Array} [data.expand]
* @param {Array} [data.omit]
* @param {Boolean} [skipMasking]
* @param {Boolean} [skip_masking]
* @return {Promise}
*/
get: async (access, data, skipMasking) => {
const thisData = data || {};
const accessData = await access.can("access_lists:get", thisData.id);
get: (access, data, skip_masking) => {
if (typeof data === 'undefined') {
data = {};
}
const query = accessListModel
.query()
.select("access_list.*", accessListModel.raw("COUNT(proxy_host.id) as proxy_host_count"))
.leftJoin("proxy_host", function () {
this.on("proxy_host.access_list_id", "=", "access_list.id").andOn("proxy_host.is_deleted", "=", 0);
return access.can('access_lists:get', data.id)
.then((access_data) => {
let query = accessListModel
.query()
.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
.where('access_list.is_deleted', 0)
.andWhere('access_list.id', data.id)
.allowGraph('[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]')
.first();
if (access_data.permission_visibility !== 'all') {
query.andWhere('access_list.owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.where("access_list.is_deleted", 0)
.andWhere("access_list.id", thisData.id)
.groupBy("access_list.id")
.allowGraph("[owner,items,clients,proxy_hosts.[certificate,access_list.[clients,items]]]")
.first();
if (accessData.permission_visibility !== "all") {
query.andWhere("access_list.owner_user_id", access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
}
let row = await query.then(utils.omitRow(omissions()));
if (!row || !row.id) {
throw new errs.ItemNotFoundError(thisData.id);
}
if (!skipMasking && typeof row.items !== "undefined" && row.items) {
row = internalAccessList.maskItems(row);
}
// Custom omissions
if (typeof data.omit !== "undefined" && data.omit !== null) {
row = _.omit(row, data.omit);
}
return row;
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
if (!skip_masking && typeof row.items !== 'undefined' && row.items) {
row = internalAccessList.maskItems(row);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
}
return row;
});
},
/**
@@ -257,58 +291,73 @@ const internalAccessList = {
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: async (access, data) => {
await access.can("access_lists:delete", data.id);
const row = await internalAccessList.get(access, {
id: data.id,
expand: ["proxy_hosts", "items", "clients"],
});
delete: (access, data) => {
return access.can('access_lists:delete', data.id)
.then(() => {
return internalAccessList.get(access, {id: data.id, expand: ['proxy_hosts', 'items', 'clients']});
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
// 1. update row to be deleted
// 2. update any proxy hosts that were using it (ignoring permissions)
// 3. reconfigure those hosts
// 4. audit log
// 1. update row to be deleted
// 2. update any proxy hosts that were using it (ignoring permissions)
// 3. reconfigure those hosts
// 4. audit log
// 1. update row to be deleted
return accessListModel
.query()
.where('id', row.id)
.patch({
is_deleted: 1
})
.then(() => {
// 2. update any proxy hosts that were using it (ignoring permissions)
if (row.proxy_hosts) {
return proxyHostModel
.query()
.where('access_list_id', '=', row.id)
.patch({access_list_id: 0})
.then(() => {
// 3. reconfigure those hosts, then reload nginx
// 1. update row to be deleted
await accessListModel.query().where("id", row.id).patch({
is_deleted: 1,
});
// set the access_list_id to zero for these items
row.proxy_hosts.map(function (val, idx) {
row.proxy_hosts[idx].access_list_id = 0;
});
// 2. update any proxy hosts that were using it (ignoring permissions)
if (row.proxy_hosts) {
await proxyHostModel.query().where("access_list_id", "=", row.id).patch({ access_list_id: 0 });
return internalNginx.bulkGenerateConfigs('proxy_host', row.proxy_hosts);
})
.then(() => {
return internalNginx.reload();
});
}
})
.then(() => {
// delete the htpasswd file
let htpasswd_file = internalAccessList.getFilename(row);
// 3. reconfigure those hosts, then reload nginx
// set the access_list_id to zero for these items
row.proxy_hosts.map((_val, idx) => {
row.proxy_hosts[idx].access_list_id = 0;
try {
fs.unlinkSync(htpasswd_file);
} catch (err) {
// do nothing
}
})
.then(() => {
// 4. audit log
return internalAuditLog.add(access, {
action: 'deleted',
object_type: 'access-list',
object_id: row.id,
meta: _.omit(internalAccessList.maskItems(row), ['is_deleted', 'proxy_hosts'])
});
});
})
.then(() => {
return true;
});
await internalNginx.bulkGenerateConfigs(proxyHostModel, "proxy_host", row.proxy_hosts);
}
await internalNginx.reload();
// delete the htpasswd file
try {
fs.unlinkSync(internalAccessList.getFilename(row));
} catch (_err) {
// do nothing
}
// 4. audit log
await internalAuditLog.add(access, {
action: "deleted",
object_type: "access-list",
object_id: row.id,
meta: _.omit(internalAccessList.maskItems(row), ["is_deleted", "proxy_hosts"]),
});
return true;
},
/**
@@ -316,66 +365,72 @@ const internalAccessList = {
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("access_lists:list");
getAll: (access, expand, search_query) => {
return access.can('access_lists:list')
.then((access_data) => {
let query = accessListModel
.query()
.select('access_list.*', accessListModel.raw('COUNT(proxy_host.id) as proxy_host_count'))
.joinRaw('LEFT JOIN `proxy_host` ON `proxy_host`.`access_list_id` = `access_list`.`id` AND `proxy_host`.`is_deleted` = 0')
.where('access_list.is_deleted', 0)
.groupBy('access_list.id')
.allowGraph('[owner,items,clients]')
.orderBy('access_list.name', 'ASC');
const query = accessListModel
.query()
.select("access_list.*", accessListModel.raw("COUNT(proxy_host.id) as proxy_host_count"))
.leftJoin("proxy_host", function () {
this.on("proxy_host.access_list_id", "=", "access_list.id").andOn("proxy_host.is_deleted", "=", 0);
})
.where("access_list.is_deleted", 0)
.groupBy("access_list.id")
.allowGraph("[owner,items,clients]")
.orderBy("access_list.name", "ASC");
if (accessData.permission_visibility !== "all") {
query.andWhere("access_list.owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof searchQuery === "string") {
query.where(function () {
this.where("name", "like", `%${searchQuery}%`);
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const rows = await query.then(utils.omitRows(omissions()));
if (rows) {
rows.map((row, idx) => {
if (typeof row.items !== "undefined" && row.items) {
rows[idx] = internalAccessList.maskItems(row);
if (access_data.permission_visibility !== 'all') {
query.andWhere('access_list.owner_user_id', access.token.getUserId(1));
}
return true;
// Query is used for searching
if (typeof search_query === 'string') {
query.where(function () {
this.where('name', 'like', '%' + search_query + '%');
});
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (rows) {
rows.map(function (row, idx) {
if (typeof row.items !== 'undefined' && row.items) {
rows[idx] = internalAccessList.maskItems(row);
}
});
}
return rows;
});
}
return rows;
},
/**
* Count is used in reports
* Report use
*
* @param {Integer} userId
* @param {Integer} user_id
* @param {String} visibility
* @returns {Promise}
*/
getCount: async (userId, visibility) => {
const query = accessListModel.query().count("id as count").where("is_deleted", 0);
getCount: (user_id, visibility) => {
let query = accessListModel
.query()
.count('id as count')
.where('is_deleted', 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", userId);
if (visibility !== 'all') {
query.andWhere('owner_user_id', user_id);
}
const row = await query.first();
return Number.parseInt(row.count, 10);
return query.first()
.then((row) => {
return parseInt(row.count, 10);
});
},
/**
@@ -383,21 +438,21 @@ const internalAccessList = {
* @returns {Object}
*/
maskItems: (list) => {
if (list && typeof list.items !== "undefined") {
list.items.map((val, idx) => {
let repeatFor = 8;
let firstChar = "*";
if (list && typeof list.items !== 'undefined') {
list.items.map(function (val, idx) {
let repeat_for = 8;
let first_char = '*';
if (typeof val.password !== "undefined" && val.password) {
repeatFor = val.password.length - 1;
firstChar = val.password.charAt(0);
if (typeof val.password !== 'undefined' && val.password) {
repeat_for = val.password.length - 1;
first_char = val.password.charAt(0);
}
list.items[idx].hint = firstChar + "*".repeat(repeatFor);
list.items[idx].password = "";
return true;
list.items[idx].hint = first_char + ('*').repeat(repeat_for);
list.items[idx].password = '';
});
}
return list;
},
@@ -407,7 +462,7 @@ const internalAccessList = {
* @returns {String}
*/
getFilename: (list) => {
return `/data/access/${list.id}`;
return '/data/etc/access/' + list.id;
},
/**
@@ -417,34 +472,58 @@ const internalAccessList = {
* @param {Array} list.items
* @returns {Promise}
*/
build: async (list) => {
logger.info(`Building Access file #${list.id} for: ${list.name}`);
build: (list) => {
logger.info('Building Access file #' + list.id + ' for: ' + list.name);
const htpasswdFile = internalAccessList.getFilename(list);
return new Promise((resolve, reject) => {
let htpasswd_file = internalAccessList.getFilename(list);
fs.rmSync(htpasswdFile, { force: true });
fs.writeFileSync(htpasswdFile, "", { encoding: "utf8" });
if (list.items?.length) {
for (const item of list.items) {
if (item.username?.length && item.password?.length) {
logger.info(`Adding: ${item.username}`);
try {
fs.appendFileSync(htpasswdFile, `${item.username}:${item.password}\n`, {
encoding: "utf8",
});
} catch (err) {
logger.error(err);
throw err;
}
}
// 1. remove any existing access file
try {
fs.unlinkSync(htpasswd_file);
} catch (err) {
// do nothing
}
}
logger.success(`Built Access file #${list.id} for: ${list.name}`);
},
// 2. create empty access file
try {
fs.writeFileSync(htpasswd_file, '', {encoding: 'utf8'});
resolve(htpasswd_file);
} catch (err) {
reject(err);
}
})
.then((htpasswd_file) => {
// 3. generate password for each user
if (list.items.length) {
return new Promise((resolve, reject) => {
batchflow(list.items).sequential()
.each((i, item, next) => {
if (typeof item.password !== 'undefined' && item.password.length) {
logger.info('Adding: ' + item.username);
utils.execFile('htpasswd', ['-b', htpasswd_file, item.username, item.password])
.then((/*result*/) => {
next();
})
.catch((err) => {
logger.error(err);
next(err);
});
}
})
.error((err) => {
logger.error(err);
reject(err);
})
.end((results) => {
logger.success('Built Access file #' + list.id + ' for: ' + list.name);
resolve(results);
});
});
}
});
}
};
export default internalAccessList;
module.exports = internalAccessList;
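
One side of this diff builds the access file by shelling out to htpasswd -b per user (sequenced with batchflow); the other appends username:password lines itself, the passwords having been bcrypt-hashed at insert time (bcrypt.hashSync(item.password, 6)). A minimal sketch of the append-lines approach; the path and the entries are illustrative:

import fs from "node:fs";
import bcrypt from "bcryptjs";

// Mirrors build() above: recreate the file, then append one line per valid user.
const buildAccessFile = (file, items) => {
    fs.rmSync(file, { force: true });
    fs.writeFileSync(file, "", { encoding: "utf8" });
    for (const item of items) {
        if (item.username?.length && item.password?.length) {
            // In the code above the stored password is already a bcrypt hash;
            // hashing here just keeps the sketch self-contained.
            const hash = bcrypt.hashSync(item.password, 6);
            fs.appendFileSync(file, `${item.username}:${hash}\n`, { encoding: "utf8" });
        }
    }
};

buildAccessFile("/tmp/access-demo", [{ username: "alice", password: "hunter2" }]);
console.log(fs.readFileSync("/tmp/access-demo", "utf8")); // alice:$2a$06$...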

View File

@@ -1,63 +1,39 @@
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import auditLogModel from "../models/audit-log.js";
const error = require('../lib/error');
const auditLogModel = require('../models/audit-log');
const internalAuditLog = {
/**
* All logs
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
await access.can("auditlog:list");
getAll: (access, expand, search_query) => {
return access.can('auditlog:list')
.then(() => {
let query = auditLogModel
.query()
.orderBy('created_on', 'DESC')
.orderBy('id', 'DESC')
.limit(100)
.allowGraph('[user]');
const query = auditLogModel
.query()
.orderBy("created_on", "DESC")
.orderBy("id", "DESC")
.limit(100)
.allowGraph("[user]");
// Query is used for searching
if (typeof search_query === 'string') {
query.where(function () {
this.where('meta', 'like', '%' + search_query + '%');
});
}
// Query is used for searching
if (typeof searchQuery === "string" && searchQuery.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("meta"), "like", `%${searchQuery}`);
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query;
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
return await query;
},
/**
* @param {Access} access
* @param {Object} [data]
* @param {Integer} [data.id] Defaults to the token user
* @param {Array} [data.expand]
* @return {Promise}
*/
get: async (access, data) => {
await access.can("auditlog:list");
const query = auditLogModel.query().andWhere("id", data.id).allowGraph("[user]").first();
if (typeof data.expand !== "undefined" && data.expand !== null) {
query.withGraphFetched(`[${data.expand.join(", ")}]`);
}
const row = await query;
if (!row?.id) {
throw new errs.ItemNotFoundError(data.id);
}
return row;
},
/**
@@ -74,24 +50,29 @@ const internalAuditLog = {
* @param {Object} [data.meta]
* @returns {Promise}
*/
add: async (access, data) => {
if (typeof data.user_id === "undefined" || !data.user_id) {
data.user_id = access.token.getUserId(1);
}
add: (access, data) => {
return new Promise((resolve, reject) => {
// Default the user id
if (typeof data.user_id === 'undefined' || !data.user_id) {
data.user_id = access.token.getUserId(1);
}
if (typeof data.action === "undefined" || !data.action) {
throw new errs.InternalValidationError("Audit log entry must contain an Action");
}
// Make sure at least 1 of the IDs are set and action
return await auditLogModel.query().insert({
user_id: data.user_id,
action: data.action,
object_type: data.object_type || "",
object_id: data.object_id || 0,
meta: data.meta || {},
if (typeof data.action === 'undefined' || !data.action) {
reject(new error.InternalValidationError('Audit log entry must contain an Action'));
} else {
// Make sure at least 1 of the IDs are set and action
resolve(auditLogModel
.query()
.insert({
user_id: data.user_id,
action: data.action,
object_type: data.object_type || '',
object_id: data.object_id || 0,
meta: data.meta || {}
}));
}
});
},
}
};
export default internalAuditLog;
module.exports = internalAuditLog;
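
Both versions of add() above enforce the same contract: user_id falls back to the token's user, action is mandatory, and object_type, object_id and meta get defaults. A tiny sketch of just that normalisation step, under a hypothetical normalizeAuditEntry name:

// Minimal sketch of the defaulting/validation rules in add() above.
const normalizeAuditEntry = (data, fallbackUserId) => {
    if (!data.action) {
        throw new Error("Audit log entry must contain an Action");
    }
    return {
        user_id: data.user_id || fallbackUserId,
        action: data.action,
        object_type: data.object_type || "",
        object_id: data.object_id || 0,
        meta: data.meta || {},
    };
};

console.log(normalizeAuditEntry({ action: "created", object_id: 42 }, 1));
// -> { user_id: 1, action: 'created', object_type: '', object_id: 42, meta: {} }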

File diff suppressed because it is too large

View File

@@ -1,94 +1,103 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import deadHostModel from "../models/dead_host.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const deadHostModel = require('../models/dead_host');
const internalHost = require('./host');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate');
const omissions = () => {
return ["is_deleted"];
};
function omissions () {
return ['is_deleted'];
}
const internalDeadHost = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
const createCertificate = data.certificate_id === "new";
create: (access, data) => {
let create_certificate = data.certificate_id === 'new';
if (createCertificate) {
if (create_certificate) {
delete data.certificate_id;
}
await access.can("dead_hosts:create", data);
return access.can('dead_hosts:create', data)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
let domain_name_check_promises = [];
// Get a list of the domain names and check each of them against existing records
const domainNameCheckPromises = [];
data.domain_names.map(function (domain_name) {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name));
});
data.domain_names.map((domain_name) => {
domainNameCheckPromises.push(internalHost.isHostnameTaken(domain_name));
return true;
});
return Promise.all(domain_name_check_promises)
.then((check_results) => {
check_results.map(function (result) {
if (result.is_taken) {
throw new error.ValidationError(result.hostname + ' is already in use');
}
});
});
})
.then(() => {
// At this point the domains should have been checked
data.owner_user_id = access.token.getUserId(1);
data = internalHost.cleanSslHstsData(data);
await Promise.all(domainNameCheckPromises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
return deadHostModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
})
.then((row) => {
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, data)
.then((cert) => {
// update host with cert id
return internalDeadHost.update(access, {
id: row.id,
certificate_id: cert.id
});
})
.then(() => {
return row;
});
} else {
return row;
}
return true;
})
.then((row) => {
// re-fetch with cert
return internalDeadHost.get(access, {
id: row.id,
expand: ['certificate', 'owner']
});
})
.then((row) => {
// Configure nginx
return internalNginx.configure(deadHostModel, 'dead_host', row)
.then(() => {
return row;
});
})
.then((row) => {
data.meta = _.assign({}, data.meta || {}, row.meta);
// Add to audit log
return internalAuditLog.add(access, {
action: 'created',
object_type: 'dead-host',
object_id: row.id,
meta: data
})
.then(() => {
return row;
});
});
});
// At this point the domains should have been checked
data.owner_user_id = access.token.getUserId(1);
const thisData = internalHost.cleanSslHstsData(createCertificate, data);
// Fix for db field not having a default value
// for this optional field.
if (typeof data.advanced_config === "undefined") {
thisData.advanced_config = "";
}
const row = await deadHostModel.query().insertAndFetch(thisData).then(utils.omitRow(omissions()));
// Add to audit log
await internalAuditLog.add(access, {
action: "created",
object_type: "dead-host",
object_id: row.id,
meta: thisData,
});
if (createCertificate) {
const cert = await internalCertificate.createQuickCertificate(access, data);
// update host with cert id
await internalDeadHost.update(access, {
id: row.id,
certificate_id: cert.id,
});
}
// re-fetch with cert
const freshRow = await internalDeadHost.get(access, {
id: row.id,
expand: ["certificate", "owner"],
});
// Sanity check
if (createCertificate && !freshRow.certificate_id) {
throw new errs.InternalValidationError("The host was created but the Certificate creation failed.");
}
// Configure nginx
await internalNginx.configure(deadHostModel, "dead_host", freshRow);
return freshRow;
},
/**
@@ -97,80 +106,98 @@ const internalDeadHost = {
* @param {Number} data.id
* @return {Promise}
*/
update: async (access, data) => {
const createCertificate = data.certificate_id === "new";
if (createCertificate) {
update: (access, data) => {
let create_certificate = data.certificate_id === 'new';
if (create_certificate) {
delete data.certificate_id;
}
await access.can("dead_hosts:update", data.id);
return access.can('dead_hosts:update', data.id)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
let domain_name_check_promises = [];
// Get a list of the domain names and check each of them against existing records
const domainNameCheckPromises = [];
if (typeof data.domain_names !== "undefined") {
data.domain_names.map((domainName) => {
domainNameCheckPromises.push(internalHost.isHostnameTaken(domainName, "dead", data.id));
return true;
});
if (typeof data.domain_names !== 'undefined') {
data.domain_names.map(function (domain_name) {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name, 'dead', data.id));
});
const checkResults = await Promise.all(domainNameCheckPromises);
checkResults.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
return Promise.all(domain_name_check_promises)
.then((check_results) => {
check_results.map(function (result) {
if (result.is_taken) {
throw new error.ValidationError(result.hostname + ' is already in use');
}
});
});
}
return true;
})
.then(() => {
return internalDeadHost.get(access, {id: data.id});
})
.then((row) => {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new error.InternalValidationError('404 Host could not be updated, IDs do not match: ' + row.id + ' !== ' + data.id);
}
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, {
domain_names: data.domain_names || row.domain_names,
meta: _.assign({}, row.meta, data.meta)
})
.then((cert) => {
// update host with cert id
data.certificate_id = cert.id;
})
.then(() => {
return row;
});
} else {
return row;
}
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
data = _.assign({}, {
domain_names: row.domain_names
}, data);
data = internalHost.cleanSslHstsData(data, row);
return deadHostModel
.query()
.where({id: data.id})
.patch(data)
.then((saved_row) => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'updated',
object_type: 'dead-host',
object_id: row.id,
meta: data
})
.then(() => {
return _.omit(saved_row, omissions());
});
});
})
.then(() => {
return internalDeadHost.get(access, {
id: data.id,
expand: ['owner', 'certificate']
})
.then((row) => {
// Configure nginx
return internalNginx.configure(deadHostModel, 'dead_host', row)
.then((new_meta) => {
row.meta = new_meta;
row = internalHost.cleanRowCertificateMeta(row);
return _.omit(row, omissions());
});
});
});
}
const row = await internalDeadHost.get(access, { id: data.id });
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`404 Host could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
}
if (createCertificate) {
const cert = await internalCertificate.createQuickCertificate(access, {
domain_names: data.domain_names || row.domain_names,
meta: _.assign({}, row.meta, data.meta),
});
// update host with cert id
data.certificate_id = cert.id;
}
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
let thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
data,
);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData, row);
// do the row update
await deadHostModel.query().where({ id: data.id }).patch(data);
// Add to audit log
await internalAuditLog.add(access, {
action: "updated",
object_type: "dead-host",
object_id: row.id,
meta: thisData,
});
const thisRow = await internalDeadHost.get(access, {
id: thisData.id,
expand: ["owner", "certificate"],
});
// Configure nginx
const newMeta = await internalNginx.configure(deadHostModel, "dead_host", row);
row.meta = newMeta;
return _.omit(internalHost.cleanRowCertificateMeta(thisRow), omissions());
},
/**
@@ -181,32 +208,40 @@ const internalDeadHost = {
* @param {Array} [data.omit]
* @return {Promise}
*/
get: async (access, data) => {
const accessData = await access.can("dead_hosts:get", data.id);
const query = deadHostModel
.query()
.where("is_deleted", 0)
.andWhere("id", data.id)
.allowGraph(deadHostModel.defaultAllowGraph)
.first();
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
get: (access, data) => {
if (typeof data === 'undefined') {
data = {};
}
if (typeof data.expand !== "undefined" && data.expand !== null) {
query.withGraphFetched(`[${data.expand.join(", ")}]`);
}
return access.can('dead_hosts:get', data.id)
.then((access_data) => {
let query = deadHostModel
.query()
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner,certificate]')
.first();
const row = await query.then(utils.omitRow(omissions()));
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== "undefined" && data.omit !== null) {
return _.omit(row, data.omit);
}
return row;
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
}
return row;
});
},
/**
@@ -216,29 +251,42 @@ const internalDeadHost = {
* @param {String} [data.reason]
* @returns {Promise}
*/
delete: async (access, data) => {
await access.can("dead_hosts:delete", data.id);
const row = await internalDeadHost.get(access, { id: data.id });
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
delete: (access, data) => {
return access.can('dead_hosts:delete', data.id)
.then(() => {
return internalDeadHost.get(access, {id: data.id});
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
await deadHostModel.query().where("id", row.id).patch({
is_deleted: 1,
});
// Delete Nginx Config
await internalNginx.deleteConfig("dead_host", row);
await internalNginx.reload();
// Add to audit log
await internalAuditLog.add(access, {
action: "deleted",
object_type: "dead-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
return true;
return deadHostModel
.query()
.where('id', row.id)
.patch({
is_deleted: 1
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig('dead_host', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'deleted',
object_type: 'dead-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
.then(() => {
return true;
});
},
/**
@@ -248,36 +296,46 @@ const internalDeadHost = {
* @param {String} [data.reason]
* @returns {Promise}
*/
enable: async (access, data) => {
await access.can("dead_hosts:update", data.id);
const row = await internalDeadHost.get(access, {
id: data.id,
expand: ["certificate", "owner"],
});
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Host is already enabled");
}
enable: (access, data) => {
return access.can('dead_hosts:update', data.id)
.then(() => {
return internalDeadHost.get(access, {
id: data.id,
expand: ['certificate', 'owner']
});
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (row.enabled) {
throw new error.ValidationError('Host is already enabled');
}
row.enabled = 1;
row.enabled = 1;
await deadHostModel.query().where("id", row.id).patch({
enabled: 1,
});
// Configure nginx
await internalNginx.configure(deadHostModel, "dead_host", row);
// Add to audit log
await internalAuditLog.add(access, {
action: "enabled",
object_type: "dead-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
return true;
return deadHostModel
.query()
.where('id', row.id)
.patch({
enabled: 1
})
.then(() => {
// Configure nginx
return internalNginx.configure(deadHostModel, 'dead_host', row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'enabled',
object_type: 'dead-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
.then(() => {
return true;
});
},
/**
@@ -287,34 +345,46 @@ const internalDeadHost = {
* @param {String} [data.reason]
* @returns {Promise}
*/
disable: async (access, data) => {
await access.can("dead_hosts:update", data.id);
const row = await internalDeadHost.get(access, { id: data.id });
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Host is already disabled");
}
disable: (access, data) => {
return access.can('dead_hosts:update', data.id)
.then(() => {
return internalDeadHost.get(access, {id: data.id});
})
.then((row) => {
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (!row.enabled) {
throw new error.ValidationError('Host is already disabled');
}
row.enabled = 0;
row.enabled = 0;
await deadHostModel.query().where("id", row.id).patch({
enabled: 0,
});
// Delete Nginx Config
await internalNginx.deleteConfig("dead_host", row);
await internalNginx.reload();
// Add to audit log
await internalAuditLog.add(access, {
action: "disabled",
object_type: "dead-host",
object_id: row.id,
meta: _.omit(row, omissions()),
});
return true;
return deadHostModel
.query()
.where('id', row.id)
.patch({
enabled: 0
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig('dead_host', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'disabled',
object_type: 'dead-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
.then(() => {
return true;
});
},
/**
@@ -322,38 +392,43 @@ const internalDeadHost = {
*
* @param {Access} access
* @param {Array} [expand]
* @param {String} [searchQuery]
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("dead_hosts:list");
const query = deadHostModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(deadHostModel.defaultAllowGraph)
.orderBy(castJsonIfNeed("domain_names"), "ASC");
getAll: (access, expand, search_query) => {
return access.can('dead_hosts:list')
.then((access_data) => {
let query = deadHostModel
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,certificate]')
.orderBy('domain_names', 'ASC');
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
// Query is used for searching
if (typeof searchQuery === "string" && searchQuery.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("domain_names"), "like", `%${searchQuery}%`);
// Query is used for searching
if (typeof search_query === 'string') {
query.where(function () {
this.where('domain_names', 'like', '%' + search_query + '%');
});
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const rows = await query.then(utils.omitRows(omissions()));
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
},
/**
@@ -363,16 +438,21 @@ const internalDeadHost = {
* @param {String} visibility
* @returns {Promise}
*/
getCount: async (user_id, visibility) => {
const query = deadHostModel.query().count("id as count").where("is_deleted", 0);
getCount: (user_id, visibility) => {
let query = deadHostModel
.query()
.count('id as count')
.where('is_deleted', 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
if (visibility !== 'all') {
query.andWhere('owner_user_id', user_id);
}
const row = await query.first();
return Number.parseInt(row.count, 10);
},
return query.first()
.then((row) => {
return parseInt(row.count, 10);
});
}
};
export default internalDeadHost;
module.exports = internalDeadHost;

View File

@@ -1,10 +1,10 @@
import _ from "lodash";
import { castJsonIfNeed } from "../lib/helpers.js";
import deadHostModel from "../models/dead_host.js";
import proxyHostModel from "../models/proxy_host.js";
import redirectionHostModel from "../models/redirection_host.js";
const _ = require('lodash');
const proxyHostModel = require('../models/proxy_host');
const redirectionHostModel = require('../models/redirection_host');
const deadHostModel = require('../models/dead_host');
const internalHost = {
/**
* Makes sure that the ssl_* and hsts_* fields play nicely together.
* ie: if there is no cert, then force_ssl is off.
@@ -14,19 +14,25 @@ const internalHost = {
* @param {object} [existing_data]
* @returns {object}
*/
cleanSslHstsData: (newCert, data, existingData) => {
const combinedData = _.assign({}, existingData || {}, data);
cleanSslHstsData: function (data, existing_data) {
existing_data = existing_data === undefined ? {} : existing_data;
if (!combinedData.certificate_id && !newCert) {
combinedData.hsts_subdomains = false;
combinedData.ssl_forced = false;
let combined_data = _.assign({}, existing_data, data);
if (!combined_data.certificate_id) {
combined_data.ssl_forced = false;
combined_data.http2_support = false;
}
if (!combinedData.ssl_forced) {
combinedData.hsts_enabled = false;
if (!combined_data.ssl_forced) {
combined_data.hsts_enabled = false;
}
return combinedData;
if (!combined_data.hsts_enabled) {
combined_data.hsts_subdomains = false;
}
return combined_data;
},
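// Illustrative sketch (assumed inputs, not from the source): with no certificate
// attached, the cascade above zeroes every dependent flag, e.g.
//   internalHost.cleanSslHstsData({ certificate_id: 0, ssl_forced: true, hsts_enabled: true, hsts_subdomains: true })
//   -> { certificate_id: 0, ssl_forced: false, http2_support: false, hsts_enabled: false, hsts_subdomains: false }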
/**
@@ -35,12 +41,11 @@ const internalHost = {
* @param {Array} rows
* @returns {Array}
*/
cleanAllRowsCertificateMeta: (rows) => {
rows.map((_, idx) => {
if (typeof rows[idx].certificate !== "undefined" && rows[idx].certificate) {
cleanAllRowsCertificateMeta: function (rows) {
rows.map(function (row, idx) {
if (typeof rows[idx].certificate !== 'undefined' && rows[idx].certificate) {
rows[idx].certificate.meta = {};
}
return true;
});
return rows;
@@ -52,8 +57,8 @@ const internalHost = {
* @param {Object} row
* @returns {Object}
*/
cleanRowCertificateMeta: (row) => {
if (typeof row.certificate !== "undefined" && row.certificate) {
cleanRowCertificateMeta: function (row) {
if (typeof row.certificate !== 'undefined' && row.certificate) {
row.certificate.meta = {};
}
@@ -61,33 +66,54 @@ const internalHost = {
},
/**
* This returns all the host types with any domain listed in the provided domainNames array.
* This returns all the host types with any domain listed in the provided domain_names array.
* This is used by the certificates to temporarily disable any host that is using the domain
*
* @param {Array} domainNames
* @param {Array} domain_names
* @returns {Promise}
*/
getHostsWithDomains: async (domainNames) => {
const responseObject = {
total_count: 0,
dead_hosts: [],
proxy_hosts: [],
redirection_hosts: [],
};
getHostsWithDomains: function (domain_names) {
let promises = [
proxyHostModel
.query()
.where('is_deleted', 0),
redirectionHostModel
.query()
.where('is_deleted', 0),
deadHostModel
.query()
.where('is_deleted', 0)
];
const proxyRes = await proxyHostModel.query().where("is_deleted", 0);
responseObject.proxy_hosts = internalHost._getHostsWithDomains(proxyRes, domainNames);
responseObject.total_count += responseObject.proxy_hosts.length;
return Promise.all(promises)
.then((promises_results) => {
let response_object = {
total_count: 0,
dead_hosts: [],
proxy_hosts: [],
redirection_hosts: []
};
const redirRes = await redirectionHostModel.query().where("is_deleted", 0);
responseObject.redirection_hosts = internalHost._getHostsWithDomains(redirRes, domainNames);
responseObject.total_count += responseObject.redirection_hosts.length;
if (promises_results[0]) {
// Proxy Hosts
response_object.proxy_hosts = internalHost._getHostsWithDomains(promises_results[0], domain_names);
response_object.total_count += response_object.proxy_hosts.length;
}
const deadRes = await deadHostModel.query().where("is_deleted", 0);
responseObject.dead_hosts = internalHost._getHostsWithDomains(deadRes, domainNames);
responseObject.total_count += responseObject.dead_hosts.length;
if (promises_results[1]) {
// Redirection Hosts
response_object.redirection_hosts = internalHost._getHostsWithDomains(promises_results[1], domain_names);
response_object.total_count += response_object.redirection_hosts.length;
}
return responseObject;
if (promises_results[2]) {
// Dead Hosts
response_object.dead_hosts = internalHost._getHostsWithDomains(promises_results[2], domain_names);
response_object.total_count += response_object.dead_hosts.length;
}
return response_object;
});
},
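// Illustrative shape of the resolved value (counts and hosts are made up):
//   getHostsWithDomains(['a.example.com'])
//   -> { total_count: 2, dead_hosts: [], proxy_hosts: [hostA], redirection_hosts: [hostB] }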
/**
@@ -98,133 +124,112 @@ const internalHost = {
* @param {Integer} [ignore_id] Must be supplied if type was also supplied
* @returns {Promise}
*/
isHostnameTaken: (hostname, ignore_type, ignore_id) => {
const promises = [
isHostnameTaken: function (hostname, ignore_type, ignore_id) {
let promises = [
proxyHostModel
.query()
.where("is_deleted", 0)
.andWhere(castJsonIfNeed("domain_names"), "like", `%${hostname}%`),
.where('is_deleted', 0)
.andWhere('domain_names', 'like', '%' + hostname + '%'),
redirectionHostModel
.query()
.where("is_deleted", 0)
.andWhere(castJsonIfNeed("domain_names"), "like", `%${hostname}%`),
.where('is_deleted', 0)
.andWhere('domain_names', 'like', '%' + hostname + '%'),
deadHostModel
.query()
.where("is_deleted", 0)
.andWhere(castJsonIfNeed("domain_names"), "like", `%${hostname}%`),
.where('is_deleted', 0)
.andWhere('domain_names', 'like', '%' + hostname + '%')
];
return Promise.all(promises).then((promises_results) => {
let is_taken = false;
return Promise.all(promises)
.then((promises_results) => {
let is_taken = false;
if (promises_results[0]) {
// Proxy Hosts
if (
internalHost._checkHostnameRecordsTaken(
hostname,
promises_results[0],
ignore_type === "proxy" && ignore_id ? ignore_id : 0,
)
) {
is_taken = true;
if (promises_results[0]) {
// Proxy Hosts
if (internalHost._checkHostnameRecordsTaken(hostname, promises_results[0], ignore_type === 'proxy' && ignore_id ? ignore_id : 0)) {
is_taken = true;
}
}
}
if (promises_results[1]) {
// Redirection Hosts
if (
internalHost._checkHostnameRecordsTaken(
hostname,
promises_results[1],
ignore_type === "redirection" && ignore_id ? ignore_id : 0,
)
) {
is_taken = true;
if (promises_results[1]) {
// Redirection Hosts
if (internalHost._checkHostnameRecordsTaken(hostname, promises_results[1], ignore_type === 'redirection' && ignore_id ? ignore_id : 0)) {
is_taken = true;
}
}
}
if (promises_results[2]) {
// Dead Hosts
if (
internalHost._checkHostnameRecordsTaken(
hostname,
promises_results[2],
ignore_type === "dead" && ignore_id ? ignore_id : 0,
)
) {
is_taken = true;
if (promises_results[2]) {
// Dead Hosts
if (internalHost._checkHostnameRecordsTaken(hostname, promises_results[2], ignore_type === 'dead' && ignore_id ? ignore_id : 0)) {
is_taken = true;
}
}
}
return {
hostname: hostname,
is_taken: is_taken,
};
});
return {
hostname: hostname,
is_taken: is_taken
};
});
},
/**
* Private call only
*
* @param {String} hostname
* @param {Array} existingRows
* @param {Integer} [ignoreId]
* @param {Array} existing_rows
* @param {Integer} [ignore_id]
* @returns {Boolean}
*/
_checkHostnameRecordsTaken: (hostname, existingRows, ignoreId) => {
let isTaken = false;
_checkHostnameRecordsTaken: function (hostname, existing_rows, ignore_id) {
let is_taken = false;
if (existingRows?.length) {
existingRows.map((existingRow) => {
existingRow.domain_names.map((existingHostname) => {
if (existing_rows && existing_rows.length) {
existing_rows.map(function (existing_row) {
existing_row.domain_names.map(function (existing_hostname) {
// Does this domain match?
if (existingHostname.toLowerCase() === hostname.toLowerCase()) {
if (!ignoreId || ignoreId !== existingRow.id) {
isTaken = true;
if (existing_hostname.toLowerCase() === hostname.toLowerCase()) {
if (!ignore_id || ignore_id !== existing_row.id) {
is_taken = true;
}
}
return true;
});
return true;
});
}
return isTaken;
return is_taken;
},
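// Illustrative sketch of the ignore semantics (rows are made-up fixtures): a match
// on the record being edited does not count as taken, any other record does.
//   _checkHostnameRecordsTaken('a.example.com', [{ id: 4, domain_names: ['a.example.com'] }], 4) -> false
//   _checkHostnameRecordsTaken('a.example.com', [{ id: 5, domain_names: ['a.example.com'] }])    -> true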
/**
* Private call only
*
* @param {Array} hosts
* @param {Array} domainNames
* @param {Array} domain_names
* @returns {Array}
*/
_getHostsWithDomains: (hosts, domainNames) => {
const response = [];
_getHostsWithDomains: function (hosts, domain_names) {
let response = [];
if (hosts?.length) {
hosts.map((host) => {
let hostMatches = false;
if (hosts && hosts.length) {
hosts.map(function (host) {
let host_matches = false;
domainNames.map((domainName) => {
host.domain_names.map((hostDomainName) => {
if (domainName.toLowerCase() === hostDomainName.toLowerCase()) {
hostMatches = true;
domain_names.map(function (domain_name) {
host.domain_names.map(function (host_domain_name) {
if (domain_name.toLowerCase() === host_domain_name.toLowerCase()) {
host_matches = true;
}
return true;
});
return true;
});
if (hostMatches) {
if (host_matches) {
response.push(host);
}
return true;
});
}
return response;
},
}
};
export default internalHost;
module.exports = internalHost;

View File

@@ -1,109 +1,147 @@
import { readFile, writeFile } from "node:fs/promises";
import { dirname } from "node:path";
import { fileURLToPath } from "node:url";
import utils from "../lib/utils.js";
import { ipRanges as logger } from "../logger.js";
import internalNginx from "./nginx.js";
import pjson from "../package.json" with { type: "json" };
const https = require('https');
const fs = require('fs');
const logger = require('../logger').ip_ranges;
const error = require('../lib/error');
const utils = require('../lib/utils');
const internalNginx = require('./nginx');
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const CLOUDFRONT_URL = 'https://ip-ranges.amazonaws.com/ip-ranges.json';
const CLOUDFARE_V4_URL = 'https://www.cloudflare.com/ips-v4';
const CLOUDFARE_V6_URL = 'https://www.cloudflare.com/ips-v6';
const CLOUDFLARE_V4_URL = "https://www.cloudflare.com/ips-v4";
const CLOUDFLARE_V6_URL = "https://www.cloudflare.com/ips-v6";
const regIpV4 = /^(?:[0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$/;
const regIpV6 = /^([0-9a-fA-F:]+)\/[0-9]{1,3}$/;
const regIpV4 = /^(\d+\.?){4}\/\d+/;
const regIpV6 = /^(([\da-fA-F]+)?:)+\/\d+/;
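// Illustrative matches (sample ranges as published by Cloudflare):
//   regIpV4.test('173.245.48.0/20') -> true
//   regIpV6.test('2400:cb00::/32')  -> true
//   regIpV4.test('not-a-range')     -> false, for both the old and the stricter anchored pattern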
const internalIpRanges = {
interval_timeout: 1000 * 60 * 60,
interval: null,
interval_timeout: 1000 * 60 * 60 * 6, // 6 hours
interval: null,
interval_processing: false,
iteration_count: 0,
initTimer: () => {
logger.info("IP Ranges Renewal Timer initialized");
logger.info('IP Ranges Renewal Timer initialized');
internalIpRanges.interval = setInterval(internalIpRanges.fetch, internalIpRanges.interval_timeout);
},
fetchUrl: async (url) => {
const res = await fetch(url, {
headers: { "User-Agent": `NPMplus/${pjson.version}` },
fetchUrl: (url) => {
return new Promise((resolve, reject) => {
logger.info('Fetching ' + url);
return https.get(url, (res) => {
res.setEncoding('utf8');
let raw_data = '';
res.on('data', (chunk) => {
raw_data += chunk;
});
res.on('end', () => {
resolve(raw_data);
});
}).on('error', (err) => {
reject(err);
});
});
if (!res.ok) {
throw new Error(`Status code: ${res.status}`);
}
return await res.text();
},
/**
* Triggered at startup and then later by a timer, this will fetch the ip ranges from services and apply them to nginx.
*/
fetch: async () => {
if (internalIpRanges.interval_processing) {
return;
}
fetch: () => {
if (!internalIpRanges.interval_processing) {
internalIpRanges.interval_processing = true;
logger.info('Fetching IP Ranges from online services...');
internalIpRanges.interval_processing = true;
logger.info("Fetching IP Ranges from online services...");
let ip_ranges = [];
try {
const [v4Data, v6Data] = await Promise.all([
internalIpRanges.fetchUrl(CLOUDFLARE_V4_URL),
internalIpRanges.fetchUrl(CLOUDFLARE_V6_URL),
]);
return internalIpRanges.fetchUrl(CLOUDFRONT_URL)
.then((cloudfront_data) => {
let data = JSON.parse(cloudfront_data);
const v4Ranges = v4Data
.split("\n")
.map((line) => line.trim())
.filter((line) => regIpV4.test(line));
const v6Ranges = v6Data
.split("\n")
.map((line) => line.trim())
.filter((line) => regIpV6.test(line));
if (data && typeof data.prefixes !== 'undefined') {
data.prefixes.map((item) => {
if (item.service === 'CLOUDFRONT') {
ip_ranges.push(item.ip_prefix);
}
});
}
const ip_ranges = [...v4Ranges, ...v6Ranges];
if (data && typeof data.ipv6_prefixes !== 'undefined') {
data.ipv6_prefixes.map((item) => {
if (item.service === 'CLOUDFRONT') {
ip_ranges.push(item.ipv6_prefix);
}
});
}
})
.then(() => {
return internalIpRanges.fetchUrl(CLOUDFARE_V4_URL);
})
.then((cloudfare_data) => {
let items = cloudfare_data.split('\n').filter((line) => regIpV4.test(line));
ip_ranges = [... ip_ranges, ... items];
})
.then(() => {
return internalIpRanges.fetchUrl(CLOUDFARE_V6_URL);
})
.then((cloudfare_data) => {
let items = cloudfare_data.split('\n').filter((line) => regIpV6.test(line));
ip_ranges = [... ip_ranges, ... items];
})
.then(() => {
let clean_ip_ranges = [];
ip_ranges.map((range) => {
if (range) {
clean_ip_ranges.push(range);
}
});
if (await internalIpRanges.generateConfig(ip_ranges)) {
await internalNginx.reload();
}
} catch (err) {
logger.error(err.message);
} finally {
internalIpRanges.interval_processing = false;
return internalIpRanges.generateConfig(clean_ip_ranges)
.then(() => {
if (internalIpRanges.iteration_count) {
// Reload nginx
return internalNginx.reload();
}
});
})
.then(() => {
internalIpRanges.interval_processing = false;
internalIpRanges.iteration_count++;
})
.catch((err) => {
logger.error(err.message);
internalIpRanges.interval_processing = false;
});
}
},
/**
* @param {Array} ip_ranges
* @returns {Promise<boolean>}
* @returns {Promise}
*/
generateConfig: async (ip_ranges) => {
try {
const renderEngine = utils.getRenderEngine();
const template = await readFile(`${__dirname}/../templates/ip_ranges.conf`, { encoding: "utf8" });
const newConfig = await renderEngine.parseAndRender(template, { ip_ranges: ip_ranges });
const filePath = "/usr/local/nginx/conf/conf.d/ip_ranges.conf";
generateConfig: (ip_ranges) => {
const renderEngine = utils.getRenderEngine();
return new Promise((resolve, reject) => {
let template = null;
let filename = '/data/nginx/ip_ranges.conf';
try {
const oldConfig = await readFile(filePath, {
encoding: "utf8",
});
if (oldConfig === newConfig) {
logger.info("Not updating Cloudflared IPs");
return false;
}
} catch {}
template = fs.readFileSync(__dirname + '/../templates/ip_ranges.conf', {encoding: 'utf8'});
} catch (err) {
reject(new error.ConfigurationError(err.message));
return;
}
await writeFile(filePath, newConfig, { encoding: "utf8" });
logger.info("Updated Cloudflared IPs");
return true;
} catch (err) {
logger.error(`Error updating Cloudflare IPs: ${err.message}`);
return false;
}
},
renderEngine
.parseAndRender(template, {ip_ranges: ip_ranges})
.then((config_text) => {
fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
resolve(true);
})
.catch((err) => {
logger.warn('Could not write ' + filename + ':', err.message);
reject(new error.ConfigurationError(err.message));
});
});
}
};
export default internalIpRanges;
module.exports = internalIpRanges;

View File

@@ -1,22 +1,21 @@
import { readFile, rename, rm, writeFile } from "node:fs/promises";
import { dirname } from "node:path";
import { domainToASCII, fileURLToPath } from "node:url";
import _ from "lodash";
import errs from "../lib/error.js";
import utils from "../lib/utils.js";
import { debug, nginx as logger } from "../logger.js";
const _ = require('lodash');
const fs = require('fs');
const logger = require('../logger').nginx;
const config = require('../lib/config');
const utils = require('../lib/utils');
const error = require('../lib/error');
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const NgxPidFilePath = '/usr/local/nginx/logs/nginx.pid';
const internalNginx = {
/**
* This will:
* - test the nginx config first to make sure it's OK
* - create / recreate the config for the host
* - test again
* - IF OK: update the meta with online status
* - IF BAD: update the meta with offline status and rename the config
* - IF BAD: update the meta with offline status and remove the config entirely
* - then reload nginx
*
* @param {Object|String} model
@@ -24,81 +23,112 @@ const internalNginx = {
* @param {Object} host
* @returns {Promise}
*/
configure: async (model, host_type, host) => {
configure: (model, host_type, host) => {
let combined_meta = {};
await internalNginx.deleteConfig(host_type, host);
await internalNginx.generateConfig(host_type, host);
return internalNginx.test()
.then(() => {
// Nginx is OK
// We're deleting this config regardless.
// Don't throw errors, as the file may not exist at all
// Delete the .err file too
return internalNginx.deleteConfig(host_type, host, false, true);
})
.then(() => {
return internalNginx.generateConfig(host_type, host);
})
.then(() => {
// Test nginx again and update meta with result
return internalNginx.test()
.then(() => {
// nginx is ok
combined_meta = _.assign({}, host.meta, {
nginx_online: true,
nginx_err: null
});
try {
await internalNginx.test();
combined_meta = _.assign({}, host.meta, {
nginx_online: true,
nginx_err: null,
return model
.query()
.where('id', host.id)
.patch({
meta: combined_meta
});
})
.catch((err) => {
// Remove the error_log line because it's a docker-ism false positive that doesn't need to be reported.
// It will always look like this:
// nginx: [alert] could not open error log file: open() "/dev/null" failed (6: No such device or address)
let valid_lines = [];
let err_lines = err.message.split('\n');
err_lines.map(function (line) {
if (line.indexOf('/dev/null') === -1) {
valid_lines.push(line);
}
});
if (config.debug()) {
logger.error('Nginx test failed:', valid_lines.join('\n'));
}
// config is bad, update meta and delete config
combined_meta = _.assign({}, host.meta, {
nginx_online: false,
nginx_err: valid_lines.join('\n')
});
return model
.query()
.where('id', host.id)
.patch({
meta: combined_meta
})
.then(() => {
internalNginx.renameConfigAsError(host_type, host);
})
.then(() => {
return internalNginx.deleteConfig(host_type, host, true);
});
});
})
.then(() => {
return internalNginx.reload();
})
.then(() => {
return combined_meta;
});
await model.query().where("id", host.id).patch({
meta: combined_meta,
});
} catch (err) {
logger.error(err.message);
// config is bad, update meta and rename config
combined_meta = _.assign({}, host.meta, {
nginx_online: false,
nginx_err: err.message,
});
await model.query().where("id", host.id).patch({
meta: combined_meta,
});
await internalNginx.renameConfigAsError(host_type, host);
}
await internalNginx.reload();
return combined_meta;
},
/**
* @returns {Promise}
*/
test: async () => {
return utils.execFile("nginx", ["-tq"]);
test: () => {
if (config.debug()) {
logger.info('Testing Nginx configuration');
}
return utils.exec('nginx -t -g "error_log off;"');
},
/**
* @returns {Promise}
*/
reload: async () => {
if (process.env.ACME_OCSP_STAPLING === "true") {
try {
await utils.execFile("certbot-ocsp-fetcher.sh", [
"-c",
"/data/tls/certbot/live",
"-o",
"/data/tls/certbot/live",
"--no-reload-webserver",
"--quiet",
]);
} catch {}
}
if (process.env.CUSTOM_OCSP_STAPLING === "true") {
try {
await utils.execFile("certbot-ocsp-fetcher.sh", [
"-c",
"/data/tls/custom",
"-o",
"/data/tls/custom",
"--no-reload-webserver",
"--quiet",
]);
} catch {}
}
await internalNginx.test();
return utils.execFile("nginx", ["-s", "reload"]);
reload: () => {
return internalNginx.test()
.then(() => {
if (fs.existsSync(NgxPidFilePath)) {
const ngxPID = fs.readFileSync(NgxPidFilePath, 'utf8').trim();
if (ngxPID.length > 0) {
logger.info('Killing Nginx');
utils.exec(`kill ${ngxPID}`);
}
}
logger.info('Starting Nginx after three seconds');
setTimeout(() => {
utils.execfg('nginx');
}, 3000);
});
},
/**
@@ -107,10 +137,10 @@ const internalNginx = {
* @returns {String}
*/
getConfigName: (host_type, host_id) => {
if (host_type === "default") {
return "/usr/local/nginx/conf/conf.d/default.conf";
if (host_type === 'default') {
return '/data/nginx/default.conf';
}
return `/data/nginx/${internalNginx.getFileFriendlyHostType(host_type)}/${host_id}.conf`;
return '/data/nginx/' + internalNginx.getFileFriendlyHostType(host_type) + '/' + host_id + '.conf';
},
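// Illustrative outputs (host ids are made up):
//   getConfigName('proxy_host', 7)       -> '/data/nginx/proxy_host/7.conf'
//   getConfigName('redirection_host', 3) -> '/data/nginx/redirection_host/3.conf'
//   getConfigName('default')             -> the default.conf path of the respective side of this change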
/**
@@ -118,40 +148,44 @@ const internalNginx = {
* @param {Object} host
* @returns {Promise}
*/
renderLocations: async (host) => {
let template;
renderLocations: (host) => {
return new Promise((resolve, reject) => {
let template;
try {
template = await readFile(`${__dirname}/../templates/_proxy_host_custom_location.conf`, {
encoding: "utf8",
});
} catch (err) {
throw new errs.ConfigurationError(err.message);
}
const renderEngine = utils.getRenderEngine();
let renderedLocations = "";
for (const location of host.locations) {
if (location.npmplus_enabled === false) {
continue;
try {
template = fs.readFileSync(__dirname + '/../templates/_location.conf', {encoding: 'utf8'});
} catch (err) {
reject(new error.ConfigurationError(err.message));
return;
}
if (
location.forward_host.indexOf("/") > -1 &&
!location.forward_host.startsWith("/") &&
!location.forward_host.startsWith("unix")
) {
const split = location.forward_host.split("/");
const renderEngine = utils.getRenderEngine();
let renderedLocations = '';
location.forward_host = split.shift();
location.forward_path = `/${split.join("/")}`;
}
const locationRendering = async () => {
for (let i = 0; i < host.locations.length; i++) {
let locationCopy = Object.assign({}, {access_list_id: host.access_list_id}, {certificate_id: host.certificate_id},
{ssl_forced: host.ssl_forced}, {caching_enabled: host.caching_enabled}, {block_exploits: host.block_exploits},
{allow_websocket_upgrade: host.allow_websocket_upgrade}, {http2_support: host.http2_support},
{hsts_enabled: host.hsts_enabled}, {hsts_subdomains: host.hsts_subdomains}, {access_list: host.access_list},
{certificate: host.certificate}, host.locations[i]);
renderedLocations += await renderEngine.parseAndRender(template, location);
}
if (locationCopy.forward_host.indexOf('/') > -1) {
const splitted = locationCopy.forward_host.split('/');
return renderedLocations;
locationCopy.forward_host = splitted.shift();
locationCopy.forward_path = `/${splitted.join('/')}`;
}
// eslint-disable-next-line
renderedLocations += await renderEngine.parseAndRender(template, locationCopy);
}
};
locationRendering().then(() => resolve(renderedLocations));
});
},
/**
@@ -159,97 +193,144 @@ const internalNginx = {
* @param {Object} host
* @returns {Promise}
*/
generateConfig: async (host_type, host_row) => {
// Prevent modifying the original object:
const host = JSON.parse(JSON.stringify(host_row));
generateConfig: (host_type, host) => {
const nice_host_type = internalNginx.getFileFriendlyHostType(host_type);
if (config.debug()) {
logger.info('Generating ' + nice_host_type + ' Config:', JSON.stringify(host, null, 2));
}
const renderEngine = utils.getRenderEngine();
let template = null;
const filename = internalNginx.getConfigName(nice_host_type, host.id);
return new Promise((resolve, reject) => {
let template = null;
let filename = internalNginx.getConfigName(nice_host_type, host.id);
try {
template = await readFile(`${__dirname}/../templates/${nice_host_type}.conf`, { encoding: "utf8" });
} catch (err) {
throw new errs.ConfigurationError(err.message);
}
let origLocations;
// Manipulate the data a bit before sending it to the template
if (nice_host_type !== "default") {
host.use_default_location = true;
if (typeof host.advanced_config !== "undefined" && host.advanced_config) {
host.use_default_location = !internalNginx.advancedConfigHasDefaultLocation(host.advanced_config);
try {
template = fs.readFileSync(__dirname + '/../templates/' + nice_host_type + '.conf', {encoding: 'utf8'});
} catch (err) {
reject(new error.ConfigurationError(err.message));
return;
}
}
// For redirection hosts, if the scheme is not http or https, set it to $scheme
if (
nice_host_type === "redirection_host" &&
["http", "https"].indexOf(host.forward_scheme.toLowerCase()) === -1
) {
host.forward_scheme = "$scheme";
}
let locationsPromise;
let origLocations;
if (host.locations) {
_.map(host.locations, (location) => {
if (location.path === "/" && location.location_type !== "= " && location.npmplus_enabled !== false) {
host.use_default_location = false;
}
if (location.npmplus_auth_request === "anubis") {
host.create_anubis_locations = true;
}
if (location.npmplus_auth_request === "tinyauth") {
host.create_tinyauth_locations = true;
}
if (location.npmplus_auth_request === "authelia") {
host.create_authelia_locations = true;
}
if (
location.npmplus_auth_request === "authentik" ||
location.npmplus_auth_request === "authentik-send-basic-auth"
) {
host.create_authentik_locations = true;
// Manipulate the data a bit before sending it to the template
if (nice_host_type !== 'default') {
host.use_default_location = true;
if (typeof host.advanced_config !== 'undefined' && host.advanced_config) {
host.use_default_location = !internalNginx.advancedConfigHasDefaultLocation(host.advanced_config);
}
}
if (host.locations) {
//logger.info ('host.locations = ' + JSON.stringify(host.locations, null, 2));
origLocations = [].concat(host.locations);
locationsPromise = internalNginx.renderLocations(host).then((renderedLocations) => {
host.locations = renderedLocations;
});
// Allow someone who is using / custom location path to use it, and skip the default / location
_.map(host.locations, (location) => {
if (location.path === '/') {
host.use_default_location = false;
}
});
} else {
locationsPromise = Promise.resolve();
}
// Set the IPv6 setting for the host
host.ipv6 = internalNginx.ipv6Enabled();
locationsPromise.then(() => {
renderEngine
.parseAndRender(template, host)
.then((config_text) => {
fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
if (config.debug()) {
logger.success('Wrote config:', filename, config_text);
}
// Restore locations array
host.locations = origLocations;
resolve(true);
})
.catch((err) => {
if (config.debug()) {
logger.warn('Could not write ' + filename + ':', err.message);
}
reject(new error.ConfigurationError(err.message));
});
});
});
},
host.locations = await internalNginx.renderLocations(host);
/**
* This generates a temporary nginx config listening on port 80 for the domain names listed
* in the certificate setup. It allows the certbot acme challenge to be requested by certbot
* when requesting a certificate without having a hostname set up already.
*
* @param {Object} certificate
* @returns {Promise}
*/
generateLetsEncryptRequestConfig: (certificate) => {
if (config.debug()) {
logger.info('Generating certbot Request Config:', certificate);
}
if (
host.forward_host &&
host.forward_host.indexOf("/") > -1 &&
!host.forward_host.startsWith("/") &&
!host.forward_host.startsWith("unix")
) {
const split = host.forward_host.split("/");
const renderEngine = utils.getRenderEngine();
host.forward_host = split.shift();
host.forward_path = `/${split.join("/")}`;
}
return new Promise((resolve, reject) => {
let template = null;
let filename = '/usr/local/nginx/conf/conf.d/certbot_' + certificate.id + '.conf';
if (host.domain_names) {
host.server_names = host.domain_names.map((domain_name) => domainToASCII(domain_name) || domain_name);
}
host.env = process.env;
try {
const config_text = await renderEngine.parseAndRender(template, host);
await writeFile(filename, config_text, { encoding: "utf8" });
debug(logger, "Wrote config:", filename);
if (process.env.DISABLE_NGINX_BEAUTIFIER === "false") {
await utils.execFile("nginxbeautifier", ["-s", "4", filename]).catch(() => {});
try {
template = fs.readFileSync(__dirname + '/../templates/certbot-request.conf', {encoding: 'utf8'});
} catch (err) {
reject(new error.ConfigurationError(err.message));
return;
}
return true;
certificate.ipv6 = internalNginx.ipv6Enabled();
renderEngine
.parseAndRender(template, certificate)
.then((config_text) => {
fs.writeFileSync(filename, config_text, {encoding: 'utf8'});
if (config.debug()) {
logger.success('Wrote config:', filename, config_text);
}
resolve(true);
})
.catch((err) => {
if (config.debug()) {
logger.warn('Could not write ' + filename + ':', err.message);
}
reject(new error.ConfigurationError(err.message));
});
});
},
/**
* A simple wrapper around unlinkSync that writes to the logger
*
* @param {String} filename
*/
deleteFile: (filename) => {
logger.debug('Deleting file: ' + filename);
try {
fs.unlinkSync(filename);
} catch (err) {
debug(logger, `Could not write ${filename}:`, err.message);
throw new errs.ConfigurationError(err.message);
logger.debug('Could not delete file:', JSON.stringify(err, null, 2));
}
},
@@ -259,30 +340,40 @@ const internalNginx = {
* @returns String
*/
getFileFriendlyHostType: (host_type) => {
return host_type.replace(/-/g, "_");
return host_type.replace(new RegExp('-', 'g'), '_');
},
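// e.g. getFileFriendlyHostType('dead-host') -> 'dead_host',
//      getFileFriendlyHostType('redirection-host') -> 'redirection_host'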
/**
* This removes the temporary nginx config file generated by `generateLetsEncryptRequestConfig`
*
* @param {Object} certificate
* @returns {Promise}
*/
deleteLetsEncryptRequestConfig: (certificate) => {
const config_file = '/usr/local/nginx/conf/conf.d/certbot_' + certificate.id + '.conf';
return new Promise((resolve/*, reject*/) => {
internalNginx.deleteFile(config_file);
resolve();
});
},
/**
* @param {String} host_type
* @param {Object} [host]
* @param {Boolean} [delete_err_file]
* @returns {Promise}
*/
deleteConfig: async (host_type, host) => {
const config_file = internalNginx.getConfigName(
internalNginx.getFileFriendlyHostType(host_type),
typeof host === "undefined" ? 0 : host.id,
);
deleteConfig: (host_type, host, delete_err_file) => {
const config_file = internalNginx.getConfigName(internalNginx.getFileFriendlyHostType(host_type), typeof host === 'undefined' ? 0 : host.id);
const config_file_err = config_file + '.err';
const filesToDelete = [config_file, `${config_file}.err`];
for (const filename of filesToDelete) {
try {
debug(logger, `Deleting file: ${filename}`);
await rm(filename, { force: true });
} catch (err) {
debug(logger, "Could not delete file:", JSON.stringify(err, null, 2));
return new Promise((resolve/*, reject*/) => {
internalNginx.deleteFile(config_file);
if (delete_err_file) {
internalNginx.deleteFile(config_file_err);
}
}
resolve();
});
},
/**
@@ -290,38 +381,68 @@ const internalNginx = {
* @param {Object} [host]
* @returns {Promise}
*/
renameConfigAsError: async (host_type, host) => {
const config_file = internalNginx.getConfigName(
internalNginx.getFileFriendlyHostType(host_type),
typeof host === "undefined" ? 0 : host.id,
);
renameConfigAsError: (host_type, host) => {
const config_file = internalNginx.getConfigName(internalNginx.getFileFriendlyHostType(host_type), typeof host === 'undefined' ? 0 : host.id);
const config_file_err = config_file + '.err';
try {
await rename(config_file, `${config_file}.err`);
} catch {}
return new Promise((resolve/*, reject*/) => {
fs.unlink(config_file, () => {
// ignore result, continue
fs.rename(config_file, config_file_err, () => {
// also ignore result, as this is a debugging informative file anyway
resolve();
});
});
});
},
/**
* @param {String} hostType
* @param {String} host_type
* @param {Array} hosts
* @returns {Promise}
*/
bulkGenerateConfigs: async (model, hostType, hosts) => {
const results = [];
bulkGenerateConfigs: (host_type, hosts) => {
let promises = [];
hosts.map(function (host) {
promises.push(internalNginx.generateConfig(host_type, host));
});
for (const host of hosts) {
const result = await internalNginx.configure(model, hostType, host);
results.push(result);
}
return Promise.all(promises);
},
return results;
/**
* @param {String} host_type
* @param {Array} hosts
* @returns {Promise}
*/
bulkDeleteConfigs: (host_type, hosts) => {
let promises = [];
hosts.map(function (host) {
promises.push(internalNginx.deleteConfig(host_type, host, true));
});
return Promise.all(promises);
},
/**
* @param {string} config
* @returns {boolean}
*/
advancedConfigHasDefaultLocation: (cfg) => !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im),
advancedConfigHasDefaultLocation: function (cfg) {
return !!cfg.match(/^(?:.*;)?\s*?location\s*?\/\s*?{/im);
},
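// Illustrative sketch (made-up snippets): the pattern flags an advanced config that
// already declares the bare default location, so the template can skip emitting its own.
//   advancedConfigHasDefaultLocation('location / {\n    return 404;\n}')                           -> true
//   advancedConfigHasDefaultLocation('location /api/ {\n    proxy_pass http://127.0.0.1:3000;\n}') -> false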
/**
* @returns {boolean}
*/
ipv6Enabled: function () {
if (typeof process.env.DISABLE_IPV6 !== 'undefined') {
const disabled = process.env.DISABLE_IPV6.toLowerCase();
return !(disabled === 'on' || disabled === 'true' || disabled === '1' || disabled === 'yes');
}
return true;
}
};
export default internalNginx;
module.exports = internalNginx;

View File

@@ -1,106 +1,100 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import proxyHostModel from "../models/proxy_host.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const proxyHostModel = require('../models/proxy_host');
const internalHost = require('./host');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate');
const omissions = () => {
return ["is_deleted", "owner.is_deleted"];
};
function omissions () {
return ['is_deleted'];
}
const internalProxyHost = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: (access, data) => {
let thisData = data;
const createCertificate = thisData.certificate_id === "new";
let create_certificate = data.certificate_id === 'new';
if (createCertificate) {
delete thisData.certificate_id;
if (create_certificate) {
delete data.certificate_id;
}
return access
.can("proxy_hosts:create", thisData)
return access.can('proxy_hosts:create', data)
.then(() => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
let domain_name_check_promises = [];
thisData.domain_names.map((domain_name) => {
data.domain_names.map(function (domain_name) {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name));
return true;
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
return Promise.all(domain_name_check_promises)
.then((check_results) => {
check_results.map(function (result) {
if (result.is_taken) {
throw new error.ValidationError(result.hostname + ' is already in use');
}
});
});
});
})
.then(() => {
// At this point the domains should have been checked
thisData.owner_user_id = access.token.getUserId(1);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData);
data.owner_user_id = access.token.getUserId(1);
data = internalHost.cleanSslHstsData(data);
// Fix for db field not having a default value
// for this optional field.
if (typeof thisData.advanced_config === "undefined") {
thisData.advanced_config = "";
}
return proxyHostModel.query().insertAndFetch(thisData).then(utils.omitRow(omissions()));
return proxyHostModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
})
.then((row) => {
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, thisData)
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, data)
.then((cert) => {
// update host with cert id
return internalProxyHost.update(access, {
id: row.id,
certificate_id: cert.id,
id: row.id,
certificate_id: cert.id
});
})
.then(() => {
return row;
});
} else {
return row;
}
return row;
})
.then((row) => {
// re-fetch with cert
return internalProxyHost.get(access, {
id: row.id,
expand: ["certificate", "owner", "access_list.[clients,items]"],
id: row.id,
expand: ['certificate', 'owner', 'access_list.[clients,items]']
});
})
.then((row) => {
// Configure nginx
return internalNginx.configure(proxyHostModel, "proxy_host", row).then(() => {
return row;
});
return internalNginx.configure(proxyHostModel, 'proxy_host', row)
.then(() => {
return row;
});
})
.then((row) => {
// Audit log
thisData.meta = _.assign({}, thisData.meta || {}, row.meta);
data.meta = _.assign({}, data.meta || {}, row.meta);
// Add to audit log
return internalAuditLog
.add(access, {
action: "created",
object_type: "proxy-host",
object_id: row.id,
meta: thisData,
})
return internalAuditLog.add(access, {
action: 'created',
object_type: 'proxy-host',
object_id: row.id,
meta: data
})
.then(() => {
return row;
});
@@ -114,110 +108,100 @@ const internalProxyHost = {
* @return {Promise}
*/
update: (access, data) => {
let thisData = data;
const createCertificate = thisData.certificate_id === "new";
let create_certificate = data.certificate_id === 'new';
if (createCertificate) {
delete thisData.certificate_id;
if (create_certificate) {
delete data.certificate_id;
}
return access
.can("proxy_hosts:update", thisData.id)
return access.can('proxy_hosts:update', data.id)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
let domain_name_check_promises = [];
if (typeof thisData.domain_names !== "undefined") {
thisData.domain_names.map((domain_name) => {
return domain_name_check_promises.push(
internalHost.isHostnameTaken(domain_name, "proxy", thisData.id),
);
if (typeof data.domain_names !== 'undefined') {
data.domain_names.map(function (domain_name) {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name, 'proxy', data.id));
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
return Promise.all(domain_name_check_promises)
.then((check_results) => {
check_results.map(function (result) {
if (result.is_taken) {
throw new error.ValidationError(result.hostname + ' is already in use');
}
});
});
});
}
})
.then(() => {
return internalProxyHost.get(access, { id: thisData.id });
return internalProxyHost.get(access, {id: data.id});
})
.then((row) => {
if (row.id !== thisData.id) {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Proxy Host could not be updated, IDs do not match: ${row.id} !== ${thisData.id}`,
);
throw new error.InternalValidationError('Proxy Host could not be updated, IDs do not match: ' + row.id + ' !== ' + data.id);
}
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, {
domain_names: thisData.domain_names || row.domain_names,
meta: _.assign({}, row.meta, thisData.meta),
})
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, {
domain_names: data.domain_names || row.domain_names,
meta: _.assign({}, row.meta, data.meta)
})
.then((cert) => {
// update host with cert id
thisData.certificate_id = cert.id;
data.certificate_id = cert.id;
})
.then(() => {
return row;
});
} else {
return row;
}
return row;
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
data,
);
data = _.assign({}, {
domain_names: row.domain_names
}, data);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData, row);
data = internalHost.cleanSslHstsData(data, row);
return proxyHostModel
.query()
.where({ id: thisData.id })
.patch(thisData)
.where({id: data.id})
.patch(data)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "proxy-host",
object_id: row.id,
meta: thisData,
})
return internalAuditLog.add(access, {
action: 'updated',
object_type: 'proxy-host',
object_id: row.id,
meta: data
})
.then(() => {
return saved_row;
});
});
})
.then(() => {
return internalProxyHost
.get(access, {
id: thisData.id,
expand: ["owner", "certificate", "access_list.[clients,items]"],
})
return internalProxyHost.get(access, {
id: data.id,
expand: ['owner', 'certificate', 'access_list.[clients,items]']
})
.then((row) => {
if (!row.enabled) {
// No need to add nginx config if host is disabled
return row;
}
// Configure nginx
return internalNginx.configure(proxyHostModel, "proxy_host", row).then((new_meta) => {
row.meta = new_meta;
return _.omit(internalHost.cleanRowCertificateMeta(row), omissions());
});
return internalNginx.configure(proxyHostModel, 'proxy_host', row)
.then((new_meta) => {
row.meta = new_meta;
row = internalHost.cleanRowCertificateMeta(row);
return _.omit(row, omissions());
});
});
});
},
@@ -231,37 +215,39 @@ const internalProxyHost = {
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
return access
.can("proxy_hosts:get", thisData.id)
if (typeof data === 'undefined') {
data = {};
}
return access.can('proxy_hosts:get', data.id)
.then((access_data) => {
const query = proxyHostModel
let query = proxyHostModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph(proxyHostModel.defaultAllowGraph)
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner,access_list,access_list.[clients,items],certificate]')
.first();
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(thisData.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
const thisRow = internalHost.cleanRowCertificateMeta(row);
row = internalHost.cleanRowCertificateMeta(row);
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(row, thisData.omit);
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
}
return thisRow;
return row;
});
},
@@ -273,35 +259,35 @@ const internalProxyHost = {
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("proxy_hosts:delete", data.id)
return access.can('proxy_hosts:delete', data.id)
.then(() => {
return internalProxyHost.get(access, { id: data.id });
return internalProxyHost.get(access, {id: data.id});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
return proxyHostModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
is_deleted: 1,
is_deleted: 1
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("proxy_host", row).then(() => {
return internalNginx.reload();
});
return internalNginx.deleteConfig('proxy_host', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "proxy-host",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'deleted',
object_type: 'proxy-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -318,41 +304,39 @@ const internalProxyHost = {
* @returns {Promise}
*/
enable: (access, data) => {
return access
.can("proxy_hosts:update", data.id)
return access.can('proxy_hosts:update', data.id)
.then(() => {
return internalProxyHost.get(access, {
id: data.id,
expand: ["certificate", "owner", "access_list"],
id: data.id,
expand: ['certificate', 'owner', 'access_list']
});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Host is already enabled");
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (row.enabled) {
throw new error.ValidationError('Host is already enabled');
}
row.enabled = 1;
return proxyHostModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
enabled: 1,
enabled: 1
})
.then(() => {
// Configure nginx
return internalNginx.configure(proxyHostModel, "proxy_host", row);
return internalNginx.configure(proxyHostModel, 'proxy_host', row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "enabled",
object_type: "proxy-host",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'enabled',
object_type: 'proxy-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -369,40 +353,39 @@ const internalProxyHost = {
* @returns {Promise}
*/
disable: (access, data) => {
return access
.can("proxy_hosts:update", data.id)
return access.can('proxy_hosts:update', data.id)
.then(() => {
return internalProxyHost.get(access, { id: data.id });
return internalProxyHost.get(access, {id: data.id});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Host is already disabled");
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (!row.enabled) {
throw new error.ValidationError('Host is already disabled');
}
row.enabled = 0;
return proxyHostModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
enabled: 0,
enabled: 0
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("proxy_host", row).then(() => {
return internalNginx.reload();
});
return internalNginx.deleteConfig('proxy_host', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "disabled",
object_type: "proxy-host",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'disabled',
object_type: 'proxy-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -419,36 +402,40 @@ const internalProxyHost = {
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: async (access, expand, searchQuery) => {
const accessData = await access.can("proxy_hosts:list");
getAll: (access, expand, search_query) => {
return access.can('proxy_hosts:list')
.then((access_data) => {
let query = proxyHostModel
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,access_list,certificate]')
.orderBy('domain_names', 'ASC');
const query = proxyHostModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(proxyHostModel.defaultAllowGraph)
.orderBy(castJsonIfNeed("domain_names"), "ASC");
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (accessData.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
}
// Query is used for searching
if (typeof search_query === 'string') {
query.where(function () {
this.where('domain_names', 'like', '%' + search_query + '%');
});
}
// Query is used for searching
if (typeof searchQuery === "string" && searchQuery.length > 0) {
query.where(function () {
this.where(castJsonIfNeed("domain_names"), "like", `%${searchQuery}%`);
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const rows = await query.then(utils.omitRows(omissions()));
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
},
/**
@@ -459,16 +446,20 @@ const internalProxyHost = {
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = proxyHostModel.query().count("id as count").where("is_deleted", 0);
let query = proxyHostModel
.query()
.count('id as count')
.where('is_deleted', 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
if (visibility !== 'all') {
query.andWhere('owner_user_id', user_id);
}
return query.first().then((row) => {
return Number.parseInt(row.count, 10);
});
},
return query.first()
.then((row) => {
return parseInt(row.count, 10);
});
}
};
export default internalProxyHost;
module.exports = internalProxyHost;

View File

@@ -1,73 +1,66 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import redirectionHostModel from "../models/redirection_host.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const redirectionHostModel = require('../models/redirection_host');
const internalHost = require('./host');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const internalCertificate = require('./certificate');
const omissions = () => {
return ["is_deleted"];
};
function omissions () {
return ['is_deleted'];
}
const internalRedirectionHost = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: (access, data) => {
let thisData = data || {};
const createCertificate = thisData.certificate_id === "new";
let create_certificate = data.certificate_id === 'new';
if (createCertificate) {
delete thisData.certificate_id;
if (create_certificate) {
delete data.certificate_id;
}
return access
.can("redirection_hosts:create", thisData)
return access.can('redirection_hosts:create', data)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
let domain_name_check_promises = [];
thisData.domain_names.map((domain_name) => {
data.domain_names.map(function (domain_name) {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name));
return true;
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
return Promise.all(domain_name_check_promises)
.then((check_results) => {
check_results.map(function (result) {
if (result.is_taken) {
throw new error.ValidationError(result.hostname + ' is already in use');
}
});
});
});
})
.then(() => {
// At this point the domains should have been checked
thisData.owner_user_id = access.token.getUserId(1);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData);
data.owner_user_id = access.token.getUserId(1);
data = internalHost.cleanSslHstsData(data);
// Fix for db field not having a default value
// for this optional field.
if (typeof data.advanced_config === "undefined") {
data.advanced_config = "";
}
return redirectionHostModel.query().insertAndFetch(thisData).then(utils.omitRow(omissions()));
return redirectionHostModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
})
.then((row) => {
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, thisData)
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, data)
.then((cert) => {
// update host with cert id
return internalRedirectionHost.update(access, {
id: row.id,
certificate_id: cert.id,
id: row.id,
certificate_id: cert.id
});
})
.then(() => {
@@ -79,27 +72,27 @@ const internalRedirectionHost = {
.then((row) => {
// re-fetch with cert
return internalRedirectionHost.get(access, {
id: row.id,
expand: ["certificate", "owner"],
id: row.id,
expand: ['certificate', 'owner']
});
})
.then((row) => {
// Configure nginx
return internalNginx.configure(redirectionHostModel, "redirection_host", row).then(() => {
return row;
});
return internalNginx.configure(redirectionHostModel, 'redirection_host', row)
.then(() => {
return row;
});
})
.then((row) => {
thisData.meta = _.assign({}, thisData.meta || {}, row.meta);
data.meta = _.assign({}, data.meta || {}, row.meta);
// Add to audit log
return internalAuditLog
.add(access, {
action: "created",
object_type: "redirection-host",
object_id: row.id,
meta: thisData,
})
return internalAuditLog.add(access, {
action: 'created',
object_type: 'redirection-host',
object_id: row.id,
meta: data
})
.then(() => {
return row;
});
@@ -113,107 +106,94 @@ const internalRedirectionHost = {
* @return {Promise}
*/
update: (access, data) => {
let thisData = data || {};
const createCertificate = thisData.certificate_id === "new";
let create_certificate = data.certificate_id === 'new';
if (createCertificate) {
delete thisData.certificate_id;
if (create_certificate) {
delete data.certificate_id;
}
return access
.can("redirection_hosts:update", thisData.id)
return access.can('redirection_hosts:update', data.id)
.then((/*access_data*/) => {
// Get a list of the domain names and check each of them against existing records
const domain_name_check_promises = [];
let domain_name_check_promises = [];
if (typeof thisData.domain_names !== "undefined") {
thisData.domain_names.map((domain_name) => {
domain_name_check_promises.push(
internalHost.isHostnameTaken(domain_name, "redirection", thisData.id),
);
return true;
if (typeof data.domain_names !== 'undefined') {
data.domain_names.map(function (domain_name) {
domain_name_check_promises.push(internalHost.isHostnameTaken(domain_name, 'redirection', data.id));
});
return Promise.all(domain_name_check_promises).then((check_results) => {
check_results.map((result) => {
if (result.is_taken) {
throw new errs.ValidationError(`${result.hostname} is already in use`);
}
return true;
return Promise.all(domain_name_check_promises)
.then((check_results) => {
check_results.map(function (result) {
if (result.is_taken) {
throw new error.ValidationError(result.hostname + ' is already in use');
}
});
});
});
}
})
.then(() => {
return internalRedirectionHost.get(access, { id: thisData.id });
return internalRedirectionHost.get(access, {id: data.id});
})
.then((row) => {
if (row.id !== thisData.id) {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Redirection Host could not be updated, IDs do not match: ${row.id} !== ${thisData.id}`,
);
throw new error.InternalValidationError('Redirection Host could not be updated, IDs do not match: ' + row.id + ' !== ' + data.id);
}
if (createCertificate) {
return internalCertificate
.createQuickCertificate(access, {
domain_names: thisData.domain_names || row.domain_names,
meta: _.assign({}, row.meta, thisData.meta),
})
if (create_certificate) {
return internalCertificate.createQuickCertificate(access, {
domain_names: data.domain_names || row.domain_names,
meta: _.assign({}, row.meta, data.meta)
})
.then((cert) => {
// update host with cert id
thisData.certificate_id = cert.id;
data.certificate_id = cert.id;
})
.then(() => {
return row;
});
} else {
return row;
}
return row;
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
thisData,
);
data = _.assign({}, {
domain_names: row.domain_names
}, data);
thisData = internalHost.cleanSslHstsData(createCertificate, thisData, row);
data = internalHost.cleanSslHstsData(data, row);
return redirectionHostModel
.query()
.where({ id: thisData.id })
.patch(thisData)
.where({id: data.id})
.patch(data)
.then((saved_row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "redirection-host",
object_id: row.id,
meta: thisData,
})
return internalAuditLog.add(access, {
action: 'updated',
object_type: 'redirection-host',
object_id: row.id,
meta: data
})
.then(() => {
return _.omit(saved_row, omissions());
});
});
})
.then(() => {
return internalRedirectionHost
.get(access, {
id: thisData.id,
expand: ["owner", "certificate"],
})
return internalRedirectionHost.get(access, {
id: data.id,
expand: ['owner', 'certificate']
})
.then((row) => {
// Configure nginx
return internalNginx
.configure(redirectionHostModel, "redirection_host", row)
return internalNginx.configure(redirectionHostModel, 'redirection_host', row)
.then((new_meta) => {
row.meta = new_meta;
return _.omit(internalHost.cleanRowCertificateMeta(row), omissions());
row = internalHost.cleanRowCertificateMeta(row);
return _.omit(row, omissions());
});
});
});
@@ -228,38 +208,39 @@ const internalRedirectionHost = {
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
return access
.can("redirection_hosts:get", thisData.id)
if (typeof data === 'undefined') {
data = {};
}
return access.can('redirection_hosts:get', data.id)
.then((access_data) => {
const query = redirectionHostModel
let query = redirectionHostModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph(redirectionHostModel.defaultAllowGraph)
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner,certificate]')
.first();
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
let thisRow = row;
if (!thisRow || !thisRow.id) {
throw new errs.ItemNotFoundError(thisData.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
thisRow = internalHost.cleanRowCertificateMeta(thisRow);
row = internalHost.cleanRowCertificateMeta(row);
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(thisRow, thisData.omit);
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
}
return thisRow;
return row;
});
},
@@ -271,35 +252,35 @@ const internalRedirectionHost = {
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("redirection_hosts:delete", data.id)
return access.can('redirection_hosts:delete', data.id)
.then(() => {
return internalRedirectionHost.get(access, { id: data.id });
return internalRedirectionHost.get(access, {id: data.id});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
return redirectionHostModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
is_deleted: 1,
is_deleted: 1
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("redirection_host", row).then(() => {
return internalNginx.reload();
});
return internalNginx.deleteConfig('redirection_host', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "redirection-host",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'deleted',
object_type: 'redirection-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -316,41 +297,39 @@ const internalRedirectionHost = {
* @returns {Promise}
*/
enable: (access, data) => {
return access
.can("redirection_hosts:update", data.id)
return access.can('redirection_hosts:update', data.id)
.then(() => {
return internalRedirectionHost.get(access, {
id: data.id,
expand: ["certificate", "owner"],
id: data.id,
expand: ['certificate', 'owner']
});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Host is already enabled");
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (row.enabled) {
throw new error.ValidationError('Host is already enabled');
}
row.enabled = 1;
return redirectionHostModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
enabled: 1,
enabled: 1
})
.then(() => {
// Configure nginx
return internalNginx.configure(redirectionHostModel, "redirection_host", row);
return internalNginx.configure(redirectionHostModel, 'redirection_host', row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "enabled",
object_type: "redirection-host",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'enabled',
object_type: 'redirection-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -367,40 +346,39 @@ const internalRedirectionHost = {
* @returns {Promise}
*/
disable: (access, data) => {
return access
.can("redirection_hosts:update", data.id)
return access.can('redirection_hosts:update', data.id)
.then(() => {
return internalRedirectionHost.get(access, { id: data.id });
return internalRedirectionHost.get(access, {id: data.id});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Host is already disabled");
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (!row.enabled) {
throw new error.ValidationError('Host is already disabled');
}
row.enabled = 0;
return redirectionHostModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
enabled: 0,
enabled: 0
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("redirection_host", row).then(() => {
return internalNginx.reload();
});
return internalNginx.deleteConfig('redirection_host', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "disabled",
object_type: "redirection-host",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'disabled',
object_type: 'redirection-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -418,35 +396,34 @@ const internalRedirectionHost = {
* @returns {Promise}
*/
getAll: (access, expand, search_query) => {
return access
.can("redirection_hosts:list")
return access.can('redirection_hosts:list')
.then((access_data) => {
const query = redirectionHostModel
let query = redirectionHostModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(redirectionHostModel.defaultAllowGraph)
.orderBy(castJsonIfNeed("domain_names"), "ASC");
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner,certificate]')
.orderBy('domain_names', 'ASC');
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
// Query is used for searching
if (typeof search_query === "string" && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed("domain_names"), "like", `%${search_query}%`);
this.where('domain_names', 'like', '%' + search_query + '%');
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
if (typeof expand !== 'undefined' && expand !== null && expand.indexOf('certificate') !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
@@ -462,16 +439,20 @@ const internalRedirectionHost = {
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = redirectionHostModel.query().count("id as count").where("is_deleted", 0);
let query = redirectionHostModel
.query()
.count('id as count')
.where('is_deleted', 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
if (visibility !== 'all') {
query.andWhere('owner_user_id', user_id);
}
return query.first().then((row) => {
return Number.parseInt(row.count, 10);
});
},
return query.first()
.then((row) => {
return parseInt(row.count, 10);
});
}
};
export default internalRedirectionHost;
module.exports = internalRedirectionHost;
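
omissions() is now an arrow constant, and the query pipelines pass rows through utils.omitRow(omissions()) and utils.omitRows(omissions()). Those helpers are not part of this diff; a plausible curried shape, assuming lodash, is sketched below so the .then(...) calls above are easier to follow.

import _ from "lodash";

// curried so the result drops straight into a .then(...)
const omitRow = (omissions) => (row) => _.omit(row, omissions);
const omitRows = (omissions) => (rows) => rows.map((row) => _.omit(row, omissions));

// usage, as in create() above:
// redirectionHostModel.query().insertAndFetch(data).then(omitRow(["is_deleted"]));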

View File

@@ -1,56 +0,0 @@
import { remoteVersion as logger } from "../logger.js";
import pjson from "../package.json" with { type: "json" };
const internalRemoteVersion = {
cache_timeout: 1000 * 60 * 60, // 1 hour
last_result: null,
last_fetch_time: null,
/**
* Fetch the latest version info, using a cached result if within the cache timeout period.
* @return {Promise<{current: string, latest: string, update_available: boolean}>} Version info
*/
get: async () => {
try {
if (
!internalRemoteVersion.last_result ||
!internalRemoteVersion.last_fetch_time ||
Date.now() - internalRemoteVersion.last_fetch_time > internalRemoteVersion.cache_timeout
) {
const response = await fetch("https://api.github.com/repos/ZoeyVid/NPMplus/releases/latest", {
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
},
});
if (!response.ok) {
throw new Error(`Status code: ${response.status}`);
}
const data = await response.json();
internalRemoteVersion.last_result = data;
internalRemoteVersion.last_fetch_time = Date.now();
}
} catch (error) {
logger.error("Failed to fetch remote version:", error.message);
if (!internalRemoteVersion.last_result) {
return {
current: pjson.version,
latest: "unknown",
update_available: false,
};
}
}
const latestVersion = internalRemoteVersion.last_result?.tag_name || "unknown";
const currentVersion = pjson.version;
return {
current: currentVersion,
latest: latestVersion,
update_available: currentVersion < latestVersion && currentVersion.length >= 13,
};
},
};
export default internalRemoteVersion;
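
The remote-version module above is new: it caches the latest GitHub release of NPMplus for one hour and falls back to the last cached result, or "unknown", when a refresh fails. A usage sketch follows; the import path is an assumption. Note that update_available compares the version strings lexicographically, guarded by a minimum length check on the current version.

import internalRemoteVersion from "./internal/remote-version.js"; // path assumed

const info = await internalRemoteVersion.get(); // first call fetches from the GitHub API
console.log(info); // e.g. { current: "...", latest: "...", update_available: false }

// any call within cache_timeout (1 hour) reuses last_result without a network hit
const cached = await internalRemoteVersion.get();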

View File

@@ -1,37 +1,38 @@
import internalDeadHost from "./dead-host.js";
import internalProxyHost from "./proxy-host.js";
import internalRedirectionHost from "./redirection-host.js";
import internalStream from "./stream.js";
const internalProxyHost = require('./proxy-host');
const internalRedirectionHost = require('./redirection-host');
const internalDeadHost = require('./dead-host');
const internalStream = require('./stream');
const internalReport = {
/**
* @param {Access} access
* @return {Promise}
*/
getHostsReport: (access) => {
return access
.can("reports:hosts", 1)
return access.can('reports:hosts', 1)
.then((access_data) => {
const userId = access.token.getUserId(1);
let user_id = access.token.getUserId(1);
const promises = [
internalProxyHost.getCount(userId, access_data.permission_visibility),
internalRedirectionHost.getCount(userId, access_data.permission_visibility),
internalStream.getCount(userId, access_data.permission_visibility),
internalDeadHost.getCount(userId, access_data.permission_visibility),
let promises = [
internalProxyHost.getCount(user_id, access_data.visibility),
internalRedirectionHost.getCount(user_id, access_data.visibility),
internalStream.getCount(user_id, access_data.visibility),
internalDeadHost.getCount(user_id, access_data.visibility)
];
return Promise.all(promises);
})
.then((counts) => {
return {
proxy: counts.shift(),
proxy: counts.shift(),
redirection: counts.shift(),
stream: counts.shift(),
dead: counts.shift(),
stream: counts.shift(),
dead: counts.shift()
};
});
},
}
};
export default internalReport;
module.exports = internalReport;
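
Besides the rename to userId, the new getHostsReport() fixes the scoping field: the old code read access_data.visibility, while the permission object uses permission_visibility everywhere else in these modules. Since Promise.all() resolves in the order the promises were queued, the shift() chain is equivalent to a destructuring assignment, sketched here inside the same handler:

// inside the access.can('reports:hosts', 1) handler above
const [proxy, redirection, stream, dead] = await Promise.all([
    internalProxyHost.getCount(userId, access_data.permission_visibility),
    internalRedirectionHost.getCount(userId, access_data.permission_visibility),
    internalStream.getCount(userId, access_data.permission_visibility),
    internalDeadHost.getCount(userId, access_data.permission_visibility),
]);
return { proxy, redirection, stream, dead };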

View File

@@ -1,9 +1,10 @@
import fs from "node:fs";
import errs from "../lib/error.js";
import settingModel from "../models/setting.js";
import internalNginx from "./nginx.js";
const fs = require('fs');
const error = require('../lib/error');
const settingModel = require('../models/setting');
const internalNginx = require('./nginx');
const internalSetting = {
/**
* @param {Access} access
* @param {Object} data
@@ -11,38 +12,37 @@ const internalSetting = {
* @return {Promise}
*/
update: (access, data) => {
return access
.can("settings:update", data.id)
return access.can('settings:update', data.id)
.then((/*access_data*/) => {
return internalSetting.get(access, { id: data.id });
return internalSetting.get(access, {id: data.id});
})
.then((row) => {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Setting could not be updated, IDs do not match: ${row.id} !== ${data.id}`,
);
throw new error.InternalValidationError('Setting could not be updated, IDs do not match: ' + row.id + ' !== ' + data.id);
}
return settingModel.query().where({ id: data.id }).patch(data);
return settingModel
.query()
.where({id: data.id})
.patch(data);
})
.then(() => {
return internalSetting.get(access, {
id: data.id,
id: data.id
});
})
.then((row) => {
if (row.id === "default-site") {
if (row.id === 'default-site') {
// write the html if we need to
if (row.value === "html") {
fs.writeFileSync("/data/html/index.html", row.meta.html, { encoding: "utf8" });
if (row.value === 'html') {
fs.writeFileSync('/data/nginx/etc/index.html', row.meta.html, {encoding: 'utf8'});
}
// Configure nginx
return internalNginx
.deleteConfig("default")
return internalNginx.deleteConfig('default')
.then(() => {
return internalNginx.generateConfig("default", row);
return internalNginx.generateConfig('default', row);
})
.then(() => {
return internalNginx.test();
@@ -54,8 +54,7 @@ const internalSetting = {
return row;
})
.catch((/*err*/) => {
internalNginx
.deleteConfig("default")
internalNginx.deleteConfig('default')
.then(() => {
return internalNginx.test();
})
@@ -64,11 +63,12 @@ const internalSetting = {
})
.then(() => {
// I'm being slack here I know..
throw new errs.ValidationError("Could not reconfigure Nginx. Please check logs.");
throw new error.ValidationError('Could not reconfigure Nginx. Please check logs.');
});
});
} else {
return row;
}
return row;
});
},
@@ -79,16 +79,19 @@ const internalSetting = {
* @return {Promise}
*/
get: (access, data) => {
return access
.can("settings:get", data.id)
return access.can('settings:get', data.id)
.then(() => {
return settingModel.query().where("id", data.id).first();
return settingModel
.query()
.where('id', data.id)
.first();
})
.then((row) => {
if (row) {
return row;
} else {
throw new error.ItemNotFoundError(data.id);
}
throw new errs.ItemNotFoundError(data.id);
});
},
@@ -99,13 +102,15 @@ const internalSetting = {
* @returns {*}
*/
getCount: (access) => {
return access
.can("settings:list")
return access.can('settings:list')
.then(() => {
return settingModel.query().count("id as count").first();
return settingModel
.query()
.count('id as count')
.first();
})
.then((row) => {
return Number.parseInt(row.count, 10);
return parseInt(row.count, 10);
});
},
@@ -116,10 +121,13 @@ const internalSetting = {
* @returns {Promise}
*/
getAll: (access) => {
return access.can("settings:list").then(() => {
return settingModel.query().orderBy("description", "ASC");
});
},
return access.can('settings:list')
.then(() => {
return settingModel
.query()
.orderBy('description', 'ASC');
});
}
};
export default internalSetting;
module.exports = internalSetting;
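
For the default-site setting, update() regenerates the nginx default config and verifies it, rolling back on failure; the html body now lands in /data/html/index.html instead of /data/nginx/etc/index.html. A condensed async sketch of that branch, using the module's own imports; the steps falling between the hunks are elided here exactly as they are in the diff:

import fs from "node:fs";
import errs from "../lib/error.js";
import internalNginx from "./nginx.js";

const applyDefaultSite = async (row) => {
    if (row.id !== "default-site") return row;
    if (row.value === "html") {
        fs.writeFileSync("/data/html/index.html", row.meta.html, { encoding: "utf8" });
    }
    try {
        await internalNginx.deleteConfig("default");
        await internalNginx.generateConfig("default", row);
        await internalNginx.test();
        // ...remaining steps elided between the hunks above...
        return row;
    } catch {
        // best-effort rollback: drop the broken config and re-test
        await internalNginx.deleteConfig("default");
        await internalNginx.test();
        throw new errs.ValidationError("Could not reconfigure Nginx. Please check logs.");
    }
};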

View File

@@ -1,85 +1,51 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { castJsonIfNeed } from "../lib/helpers.js";
import utils from "../lib/utils.js";
import streamModel from "../models/stream.js";
import internalAuditLog from "./audit-log.js";
import internalCertificate from "./certificate.js";
import internalHost from "./host.js";
import internalNginx from "./nginx.js";
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const streamModel = require('../models/stream');
const internalNginx = require('./nginx');
const internalAuditLog = require('./audit-log');
const omissions = () => {
return ["is_deleted", "owner.is_deleted", "certificate.is_deleted"];
};
function omissions () {
return ['is_deleted'];
}
const internalStream = {
/**
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: (access, data) => {
const create_certificate = data.certificate_id === "new";
if (create_certificate) {
delete data.certificate_id;
}
return access
.can("streams:create", data)
return access.can('streams:create', data)
.then((/*access_data*/) => {
// TODO: At this point the existing ports should have been checked
data.owner_user_id = access.token.getUserId(1);
if (typeof data.meta === "undefined") {
if (typeof data.meta === 'undefined') {
data.meta = {};
}
// streams aren't routed by domain name, so don't store domain names in the DB
const data_no_domains = structuredClone(data);
delete data_no_domains.domain_names;
return streamModel.query().insertAndFetch(data_no_domains).then(utils.omitRow(omissions()));
})
.then((row) => {
if (create_certificate) {
return internalCertificate
.createQuickCertificate(access, data)
.then((cert) => {
// update host with cert id
return internalStream.update(access, {
id: row.id,
certificate_id: cert.id,
});
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// re-fetch with cert
return internalStream.get(access, {
id: row.id,
expand: ["certificate", "owner"],
});
return streamModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
})
.then((row) => {
// Configure nginx
return internalNginx.configure(streamModel, "stream", row).then(() => {
return row;
});
return internalNginx.configure(streamModel, 'stream', row)
.then(() => {
return internalStream.get(access, {id: row.id, expand: ['owner']});
});
})
.then((row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "created",
object_type: "stream",
object_id: row.id,
meta: data,
})
return internalAuditLog.add(access, {
action: 'created',
object_type: 'stream',
object_id: row.id,
meta: data
})
.then(() => {
return row;
});
@@ -93,78 +59,39 @@ const internalStream = {
* @return {Promise}
*/
update: (access, data) => {
let thisData = data;
const create_certificate = thisData.certificate_id === "new";
if (create_certificate) {
delete thisData.certificate_id;
}
return access
.can("streams:update", thisData.id)
return access.can('streams:update', data.id)
.then((/*access_data*/) => {
// TODO: at this point the existing streams should have been checked
return internalStream.get(access, { id: thisData.id });
return internalStream.get(access, {id: data.id});
})
.then((row) => {
if (row.id !== thisData.id) {
if (row.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`Stream could not be updated, IDs do not match: ${row.id} !== ${thisData.id}`,
);
throw new error.InternalValidationError('Stream could not be updated, IDs do not match: ' + row.id + ' !== ' + data.id);
}
if (create_certificate) {
return internalCertificate
.createQuickCertificate(access, {
domain_names: thisData.domain_names || row.domain_names,
meta: _.assign({}, row.meta, thisData.meta),
})
.then((cert) => {
// update host with cert id
thisData.certificate_id = cert.id;
})
.then(() => {
return row;
});
}
return row;
})
.then((row) => {
// Add domain_names to the data in case it isn't there, so that the audit log renders correctly. The order is important here.
thisData = _.assign(
{},
{
domain_names: row.domain_names,
},
thisData,
);
return streamModel
.query()
.patchAndFetchById(row.id, thisData)
.patchAndFetchById(row.id, data)
.then(utils.omitRow(omissions()))
.then((saved_row) => {
return internalNginx.configure(streamModel, 'stream', saved_row)
.then(() => {
return internalStream.get(access, {id: row.id, expand: ['owner']});
});
})
.then((saved_row) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "stream",
object_id: row.id,
meta: thisData,
})
return internalAuditLog.add(access, {
action: 'updated',
object_type: 'stream',
object_id: row.id,
meta: data
})
.then(() => {
return saved_row;
});
});
})
.then(() => {
return internalStream.get(access, { id: thisData.id, expand: ["owner", "certificate"] }).then((row) => {
return internalNginx.configure(streamModel, "stream", row).then((new_meta) => {
row.meta = new_meta;
return _.omit(internalHost.cleanRowCertificateMeta(row), omissions());
});
});
});
},
@@ -177,38 +104,38 @@ const internalStream = {
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
return access
.can("streams:get", thisData.id)
if (typeof data === 'undefined') {
data = {};
}
return access.can('streams:get', data.id)
.then((access_data) => {
const query = streamModel
let query = streamModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph(streamModel.defaultAllowGraph)
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[owner]')
.first();
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
let thisRow = row;
if (!thisRow || !thisRow.id) {
throw new errs.ItemNotFoundError(thisData.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
thisRow = internalHost.cleanRowCertificateMeta(thisRow);
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(thisRow, thisData.omit);
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
}
return thisRow;
return row;
});
},
@@ -220,35 +147,35 @@ const internalStream = {
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("streams:delete", data.id)
return access.can('streams:delete', data.id)
.then(() => {
return internalStream.get(access, { id: data.id });
return internalStream.get(access, {id: data.id});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
return streamModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
is_deleted: 1,
is_deleted: 1
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("stream", row).then(() => {
return internalNginx.reload();
});
return internalNginx.deleteConfig('stream', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "stream",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'deleted',
object_type: 'stream',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -265,41 +192,39 @@ const internalStream = {
* @returns {Promise}
*/
enable: (access, data) => {
return access
.can("streams:update", data.id)
return access.can('streams:update', data.id)
.then(() => {
return internalStream.get(access, {
id: data.id,
expand: ["certificate", "owner"],
id: data.id,
expand: ['owner']
});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (row.enabled) {
throw new errs.ValidationError("Stream is already enabled");
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (row.enabled) {
throw new error.ValidationError('Host is already enabled');
}
row.enabled = 1;
return streamModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
enabled: 1,
enabled: 1
})
.then(() => {
// Configure nginx
return internalNginx.configure(streamModel, "stream", row);
return internalNginx.configure(streamModel, 'stream', row);
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "enabled",
object_type: "stream",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'enabled',
object_type: 'stream',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -316,40 +241,39 @@ const internalStream = {
* @returns {Promise}
*/
disable: (access, data) => {
return access
.can("streams:update", data.id)
return access.can('streams:update', data.id)
.then(() => {
return internalStream.get(access, { id: data.id });
return internalStream.get(access, {id: data.id});
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(data.id);
}
if (!row.enabled) {
throw new errs.ValidationError("Stream is already disabled");
if (!row) {
throw new error.ItemNotFoundError(data.id);
} else if (!row.enabled) {
throw new error.ValidationError('Host is already disabled');
}
row.enabled = 0;
return streamModel
.query()
.where("id", row.id)
.where('id', row.id)
.patch({
enabled: 0,
enabled: 0
})
.then(() => {
// Delete Nginx Config
return internalNginx.deleteConfig("stream", row).then(() => {
return internalNginx.reload();
});
return internalNginx.deleteConfig('stream', row)
.then(() => {
return internalNginx.reload();
});
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "disabled",
object_type: "stream",
object_id: row.id,
meta: _.omit(row, omissions()),
action: 'disabled',
object_type: 'stream-host',
object_id: row.id,
meta: _.omit(row, omissions())
});
});
})
@@ -367,39 +291,31 @@ const internalStream = {
* @returns {Promise}
*/
getAll: (access, expand, search_query) => {
return access
.can("streams:list")
return access.can('streams:list')
.then((access_data) => {
const query = streamModel
let query = streamModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph(streamModel.defaultAllowGraph)
.orderBy("incoming_port", "ASC");
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[owner]')
.orderBy('incoming_port', 'ASC');
if (access_data.permission_visibility !== "all") {
query.andWhere("owner_user_id", access.token.getUserId(1));
if (access_data.permission_visibility !== 'all') {
query.andWhere('owner_user_id', access.token.getUserId(1));
}
// Query is used for searching
if (typeof search_query === "string" && search_query.length > 0) {
if (typeof search_query === 'string') {
query.where(function () {
this.where(castJsonIfNeed("incoming_port"), "like", `%${search_query}%`);
this.where('incoming_port', 'like', '%' + search_query + '%');
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
})
.then((rows) => {
if (typeof expand !== "undefined" && expand !== null && expand.indexOf("certificate") !== -1) {
return internalHost.cleanAllRowsCertificateMeta(rows);
}
return rows;
});
},
@@ -411,16 +327,20 @@ const internalStream = {
* @returns {Promise}
*/
getCount: (user_id, visibility) => {
const query = streamModel.query().count("id AS count").where("is_deleted", 0);
let query = streamModel
.query()
.count('id as count')
.where('is_deleted', 0);
if (visibility !== "all") {
query.andWhere("owner_user_id", user_id);
if (visibility !== 'all') {
query.andWhere('owner_user_id', user_id);
}
return query.first().then((row) => {
return Number.parseInt(row.count, 10);
});
},
return query.first()
.then((row) => {
return parseInt(row.count, 10);
});
}
};
export default internalStream;
module.exports = internalStream;
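
One behavioural change in create() above deserves a note: streams route by incoming_port, not by hostname, so the new code clones the payload and drops domain_names before the insert, while keeping the field on the original object for quick-certificate creation and the audit log. A minimal sketch of that step:

// streamModel is the Objection model imported at the top of the file
const insertStream = (streamModel, data) => {
    const dataNoDomains = structuredClone(data); // deep copy, the original stays intact
    delete dataNoDomains.domain_names; // not a DB column for streams
    return streamModel.query().insertAndFetch(dataNoDomains);
};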

View File

@@ -1,17 +1,12 @@
import _ from "lodash";
import errs from "../lib/error.js";
import { parseDatePeriod } from "../lib/helpers.js";
import authModel from "../models/auth.js";
import TokenModel from "../models/token.js";
import userModel from "../models/user.js";
import twoFactor from "./2fa.js";
const _ = require('lodash');
const error = require('../lib/error');
const userModel = require('../models/user');
const authModel = require('../models/auth');
const helpers = require('../lib/helpers');
const TokenModel = require('../models/token');
const ERROR_MESSAGE_INVALID_AUTH = "Invalid email or password";
const ERROR_MESSAGE_INVALID_AUTH_I18N = "error.invalid-auth";
const ERROR_MESSAGE_INVALID_2FA = "Invalid verification code";
const ERROR_MESSAGE_INVALID_2FA_I18N = "error.invalid-2fa";
module.exports = {
export default {
/**
* @param {Object} data
* @param {String} data.identity
@@ -21,121 +16,70 @@ export default {
* @param {String} [issuer]
* @returns {Promise}
*/
getTokenFromEmail: async (data, issuer) => {
const Token = TokenModel();
getTokenFromEmail: (data, issuer) => {
let Token = new TokenModel();
data.scope = data.scope || "user";
data.expiry = data.expiry || "1d";
data.scope = data.scope || 'user';
data.expiry = data.expiry || '1d';
const user = await userModel
return userModel
.query()
.where("email", data.identity.toLowerCase().trim())
.andWhere("is_deleted", 0)
.andWhere("is_disabled", 0)
.first();
.where('email', data.identity.toLowerCase().trim())
.andWhere('is_deleted', 0)
.andWhere('is_disabled', 0)
.first()
.then((user) => {
if (user) {
// Get auth
return authModel
.query()
.where('user_id', '=', user.id)
.where('type', '=', 'password')
.first()
.then((auth) => {
if (auth) {
return auth.verifyPassword(data.secret)
.then((valid) => {
if (valid) {
if (!user) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH);
}
if (data.scope !== 'user' && _.indexOf(user.roles, data.scope) === -1) {
// The scope requested doesn't exist as a role against the user,
// you shall not pass.
throw new error.AuthError('Invalid scope: ' + data.scope);
}
const auth = await authModel.query().where("user_id", "=", user.id).where("type", "=", "password").first();
// Create a moment of the expiry expression
let expiry = helpers.parseDatePeriod(data.expiry);
if (expiry === null) {
throw new error.AuthError('Invalid expiry time: ' + data.expiry);
}
if (!auth) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH);
}
const valid = await auth.verifyPassword(data.secret);
if (!valid) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH, ERROR_MESSAGE_INVALID_AUTH_I18N);
}
if (data.scope !== "user" && _.indexOf(user.roles, data.scope) === -1) {
// The scope requested doesn't exist as a role against the user,
// you shall not pass.
throw new errs.AuthError(`Invalid scope: ${data.scope}`);
}
// Check if 2FA is enabled
const has2FA = await twoFactor.isEnabled(user.id);
if (has2FA) {
// Return challenge token instead of full token
const challengeToken = await Token.create({
iss: issuer || "api",
attrs: {
id: user.id,
},
scope: ["2fa-challenge"],
expiresIn: "5m",
return Token.create({
iss: issuer || 'api',
attrs: {
id: user.id
},
scope: [data.scope],
expiresIn: data.expiry
})
.then((signed) => {
return {
token: signed.token,
expires: expiry.toISOString()
};
});
} else {
throw new error.AuthError('Invalid password');
}
});
} else {
throw new error.AuthError('No password auth for user');
}
});
} else {
throw new error.AuthError('No relevant user found');
}
});
return {
requires_2fa: true,
challenge_token: challengeToken.token,
};
}
// Create a dayjs of the expiry expression
const expiry = parseDatePeriod(data.expiry);
if (expiry === null) {
throw new errs.AuthError(`Invalid expiry time: ${data.expiry}`);
}
const signed = await Token.create({
iss: issuer || "api",
attrs: {
id: user.id,
},
scope: [data.scope],
expiresIn: data.expiry,
});
return {
token: signed.token,
expires: expiry.toISOString(),
};
},
/**
* @param {Object} data
* @param {String} data.identity
* @returns {Promise}
*/
getTokenFromOAuthClaim: async (data) => {
const Token = TokenModel();
data.scope = "user";
data.expiry = "1d";
const user = await userModel
.query()
.where("email", data.identity.toLowerCase().trim())
.andWhere("is_deleted", 0)
.andWhere("is_disabled", 0)
.first();
if (!user) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_AUTH);
}
// Create a dayjs of the expiry expression
const expiry = parseDatePeriod(data.expiry);
if (expiry === null) {
throw new errs.AuthError(`Invalid expiry time: ${data.expiry}`);
}
const signed = await Token.create({
iss: "api",
attrs: {
id: user.id,
},
scope: [data.scope],
expiresIn: data.expiry,
});
return {
token: signed.token,
expires: expiry.toISOString(),
};
},
/**
@@ -145,126 +89,74 @@ export default {
* @param {String} [data.scope] Only considered if existing token scope is admin
* @returns {Promise}
*/
getFreshToken: async (access, data) => {
const Token = TokenModel();
const thisData = data || {};
getFreshToken: (access, data) => {
let Token = new TokenModel();
thisData.expiry = thisData.expiry || "1d";
data = data || {};
data.expiry = data.expiry || '1d';
if (access?.token.getUserId(0)) {
// Create a dayjs of the expiry expression
const expiry = parseDatePeriod(thisData.expiry);
if (access && access.token.getUserId(0)) {
// Create a moment of the expiry expression
let expiry = helpers.parseDatePeriod(data.expiry);
if (expiry === null) {
throw new errs.AuthError(`Invalid expiry time: ${thisData.expiry}`);
throw new error.AuthError('Invalid expiry time: ' + data.expiry);
}
const token_attrs = {
id: access.token.getUserId(0),
let token_attrs = {
id: access.token.getUserId(0)
};
// Only admins can request otherwise scoped tokens
let scope = access.token.get("scope");
if (thisData.scope && access.token.hasScope("admin")) {
scope = [thisData.scope];
let scope = access.token.get('scope');
if (data.scope && access.token.hasScope('admin')) {
scope = [data.scope];
if (thisData.scope === "job-board" || thisData.scope === "worker") {
if (data.scope === 'job-board' || data.scope === 'worker') {
token_attrs.id = 0;
}
}
const signed = await Token.create({
iss: "api",
scope: scope,
attrs: token_attrs,
expiresIn: thisData.expiry,
});
return {
token: signed.token,
expires: expiry.toISOString(),
};
return Token.create({
iss: 'api',
scope: scope,
attrs: token_attrs,
expiresIn: data.expiry
})
.then((signed) => {
return {
token: signed.token,
expires: expiry.toISOString()
};
});
} else {
throw new error.AssertionFailedError('Existing token contained invalid user data');
}
throw new errs.AssertionFailedError("Existing token contained invalid user data");
},
/**
* Verify 2FA code and return full token
* @param {string} challengeToken
* @param {string} code
* @param {string} [expiry]
* @returns {Promise}
*/
verify2FA: async (challengeToken, code, expiry) => {
const Token = TokenModel();
const tokenExpiry = expiry || "1d";
// Verify challenge token
let tokenData;
try {
tokenData = await Token.load(challengeToken);
} catch {
throw new errs.AuthError("Invalid or expired challenge token");
}
// Check scope
if (!tokenData.scope || tokenData.scope[0] !== "2fa-challenge") {
throw new errs.AuthError("Invalid challenge token");
}
const userId = tokenData.attrs?.id;
if (!userId) {
throw new errs.AuthError("Invalid challenge token");
}
// Verify 2FA code
const valid = await twoFactor.verifyForLogin(userId, code);
if (!valid) {
throw new errs.AuthError(ERROR_MESSAGE_INVALID_2FA, ERROR_MESSAGE_INVALID_2FA_I18N);
}
// Create full token
const expiryDate = parseDatePeriod(tokenExpiry);
if (expiryDate === null) {
throw new errs.AuthError(`Invalid expiry time: ${tokenExpiry}`);
}
const signed = await Token.create({
iss: "api",
attrs: {
id: userId,
},
scope: ["user"],
expiresIn: tokenExpiry,
});
return {
token: signed.token,
expires: expiryDate.toISOString(),
};
},
/**
* @param {Object} user
* @returns {Promise}
*/
getTokenFromUser: async (user) => {
const expire = "1d";
const Token = TokenModel();
const expiry = parseDatePeriod(expire);
getTokenFromUser: (user) => {
const expire = '1d';
const Token = new TokenModel();
const expiry = helpers.parseDatePeriod(expire);
const signed = await Token.create({
iss: "api",
return Token.create({
iss: 'api',
attrs: {
id: user.id,
id: user.id
},
scope: ["user"],
expiresIn: expire,
});
return {
token: signed.token,
expires: expiry.toISOString(),
user: user,
};
},
scope: ['user'],
expiresIn: expire
})
.then((signed) => {
return {
token: signed.token,
expires: expiry.toISOString(),
user: user
};
});
}
};
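
Login through the rewritten token module is now two-step when 2FA is enabled: getTokenFromEmail() answers with a five-minute "2fa-challenge" token instead of a full token, and verify2FA() exchanges it for a normal user-scoped token once the code checks out. A usage sketch; the import path and credentials are placeholders:

import internalToken from "./internal/token.js"; // path assumed

const res = await internalToken.getTokenFromEmail({
    identity: "admin@example.com",
    secret: "changeme",
});

if (res.requires_2fa) {
    // the challenge token is only valid for five minutes and only for this exchange
    const { token, expires } = await internalToken.verify2FA(res.challenge_token, "123456");
    console.log(token, expires);
} else {
    console.log(res.token, res.expires); // 2FA disabled: a full token straight away
}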

View File

@@ -1,129 +1,93 @@
import _ from "lodash";
import crypto from "node:crypto";
import fs from "node:fs";
import { pipeline } from "node:stream/promises";
import errs from "../lib/error.js";
import utils from "../lib/utils.js";
import { gravatar as logger } from "../logger.js";
import authModel from "../models/auth.js";
import userModel from "../models/user.js";
import userPermissionModel from "../models/user_permission.js";
import internalAuditLog from "./audit-log.js";
import internalToken from "./token.js";
import pjson from "../package.json" with { type: "json" };
const _ = require('lodash');
const error = require('../lib/error');
const utils = require('../lib/utils');
const userModel = require('../models/user');
const userPermissionModel = require('../models/user_permission');
const authModel = require('../models/auth');
const gravatar = require('gravatar');
const internalToken = require('./token');
const internalAuditLog = require('./audit-log');
const omissions = () => {
return ["is_deleted", "permissions.id", "permissions.user_id", "permissions.created_on", "permissions.modified_on"];
};
function omissions () {
return ['is_deleted'];
}
const internalUser = {
/**
* Creating a user can happen unauthenticated only once, and only when no active users exist.
* Otherwise, a valid auth method is required.
*
* @param {Access} access
* @param {Object} data
* @returns {Promise}
*/
create: async (access, data) => {
const auth = data.auth || null;
create: (access, data) => {
let auth = data.auth || null;
delete data.auth;
data.avatar = data.avatar || "";
data.roles = data.roles || [];
data.avatar = data.avatar || '';
data.roles = data.roles || [];
data.email = data.email.toLowerCase().trim();
internalUser.isEmailAvailable(data.email).then((available) => {
if (!available) {
throw new errs.ValidationError(`Email address already in use - ${data.email}`);
}
});
if (typeof data.is_disabled !== "undefined") {
if (typeof data.is_disabled !== 'undefined') {
data.is_disabled = data.is_disabled ? 1 : 0;
}
await access.can("users:create", data);
return access.can('users:create', data)
.then(() => {
data.avatar = gravatar.url(data.email, {default: 'mm'});
if (process.env.DISABLE_GRAVATAR === "true") {
data.avatar = "/images/default-avatar.jpg";
} else {
try {
const hash = crypto.createHash("sha256").update(data.email.trim().toLowerCase()).digest("hex");
const response = await fetch(
`https://www.gravatar.com/avatar/${hash}?s=64&default=initials&name=${encodeURIComponent(
data.name
.split(" ")
.map((n) => n[0])
.join(""),
)}`,
{
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
},
},
);
if (!response.ok) throw new Error(`Status code: ${response.status}`);
let ext;
switch (response.headers.get("content-type")) {
case "image/png":
ext = "png";
break;
case "image/jpeg":
ext = "jpeg";
break;
case "image/gif":
ext = "gif";
break;
default:
throw new Error();
return userModel
.query()
.insertAndFetch(data)
.then(utils.omitRow(omissions()));
})
.then((user) => {
if (auth) {
return authModel
.query()
.insert({
user_id: user.id,
type: auth.type,
secret: auth.secret,
meta: {}
})
.then(() => {
return user;
});
} else {
return user;
}
})
.then((user) => {
// Create permissions row as well
let is_admin = data.roles.indexOf('admin') !== -1;
await pipeline(response.body, fs.createWriteStream(`/data/npmplus/gravatar/${hash}.${ext}`));
data.avatar = `/images/gravatar/${hash}.${ext}`;
} catch (err) {
logger.error(`Error downloading gravatar: ${err.message}`);
data.avatar = "/images/default-avatar.jpg";
}
}
let user = await userModel.query().insertAndFetch(data).then(utils.omitRow(omissions()));
if (auth) {
user = await authModel.query().insert({
user_id: user.id,
type: auth.type,
secret: auth.secret,
meta: {},
return userPermissionModel
.query()
.insert({
user_id: user.id,
visibility: is_admin ? 'all' : 'user',
proxy_hosts: 'manage',
redirection_hosts: 'manage',
dead_hosts: 'manage',
streams: 'manage',
access_lists: 'manage',
certificates: 'manage'
})
.then(() => {
return internalUser.get(access, {id: user.id, expand: ['permissions']});
});
})
.then((user) => {
// Add to audit log
return internalAuditLog.add(access, {
action: 'created',
object_type: 'user',
object_id: user.id,
meta: user
})
.then(() => {
return user;
});
});
}
// Create permissions row as well
const isAdmin = data.roles.indexOf("admin") !== -1;
await userPermissionModel.query().insert({
user_id: user.id,
visibility: isAdmin ? "all" : "user",
proxy_hosts: "manage",
redirection_hosts: "manage",
dead_hosts: "manage",
streams: "manage",
access_lists: "manage",
certificates: "manage",
});
user = await internalUser.get(access, { id: user.id, expand: ["permissions"] });
await internalAuditLog.add(access, {
action: "created",
object_type: "user",
object_id: user.id,
meta: user,
});
return user;
},
/**
@@ -135,104 +99,62 @@ const internalUser = {
* @return {Promise}
*/
update: (access, data) => {
if (typeof data.is_disabled !== "undefined") {
if (typeof data.is_disabled !== 'undefined') {
data.is_disabled = data.is_disabled ? 1 : 0;
}
return access
.can("users:update", data.id)
return access.can('users:update', data.id)
.then(() => {
// Make sure the user being updated doesn't change their email to one that another user is already using
// 1. get user we want to update
return internalUser.get(access, { id: data.id }).then((user) => {
// 2. if email is to be changed, find other users with that email
if (typeof data.email !== "undefined") {
data.email = data.email.toLowerCase().trim();
return internalUser.get(access, {id: data.id})
.then((user) => {
if (user.email !== data.email) {
return internalUser.isEmailAvailable(data.email, data.id).then((available) => {
if (!available) {
throw new errs.ValidationError(`Email address already in use - ${data.email}`);
}
return user;
});
// 2. if email is to be changed, find other users with that email
if (typeof data.email !== 'undefined') {
data.email = data.email.toLowerCase().trim();
if (user.email !== data.email) {
return internalUser.isEmailAvailable(data.email, data.id)
.then((available) => {
if (!available) {
throw new error.ValidationError('Email address already in use - ' + data.email);
}
return user;
});
}
}
}
// No change to email:
return user;
});
// No change to email:
return user;
});
})
.then(async (user) => {
.then((user) => {
if (user.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`User could not be updated, IDs do not match: ${user.id} !== ${data.id}`,
);
throw new error.InternalValidationError('User could not be updated, IDs do not match: ' + user.id + ' !== ' + data.id);
}
if (process.env.DISABLE_GRAVATAR === "true") {
data.avatar = "/images/default-avatar.jpg";
} else {
try {
const hash = crypto
.createHash("sha256")
.update((data.email || user.email).trim().toLowerCase())
.digest("hex");
const response = await fetch(
`https://www.gravatar.com/avatar/${hash}?s=64&default=initials&name=${encodeURIComponent(
(data.name || user.name)
.split(" ")
.map((n) => n[0])
.join(""),
)}`,
{
headers: {
"User-Agent": `NPMplus/${pjson.version}`,
},
},
);
data.avatar = gravatar.url(data.email || user.email, {default: 'mm'});
if (!response.ok) throw new Error(`Status code: ${response.status}`);
let ext;
switch (response.headers.get("content-type")) {
case "image/png":
ext = "png";
break;
case "image/jpeg":
ext = "jpg";
break;
case "image/gif":
ext = "gif";
break;
default:
throw new Error();
}
await pipeline(response.body, fs.createWriteStream(`/data/npmplus/gravatar/${hash}.${ext}`));
data.avatar = `/images/gravatar/${hash}.${ext}`;
} catch (err) {
logger.error(`Error downloading gravatar: ${err.message}`);
data.avatar = "/images/default-avatar.jpg";
}
}
return userModel.query().patchAndFetchById(user.id, data).then(utils.omitRow(omissions()));
return userModel
.query()
.patchAndFetchById(user.id, data)
.then(utils.omitRow(omissions()));
})
.then(() => {
return internalUser.get(access, { id: data.id });
return internalUser.get(access, {id: data.id});
})
.then((user) => {
// Add to audit log
return internalAuditLog
.add(access, {
action: "updated",
object_type: "user",
object_id: user.id,
meta: { ...data, id: user.id, name: user.name },
})
return internalAuditLog.add(access, {
action: 'updated',
object_type: 'user',
object_id: user.id,
meta: data
})
.then(() => {
return user;
});
@@ -248,41 +170,37 @@ const internalUser = {
* @return {Promise}
*/
get: (access, data) => {
const thisData = data || {};
if (typeof thisData.id === "undefined" || !thisData.id) {
thisData.id = access.token.getUserId(0);
if (typeof data === 'undefined') {
data = {};
}
return access
.can("users:get", thisData.id)
if (typeof data.id === 'undefined' || !data.id) {
data.id = access.token.getUserId(0);
}
return access.can('users:get', data.id)
.then(() => {
const query = userModel
let query = userModel
.query()
.where("is_deleted", 0)
.andWhere("id", thisData.id)
.allowGraph("[permissions]")
.where('is_deleted', 0)
.andWhere('id', data.id)
.allowGraph('[permissions]')
.first();
if (typeof thisData.expand !== "undefined" && thisData.expand !== null) {
query.withGraphFetched(`[${thisData.expand.join(", ")}]`);
if (typeof data.expand !== 'undefined' && data.expand !== null) {
query.withGraphFetched('[' + data.expand.join(', ') + ']');
}
return query.then(utils.omitRow(omissions()));
})
.then((row) => {
if (!row || !row.id) {
throw new errs.ItemNotFoundError(thisData.id);
if (!row) {
throw new error.ItemNotFoundError(data.id);
}
// Custom omissions
if (typeof thisData.omit !== "undefined" && thisData.omit !== null) {
return _.omit(row, thisData.omit);
if (typeof data.omit !== 'undefined' && data.omit !== null) {
row = _.omit(row, data.omit);
}
if (row.avatar === "") {
row.avatar = "/images/default-avatar.jpg";
}
return row;
});
},
@@ -295,15 +213,20 @@ const internalUser = {
* @param user_id
*/
isEmailAvailable: (email, user_id) => {
const query = userModel.query().where("email", "=", email.toLowerCase().trim()).where("is_deleted", 0).first();
let query = userModel
.query()
.where('email', '=', email.toLowerCase().trim())
.where('is_deleted', 0)
.first();
if (typeof user_id !== "undefined") {
query.where("id", "!=", user_id);
if (typeof user_id !== 'undefined') {
query.where('id', '!=', user_id);
}
return query.then((user) => {
return !user;
});
return query
.then((user) => {
return !user;
});
},
/**
@@ -314,34 +237,33 @@ const internalUser = {
* @returns {Promise}
*/
delete: (access, data) => {
return access
.can("users:delete", data.id)
return access.can('users:delete', data.id)
.then(() => {
return internalUser.get(access, { id: data.id });
return internalUser.get(access, {id: data.id});
})
.then((user) => {
if (!user) {
throw new errs.ItemNotFoundError(data.id);
throw new error.ItemNotFoundError(data.id);
}
// Make sure user can't delete themselves
if (user.id === access.token.getUserId(0)) {
throw new errs.PermissionError("You cannot delete yourself.");
throw new error.PermissionError('You cannot delete yourself.');
}
return userModel
.query()
.where("id", user.id)
.where('id', user.id)
.patch({
is_deleted: 1,
is_deleted: 1
})
.then(() => {
// Add to audit log
return internalAuditLog.add(access, {
action: "deleted",
object_type: "user",
object_id: user.id,
meta: _.omit(user, omissions()),
action: 'deleted',
object_type: 'user',
object_id: user.id,
meta: _.omit(user, omissions())
});
});
})
@@ -350,12 +272,6 @@ const internalUser = {
});
},
deleteAll: async () => {
await userModel.query().patch({
is_deleted: 1,
});
},
/**
* This will only count the users
*
@@ -364,26 +280,26 @@ const internalUser = {
* @returns {*}
*/
getCount: (access, search_query) => {
return access
.can("users:list")
return access.can('users:list')
.then(() => {
const query = userModel.query().count("id as count").where("is_deleted", 0).first();
let query = userModel
.query()
.count('id as count')
.where('is_deleted', 0)
.first();
// Query is used for searching
if (typeof search_query === "string") {
if (typeof search_query === 'string') {
query.where(function () {
this.where("user.name", "like", `%${search_query}%`).orWhere(
"user.email",
"like",
`%${search_query}%`,
);
this.where('user.name', 'like', '%' + search_query + '%')
.orWhere('user.email', 'like', '%' + search_query + '%');
});
}
return query;
})
.then((row) => {
return Number.parseInt(row.count, 10);
return parseInt(row.count, 10);
});
},
@@ -395,28 +311,30 @@ const internalUser = {
* @param {String} [search_query]
* @returns {Promise}
*/
getAll: async (access, expand, search_query) => {
await access.can("users:list");
const query = userModel
.query()
.where("is_deleted", 0)
.groupBy("id")
.allowGraph("[permissions]")
.orderBy("name", "ASC");
getAll: (access, expand, search_query) => {
return access.can('users:list')
.then(() => {
let query = userModel
.query()
.where('is_deleted', 0)
.groupBy('id')
.allowGraph('[permissions]')
.orderBy('name', 'ASC');
// Query is used for searching
if (typeof search_query === "string") {
query.where(function () {
this.where("name", "like", `%${search_query}%`).orWhere("email", "like", `%${search_query}%`);
// Query is used for searching
if (typeof search_query === 'string') {
query.where(function () {
this.where('name', 'like', '%' + search_query + '%')
.orWhere('email', 'like', '%' + search_query + '%');
});
}
if (typeof expand !== 'undefined' && expand !== null) {
query.withGraphFetched('[' + expand.join(', ') + ']');
}
return query.then(utils.omitRows(omissions()));
});
}
if (typeof expand !== "undefined" && expand !== null) {
query.withGraphFetched(`[${expand.join(", ")}]`);
}
const res = await query;
return utils.omitRows(omissions())(res);
},
/**
@@ -424,11 +342,11 @@ const internalUser = {
* @param {Integer} [id_requested]
* @returns {[String]}
*/
getUserOmisionsByAccess: (access, idRequested) => {
getUserOmisionsByAccess: (access, id_requested) => {
let response = []; // Admin response
if (!access.token.hasScope("admin") && access.token.getUserId(0) !== idRequested) {
response = ["is_deleted"]; // Restricted response
if (!access.token.hasScope('admin') && access.token.getUserId(0) !== id_requested) {
response = ['roles', 'is_deleted']; // Restricted response
}
return response;
@@ -443,30 +361,26 @@ const internalUser = {
* @return {Promise}
*/
setPassword: (access, data) => {
return access
.can("users:password", data.id)
return access.can('users:password', data.id)
.then(() => {
return internalUser.get(access, { id: data.id });
return internalUser.get(access, {id: data.id});
})
.then((user) => {
if (user.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`User could not be updated, IDs do not match: ${user.id} !== ${data.id}`,
);
throw new error.InternalValidationError('User could not be updated, IDs do not match: ' + user.id + ' !== ' + data.id);
}
if (user.id === access.token.getUserId(0)) {
// they're setting their own password. Make sure their current password is correct
if (typeof data.current === "undefined" || !data.current) {
throw new errs.ValidationError("Current password was not supplied");
if (typeof data.current === 'undefined' || !data.current) {
throw new error.ValidationError('Current password was not supplied');
}
return internalToken
.getTokenFromEmail({
identity: user.email,
secret: data.current,
})
return internalToken.getTokenFromEmail({
identity: user.email,
secret: data.current
})
.then(() => {
return user;
});
@@ -478,36 +392,43 @@ const internalUser = {
// Get auth, patch if it exists
return authModel
.query()
.where("user_id", user.id)
.andWhere("type", data.type)
.where('user_id', user.id)
.andWhere('type', data.type)
.first()
.then((existing_auth) => {
if (existing_auth) {
// patch
return authModel.query().where("user_id", user.id).andWhere("type", data.type).patch({
type: data.type, // This is required for the model to encrypt on save
secret: data.secret,
});
return authModel
.query()
.where('user_id', user.id)
.andWhere('type', data.type)
.patch({
type: data.type, // This is required for the model to encrypt on save
secret: data.secret
});
} else {
// insert
return authModel
.query()
.insert({
user_id: user.id,
type: data.type,
secret: data.secret,
meta: {}
});
}
// insert
return authModel.query().insert({
user_id: user.id,
type: data.type,
secret: data.secret,
meta: {},
});
})
.then(() => {
// Add to Audit Log
return internalAuditLog.add(access, {
action: "updated",
object_type: "user",
object_id: user.id,
meta: {
name: user.name,
action: 'updated',
object_type: 'user',
object_id: user.id,
meta: {
name: user.name,
password_changed: true,
auth_type: data.type,
},
auth_type: data.type
}
});
});
})
@@ -522,17 +443,14 @@ const internalUser = {
* @return {Promise}
*/
setPermissions: (access, data) => {
return access
.can("users:permissions", data.id)
return access.can('users:permissions', data.id)
.then(() => {
return internalUser.get(access, { id: data.id });
return internalUser.get(access, {id: data.id});
})
.then((user) => {
if (user.id !== data.id) {
// Sanity check that something crazy hasn't happened
throw new errs.InternalValidationError(
`User could not be updated, IDs do not match: ${user.id} !== ${data.id}`,
);
throw new error.InternalValidationError('User could not be updated, IDs do not match: ' + user.id + ' !== ' + data.id);
}
return user;
@@ -541,30 +459,34 @@ const internalUser = {
// Get perms row, patch if it exists
return userPermissionModel
.query()
.where("user_id", user.id)
.where('user_id', user.id)
.first()
.then((existing_auth) => {
if (existing_auth) {
// patch
return userPermissionModel
.query()
.where("user_id", user.id)
.patchAndFetchById(existing_auth.id, _.assign({ user_id: user.id }, data));
.where('user_id', user.id)
.patchAndFetchById(existing_auth.id, _.assign({user_id: user.id}, data));
} else {
// insert
return userPermissionModel
.query()
.insertAndFetch(_.assign({user_id: user.id}, data));
}
// insert
return userPermissionModel.query().insertAndFetch(_.assign({ user_id: user.id }, data));
})
.then((permissions) => {
// Add to Audit Log
return internalAuditLog.add(access, {
action: "updated",
object_type: "user",
object_id: user.id,
meta: {
name: user.name,
permissions: permissions,
},
action: 'updated',
object_type: 'user',
object_id: user.id,
meta: {
name: user.name,
permissions: permissions
}
});
});
})
.then(() => {
@@ -578,15 +500,14 @@ const internalUser = {
* @param {Integer} data.id
*/
loginAs: (access, data) => {
return access
.can("users:loginas", data.id)
return access.can('users:loginas', data.id)
.then(() => {
return internalUser.get(access, data);
})
.then((user) => {
return internalToken.getTokenFromUser(user);
});
},
}
};
export default internalUser;
module.exports = internalUser;
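A usage sketch of the reworked getAll; the access object setup is elided and the search term is made up:
// assuming `access` was created and loaded for an admin token
const users = await internalUser.getAll(access, ["permissions"], "jc");
// rows whose name or email matches %jc%, with the permissions graph fetched
// and the omitted columns stripped via utils.omitRows(omissions())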

View File

@@ -1,19 +1,19 @@
module.exports = {
development: {
client: "mysql2",
client: 'mysql',
migrations: {
tableName: "migrations",
stub: "lib/migrate_template.js",
directory: "migrations",
},
tableName: 'migrations',
stub: 'lib/migrate_template.js',
directory: 'migrations'
}
},
production: {
client: "mysql2",
client: 'mysql',
migrations: {
tableName: "migrations",
stub: "lib/migrate_template.js",
directory: "migrations",
},
},
tableName: 'migrations',
stub: 'lib/migrate_template.js',
directory: 'migrations'
}
}
};

View File

@@ -4,90 +4,91 @@
* "scope" in this file means "where did this token come from and what is using it", so 99% of the time
* the "scope" is going to be "user" because it would be a user token. This is not to be confused with
* the "role" which could be "user" or "admin". The scope in fact, could be "worker" or anything else.
*
*
*/
import fs from "node:fs";
import { dirname } from "node:path";
import { fileURLToPath } from "node:url";
import Ajv from "ajv/dist/2020.js";
import _ from "lodash";
import { access as logger } from "../logger.js";
import proxyHostModel from "../models/proxy_host.js";
import TokenModel from "../models/token.js";
import userModel from "../models/user.js";
import permsSchema from "./access/permissions.json" with { type: "json" };
import roleSchema from "./access/roles.json" with { type: "json" };
import errs from "./error.js";
const _ = require('lodash');
const logger = require('../logger').access;
const validator = require('ajv');
const error = require('./error');
const userModel = require('../models/user');
const proxyHostModel = require('../models/proxy_host');
const TokenModel = require('../models/token');
const roleSchema = require('./access/roles.json');
const permsSchema = require('./access/permissions.json');
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
export default function (tokenString) {
const Token = TokenModel();
let tokenData = null;
let initialised = false;
const objectCache = {};
let allowInternalAccess = false;
let userRoles = [];
let permissions = {};
module.exports = function (token_string) {
let Token = new TokenModel();
let token_data = null;
let initialized = false;
let object_cache = {};
let allow_internal_access = false;
let user_roles = [];
let permissions = {};
/**
* Loads the Token object from the token string
*
* @returns {Promise}
*/
this.init = async () => {
if (initialised) {
return;
}
if (!tokenString) {
throw new errs.PermissionError("Permission Denied");
}
tokenData = await Token.load(tokenString);
// At this point we need to load the user from the DB and make sure they:
// - exist (and not soft deleted)
// - still have the appropriate scopes for this token
// This is only required when the User ID is supplied or if the token scope has `user`
if (
tokenData.attrs.id ||
(typeof tokenData.scope !== "undefined" && _.indexOf(tokenData.scope, "user") !== -1)
) {
// Has token user id or token user scope
const user = await userModel
.query()
.where("id", tokenData.attrs.id)
.andWhere("is_deleted", 0)
.andWhere("is_disabled", 0)
.allowGraph("[permissions]")
.withGraphFetched("[permissions]")
.first();
if (user) {
// make sure user has all scopes of the token
// The `user` role is not added against the user row, so we have to just add it here to get past this check.
user.roles.push("user");
let ok = true;
_.forEach(tokenData.scope, (scope_item) => {
if (_.indexOf(user.roles, scope_item) === -1) {
ok = false;
}
});
if (!ok) {
throw new errs.AuthError("Invalid token scope for User");
}
initialised = true;
userRoles = user.roles;
permissions = user.permissions;
this.init = () => {
return new Promise((resolve, reject) => {
if (initialized) {
resolve();
} else if (!token_string) {
reject(new error.PermissionError('Permission Denied'));
} else {
throw new errs.AuthError("User cannot be loaded for Token");
resolve(Token.load(token_string)
.then((data) => {
token_data = data;
// At this point we need to load the user from the DB and make sure they:
// - exist (and not soft deleted)
// - still have the appropriate scopes for this token
// This is only required when the User ID is supplied or if the token scope has `user`
if (token_data.attrs.id || (typeof token_data.scope !== 'undefined' && _.indexOf(token_data.scope, 'user') !== -1)) {
// Has token user id or token user scope
return userModel
.query()
.where('id', token_data.attrs.id)
.andWhere('is_deleted', 0)
.andWhere('is_disabled', 0)
.allowGraph('[permissions]')
.withGraphFetched('[permissions]')
.first()
.then((user) => {
if (user) {
// make sure user has all scopes of the token
// The `user` role is not added against the user row, so we have to just add it here to get past this check.
user.roles.push('user');
let is_ok = true;
_.forEach(token_data.scope, (scope_item) => {
if (_.indexOf(user.roles, scope_item) === -1) {
is_ok = false;
}
});
if (!is_ok) {
throw new error.AuthError('Invalid token scope for User');
} else {
initialized = true;
user_roles = user.roles;
permissions = user.permissions;
}
} else {
throw new error.AuthError('User cannot be loaded for Token');
}
});
} else {
initialized = true;
}
}));
}
}
initialised = true;
});
};
/**
@@ -95,118 +96,141 @@ export default function (tokenString) {
* This only applies to USER token scopes, as all other tokens are not really bound
* by object scopes
*
* @param {String} objectType
* @param {String} object_type
* @returns {Promise}
*/
this.loadObjects = async (objectType) => {
let objects = null;
this.loadObjects = (object_type) => {
return new Promise((resolve, reject) => {
if (Token.hasScope('user')) {
if (typeof token_data.attrs.id === 'undefined' || !token_data.attrs.id) {
reject(new error.AuthError('User Token supplied without a User ID'));
} else {
let token_user_id = token_data.attrs.id ? token_data.attrs.id : 0;
let query;
if (Token.hasScope("user")) {
if (typeof tokenData.attrs.id === "undefined" || !tokenData.attrs.id) {
throw new errs.AuthError("User Token supplied without a User ID");
}
if (typeof object_cache[object_type] === 'undefined') {
switch (object_type) {
const tokenUserId = tokenData.attrs.id ? tokenData.attrs.id : 0;
// USERS - should only return yourself
case 'users':
resolve(token_user_id ? [token_user_id] : []);
break;
if (typeof objectCache[objectType] !== "undefined") {
objects = objectCache[objectType];
} else {
switch (objectType) {
// USERS - should only return yourself
case "users":
objects = tokenUserId ? [tokenUserId] : [];
break;
// Proxy Hosts
case 'proxy_hosts':
query = proxyHostModel
.query()
.select('id')
.andWhere('is_deleted', 0);
// Proxy Hosts
case "proxy_hosts": {
const query = proxyHostModel.query().select("id").andWhere("is_deleted", 0);
if (permissions.visibility === 'user') {
query.andWhere('owner_user_id', token_user_id);
}
if (permissions.visibility === "user") {
query.andWhere("owner_user_id", tokenUserId);
resolve(query
.then((rows) => {
let result = [];
_.forEach(rows, (rule_row) => {
result.push(rule_row.id);
});
// enum should not have less than 1 item
if (!result.length) {
result.push(0);
}
return result;
})
);
break;
// DEFAULT: null
default:
resolve(null);
break;
}
const rows = await query;
objects = [];
_.forEach(rows, (ruleRow) => {
objects.push(ruleRow.id);
});
// enum should not have less than 1 item
if (!objects.length) {
objects.push(0);
}
break;
} else {
resolve(object_cache[object_type]);
}
}
objectCache[objectType] = objects;
} else {
resolve(null);
}
}
return objects;
})
.then((objects) => {
object_cache[object_type] = objects;
return objects;
});
};
/**
* Creates a schema object on the fly with the IDs and other values required to be checked against the permissionSchema
*
* @param {String} permissionLabel
* @param {String} permission_label
* @returns {Object}
*/
this.getObjectSchema = async (permissionLabel) => {
const baseObjectType = permissionLabel.split(":").shift();
this.getObjectSchema = (permission_label) => {
let base_object_type = permission_label.split(':').shift();
const schema = {
$id: "objects",
description: "Actor Properties",
type: "object",
let schema = {
$id: 'objects',
$schema: 'http://json-schema.org/draft-07/schema#',
description: 'Actor Properties',
type: 'object',
additionalProperties: false,
properties: {
properties: {
user_id: {
anyOf: [
{
type: "number",
enum: [Token.get("attrs").id],
},
],
type: 'number',
enum: [Token.get('attrs').id]
}
]
},
scope: {
type: "string",
pattern: `^${Token.get("scope")}$`,
},
},
type: 'string',
pattern: '^' + Token.get('scope') + '$'
}
}
};
const result = await this.loadObjects(baseObjectType);
if (typeof result === "object" && result !== null) {
schema.properties[baseObjectType] = {
type: "number",
enum: result,
minimum: 1,
};
} else {
schema.properties[baseObjectType] = {
type: "number",
minimum: 1,
};
}
return this.loadObjects(base_object_type)
.then((object_result) => {
if (typeof object_result === 'object' && object_result !== null) {
schema.properties[base_object_type] = {
type: 'number',
enum: object_result,
minimum: 1
};
} else {
schema.properties[base_object_type] = {
type: 'number',
minimum: 1
};
}
return schema;
return schema;
});
};
// here:
return {
token: Token,
/**
*
* @param {Boolean} [allowInternal]
* @param {Boolean} [allow_internal]
* @returns {Promise}
*/
load: async (allowInternal) => {
if (tokenString) {
return await Token.load(tokenString);
}
allowInternalAccess = allowInternal;
return allowInternal || null;
load: (allow_internal) => {
return new Promise(function (resolve/*, reject*/) {
if (token_string) {
resolve(Token.load(token_string));
} else {
allow_internal_access = allow_internal;
resolve(allow_internal_access || null);
}
});
},
reloadObjects: this.loadObjects,
@@ -217,59 +241,74 @@ export default function (tokenString) {
* @param {*} [data]
* @returns {Promise}
*/
can: async (permission, data) => {
if (allowInternalAccess === true) {
return true;
can: (permission, data) => {
if (allow_internal_access === true) {
return Promise.resolve(true);
//return true;
} else {
return this.init()
.then(() => {
// initialized, token decoded ok
return this.getObjectSchema(permission)
.then((objectSchema) => {
let data_schema = {
[permission]: {
data: data,
scope: Token.get('scope'),
roles: user_roles,
permission_visibility: permissions.visibility,
permission_proxy_hosts: permissions.proxy_hosts,
permission_redirection_hosts: permissions.redirection_hosts,
permission_dead_hosts: permissions.dead_hosts,
permission_streams: permissions.streams,
permission_access_lists: permissions.access_lists,
permission_certificates: permissions.certificates
}
};
let permissionSchema = {
$schema: 'http://json-schema.org/draft-07/schema#',
$async: true,
$id: 'permissions',
additionalProperties: false,
properties: {}
};
permissionSchema.properties[permission] = require('./access/' + permission.replace(/:/gim, '-') + '.json');
// logger.info('objectSchema', JSON.stringify(objectSchema, null, 2));
// logger.info('permissionSchema', JSON.stringify(permissionSchema, null, 2));
// logger.info('data_schema', JSON.stringify(data_schema, null, 2));
let ajv = validator({
verbose: true,
allErrors: true,
format: 'full',
missingRefs: 'fail',
breakOnError: true,
coerceTypes: true,
schemas: [
roleSchema,
permsSchema,
objectSchema,
permissionSchema
]
});
return ajv.validate('permissions', data_schema)
.then(() => {
return data_schema[permission];
});
});
})
.catch((err) => {
err.permission = permission;
err.permission_data = data;
logger.error(permission, data, err.message);
throw new error.PermissionError('Permission Denied', err);
});
}
try {
await this.init();
const objectSchema = await this.getObjectSchema(permission);
const dataSchema = {
[permission]: {
data: data,
scope: Token.get("scope"),
roles: userRoles,
permission_visibility: permissions.visibility,
permission_proxy_hosts: permissions.proxy_hosts,
permission_redirection_hosts: permissions.redirection_hosts,
permission_dead_hosts: permissions.dead_hosts,
permission_streams: permissions.streams,
permission_access_lists: permissions.access_lists,
permission_certificates: permissions.certificates,
},
};
const permissionSchema = {
$async: true,
$id: "permissions",
type: "object",
additionalProperties: false,
properties: {},
};
const rawData = fs.readFileSync(`${__dirname}/access/${permission.replace(/:/gim, "-")}.json`, {
encoding: "utf8",
});
permissionSchema.properties[permission] = JSON.parse(rawData);
const ajv = new Ajv({
verbose: true,
allErrors: true,
breakOnError: true,
coerceTypes: true,
schemas: [roleSchema, permsSchema, objectSchema, permissionSchema],
});
const valid = await ajv.validate("permissions", dataSchema);
return valid && dataSchema[permission];
} catch (err) {
err.permission = permission;
err.permission_data = data;
logger.error(permission, data, err.message);
throw new errs.PermissionError("Permission Denied", err);
}
},
}
};
}
};
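A minimal sketch of the intended call flow; the module path, the token extraction and the permission name are assumptions, not shown here:
import Access from "./lib/access.js";

const guard = async (token) => {
    const access = new Access(token);
    await access.load();
    // resolves with the validated data, throws PermissionError when denied
    return access.can("proxy_hosts:update", 12);
};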

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"required": ["permission_access_lists", "roles"],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"required": ["permission_access_lists", "roles"],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"required": ["permission_access_lists", "roles"],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"required": ["permission_access_lists", "roles"],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_access_lists",
"roles"
],
"required": ["permission_access_lists", "roles"],
"properties": {
"permission_access_lists": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"required": ["permission_certificates", "roles"],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"required": ["permission_certificates", "roles"],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"required": ["permission_certificates", "roles"],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"required": ["permission_certificates", "roles"],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_certificates",
"roles"
],
"required": ["permission_certificates", "roles"],
"properties": {
"permission_certificates": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"required": ["permission_dead_hosts", "roles"],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"required": ["permission_dead_hosts", "roles"],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"required": ["permission_dead_hosts", "roles"],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"required": ["permission_dead_hosts", "roles"],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_dead_hosts",
"roles"
],
"required": ["permission_dead_hosts", "roles"],
"properties": {
"permission_dead_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -1,4 +1,5 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "perms",
"definitions": {
"view": {

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"required": ["permission_proxy_hosts", "roles"],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"required": ["permission_proxy_hosts", "roles"],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"required": ["permission_proxy_hosts", "roles"],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"required": ["permission_proxy_hosts", "roles"],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_proxy_hosts",
"roles"
],
"required": ["permission_proxy_hosts", "roles"],
"properties": {
"permission_proxy_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"required": ["permission_redirection_hosts", "roles"],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"required": ["permission_redirection_hosts", "roles"],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"required": ["permission_redirection_hosts", "roles"],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"required": ["permission_redirection_hosts", "roles"],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_redirection_hosts",
"roles"
],
"required": ["permission_redirection_hosts", "roles"],
"properties": {
"permission_redirection_hosts": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -1,12 +1,10 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "roles",
"definitions": {
"admin": {
"type": "object",
"required": [
"scope",
"roles"
],
"required": ["scope", "roles"],
"properties": {
"scope": {
"type": "array",
@@ -26,9 +24,7 @@
},
"user": {
"type": "object",
"required": [
"scope"
],
"required": ["scope"],
"properties": {
"scope": {
"type": "array",

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"required": ["permission_streams", "roles"],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"required": ["permission_streams", "roles"],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"required": ["permission_streams", "roles"],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"required": ["permission_streams", "roles"],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/view"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"permission_streams",
"roles"
],
"required": ["permission_streams", "roles"],
"properties": {
"permission_streams": {
"$ref": "perms#/definitions/manage"
@@ -17,9 +14,7 @@
"type": "array",
"items": {
"type": "string",
"enum": [
"user"
]
"enum": ["user"]
}
}
}

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"data",
"scope"
],
"required": ["data", "scope"],
"properties": {
"data": {
"$ref": "objects#/properties/users"

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"data",
"scope"
],
"required": ["data", "scope"],
"properties": {
"data": {
"$ref": "objects#/properties/users"

View File

@@ -5,10 +5,7 @@
},
{
"type": "object",
"required": [
"data",
"scope"
],
"required": ["data", "scope"],
"properties": {
"data": {
"$ref": "objects#/properties/users"

View File

@@ -1,58 +0,0 @@
import dnsPlugins from "../certbot/dns-plugins.json" with { type: "json" };
import { certbot as logger } from "../logger.js";
import errs from "./error.js";
import utils from "./utils.js";
/**
* Installs a certbot plugin given the key for the object from
* ../certbot/dns-plugins.json
*
* @param {string} pluginKey
* @returns {Object}
*/
const installPlugin = async (pluginKey) => {
if (typeof dnsPlugins[pluginKey] === "undefined") {
// throw Error(`Certbot plugin ${pluginKey} not found`);
throw new errs.ItemNotFoundError(pluginKey);
}
const plugin = dnsPlugins[pluginKey];
logger.start(`Installing ${pluginKey}...`);
return utils
.execFile("pip", ["install", "--upgrade", "--no-cache-dir", plugin.package_name])
.then((result) => {
logger.complete(`Installed ${pluginKey}`);
return result;
})
.catch((err) => {
throw err;
});
};
/**
* @param {array} pluginKeys
*/
const installPlugins = async (pluginKeys) => {
if (pluginKeys.length === 0) {
return;
}
let hasErrors = false;
for (const pluginKey of pluginKeys) {
try {
await installPlugin(pluginKey);
} catch (err) {
hasErrors = true;
logger.error(err.message);
break;
}
}
if (hasErrors) {
throw new errs.CommandError("Some plugins failed to install. Please check the logs above", 1);
}
};
export { installPlugins, installPlugin };
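A rough usage sketch; the plugin key "cloudflare" is only assumed to be an entry in dns-plugins.json:
import { installPlugins } from "./lib/certbot.js";

// each plugin is installed via pip; a failing install stops the loop and
// a CommandError is thrown once the loop ends
await installPlugins(["cloudflare"]).catch((err) => console.error(err.message));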

View File

@@ -1,82 +1,66 @@
import fs from "node:fs";
import crypto from "node:crypto";
import { global as logger } from "../logger.js";
const fs = require('fs');
const NodeRSA = require('node-rsa');
const logger = require('../logger').global;
const keysFile = "/data/npmplus/keys.json";
const sqliteEngine = "better-sqlite3";
const mysqlEngine = "mysql2";
const postgresEngine = "pg";
const keysFile = '/data/etc/npm/keys.json';
let instance = null;
// 1. Load from config file first (not recommended anymore)
// 2. Use config env variables next
const configure = () => {
const toBool = (v) => /^(1|true|yes|on)$/i.test((v || "").trim());
const filename = (process.env.NODE_CONFIG_DIR || '/data/etc/npm') + '/' + (process.env.NODE_ENV || 'default') + '.json';
if (fs.existsSync(filename)) {
let configData;
try {
configData = require(filename);
} catch (err) {
// do nothing
}
if (configData && configData.database) {
logger.info(`Using configuration from file: ${filename}`);
instance = configData;
instance.keys = getKeys();
return;
}
}
const envMysqlHost = process.env.DB_MYSQL_HOST || null;
const envMysqlUser = process.env.DB_MYSQL_USER || null;
const envMysqlName = process.env.DB_MYSQL_NAME || null;
const envMysqlSSL = toBool(process.env.DB_MYSQL_SSL);
const envMysqlSSLRejectUnauthorized =
process.env.DB_MYSQL_SSL_REJECT_UNAUTHORIZED === undefined
? true
: toBool(process.env.DB_MYSQL_SSL_REJECT_UNAUTHORIZED);
const envMysqlSSLVerifyIdentity =
process.env.DB_MYSQL_SSL_VERIFY_IDENTITY === undefined
? true
: toBool(process.env.DB_MYSQL_SSL_VERIFY_IDENTITY);
const envMysqlTls = process.env.DB_MYSQL_TLS || null;
const envMysqlCa = process.env.DB_MYSQL_CA || '/etc/ssl/certs/ca-certificates.crt';
if (envMysqlHost && envMysqlUser && envMysqlName) {
// we have enough mysql creds to go with mysql
logger.info("Using MySQL configuration");
logger.info('Using MySQL configuration');
instance = {
database: {
engine: mysqlEngine,
host: envMysqlHost,
port: process.env.DB_MYSQL_PORT || 3306,
user: envMysqlUser,
engine: 'mysql',
host: envMysqlHost,
port: process.env.DB_MYSQL_PORT || 3306,
user: envMysqlUser,
password: process.env.DB_MYSQL_PASSWORD,
name: envMysqlName,
ssl: envMysqlSSL
? { rejectUnauthorized: envMysqlSSLRejectUnauthorized, verifyIdentity: envMysqlSSLVerifyIdentity }
: false,
name: envMysqlName,
ssl: envMysqlTls ? { ca: fs.readFileSync(envMysqlCa) } : false,
},
keys: getKeys(),
};
return;
}
const envPostgresHost = process.env.DB_POSTGRES_HOST || null;
const envPostgresUser = process.env.DB_POSTGRES_USER || null;
const envPostgresName = process.env.DB_POSTGRES_NAME || null;
if (envPostgresHost && envPostgresUser && envPostgresName) {
// we have enough postgres creds to go with postgres
logger.info("Using Postgres configuration");
instance = {
database: {
engine: postgresEngine,
host: envPostgresHost,
port: process.env.DB_POSTGRES_PORT || 5432,
user: envPostgresUser,
password: process.env.DB_POSTGRES_PASSWORD,
name: envPostgresName,
},
keys: getKeys(),
};
return;
}
const envSqliteFile = "/data/npmplus/database.sqlite";
const envSqliteFile = process.env.DB_SQLITE_FILE || '/data/etc/npm/database.sqlite';
logger.info(`Using Sqlite: ${envSqliteFile}`);
instance = {
database: {
engine: "knex-native",
knex: {
client: sqliteEngine,
engine: 'knex-native',
knex: {
client: 'sqlite3',
connection: {
filename: envSqliteFile,
filename: envSqliteFile
},
useNullAsDefault: true,
},
useNullAsDefault: true
}
},
keys: getKeys(),
};
@@ -84,138 +68,120 @@ const configure = () => {
const getKeys = () => {
// Get keys from file
logger.info("Checking for keys file:", keysFile);
if (!fs.existsSync(keysFile)) {
generateKeys();
} else {
logger.info("Keys file exists OK");
} else if (process.env.DEBUG) {
logger.info('Keys file exists OK');
}
try {
// Load this json keysFile synchronously and return the json object
const rawData = fs.readFileSync(keysFile);
return JSON.parse(rawData);
return require(keysFile);
} catch (err) {
logger.error(`Could not read JWT key pair from config file: ${keysFile}`, err);
logger.error('Could not read JWT key pair from config file: ' + keysFile, err);
process.exit(1);
}
};
const generateKeys = () => {
logger.info("Creating a new JWT key pair...");
logger.info('Creating a new JWT key pair...');
// Now create the keys and save them in the config.
const { privateKey, publicKey } = crypto.generateKeyPairSync("rsa", {
modulusLength: 2048,
publicKeyEncoding: {
type: "spki",
format: "pem",
},
privateKeyEncoding: {
type: "pkcs8",
format: "pem",
},
});
const key = new NodeRSA({ b: 2048 });
key.generateKeyPair();
const keys = {
key: key.exportKey('private').toString(),
pub: key.exportKey('public').toString(),
};
// Write keys config
try {
fs.writeFileSync(keysFile, JSON.stringify({ key: privateKey, pub: publicKey }, null, 2));
fs.writeFileSync(keysFile, JSON.stringify(keys, null, 2));
} catch (err) {
logger.error(`Could not write JWT key pair to config file: ${keysFile}: ${err.message}`);
logger.error('Could not write JWT key pair to config file: ' + keysFile + ': ' + err.message);
process.exit(1);
}
logger.info(`Wrote JWT key pair to config file: ${keysFile}`);
logger.info('Wrote JWT key pair to config file: ' + keysFile);
};
/**
*
* @param {string} key ie: 'database' or 'database.engine'
* @returns {boolean}
*/
const configHas = (key) => {
instance === null && configure();
const keys = key.split(".");
let level = instance;
let has = true;
keys.forEach((keyItem) => {
if (typeof level[keyItem] === "undefined") {
has = false;
} else {
level = level[keyItem];
module.exports = {
/**
*
* @param {string} key ie: 'database' or 'database.engine'
* @returns {boolean}
*/
has: function(key) {
instance === null && configure();
const keys = key.split('.');
let level = instance;
let has = true;
keys.forEach((keyItem) =>{
if (typeof level[keyItem] === 'undefined') {
has = false;
} else {
level = level[keyItem];
}
});
return has;
},
/**
* Gets a specific key from the top level
*
* @param {string} key
* @returns {*}
*/
get: function (key) {
instance === null && configure();
if (key && typeof instance[key] !== 'undefined') {
return instance[key];
}
});
return instance;
},
return has;
};
/**
* Is this a sqlite configuration?
*
* @returns {boolean}
*/
isSqlite: function () {
instance === null && configure();
return instance.database.knex && instance.database.knex.client === 'sqlite3';
},
/**
* Gets a specific key from the top level
*
* @param {string} key
* @returns {*}
*/
const configGet = (key) => {
instance === null && configure();
if (key && typeof instance[key] !== "undefined") {
return instance[key];
/**
* Are we running in debug mode?
*
* @returns {boolean}
*/
debug: function () {
return !!process.env.DEBUG;
},
/**
* Returns a public key
*
* @returns {string}
*/
getPublicKey: function () {
instance === null && configure();
return instance.keys.pub;
},
/**
* Returns a private key
*
* @returns {string}
*/
getPrivateKey: function () {
instance === null && configure();
return instance.keys.key;
},
/**
* @returns {boolean}
*/
useLetsencryptStaging: function () {
return !!process.env.LE_STAGING;
}
return instance;
};
/**
* Is this a sqlite configuration?
*
* @returns {boolean}
*/
const isSqlite = () => {
instance === null && configure();
return instance.database.knex && instance.database.knex.client === sqliteEngine;
};
/**
* Is this a mysql configuration?
*
* @returns {boolean}
*/
const isMysql = () => {
instance === null && configure();
return instance.database.engine === mysqlEngine;
};
/**
* Is this a postgres configuration?
*
* @returns {boolean}
*/
const isPostgres = () => {
instance === null && configure();
return instance.database.engine === postgresEngine;
};
/**
* Are we running in CI?
*
* @returns {boolean}
*/
const isCI = () => process.env.CI === "true" && process.env.DEBUG === "true";
/**
* Returns a public key
*
* @returns {string}
*/
const getPublicKey = () => {
instance === null && configure();
return instance.keys.pub;
};
/**
* Returns a private key
*
* @returns {string}
*/
const getPrivateKey = () => {
instance === null && configure();
return instance.keys.key;
};
export { isCI, configHas, configGet, isSqlite, isMysql, isPostgres, getPrivateKey, getPublicKey };
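A sketch of the branch selection; the values are made up, the import is dynamic so the env assignments land before configure() runs, and /data/npmplus is assumed writable so the JWT keys file can be created:
process.env.DB_MYSQL_HOST = "db";
process.env.DB_MYSQL_USER = "npm";
process.env.DB_MYSQL_NAME = "npm";
process.env.DB_MYSQL_PASSWORD = "secret";

const config = await import("./lib/config.js");
console.log(config.isMysql()); // true, the engine resolved to "mysql2"
console.log(config.isSqlite()); // false, the sqlite fallback was not taken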

View File

@@ -1,103 +1,90 @@
import _ from "lodash";
const _ = require('lodash');
const util = require('util');
const errs = {
PermissionError: function (_, previous) {
module.exports = {
PermissionError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = "Permission Denied";
this.public = true;
this.status = 403;
this.message = 'Permission Denied';
this.public = true;
this.status = 403;
},
ItemNotFoundError: function (id, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = "Not Found";
if (id) {
this.message = `Not Found - ${id}`;
}
this.public = true;
this.status = 404;
this.message = 'Item Not Found - ' + id;
this.public = true;
this.status = 404;
},
AuthError: function (message, messageI18n, previous) {
AuthError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = message;
this.message_i18n = messageI18n;
this.public = true;
this.status = 400;
this.message = message;
this.public = true;
this.status = 401;
},
InternalError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = message;
this.status = 500;
this.public = false;
this.message = message;
this.status = 500;
this.public = false;
},
InternalValidationError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = message;
this.status = 400;
this.public = false;
this.message = message;
this.status = 400;
this.public = false;
},
ConfigurationError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = message;
this.status = 400;
this.public = true;
this.message = message;
this.status = 400;
this.public = true;
},
CacheError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.message = message;
this.name = this.constructor.name;
this.message = message;
this.previous = previous;
this.status = 500;
this.public = false;
this.status = 500;
this.public = false;
},
ValidationError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = message;
this.public = true;
this.status = 400;
this.message = message;
this.public = true;
this.status = 400;
},
AssertionFailedError: function (message, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.name = this.constructor.name;
this.previous = previous;
this.message = message;
this.public = false;
this.status = 400;
},
CommandError: function (stdErr, code, previous) {
Error.captureStackTrace(this, this.constructor);
this.name = this.constructor.name;
this.previous = previous;
this.message = stdErr;
this.code = code;
this.public = false;
},
this.message = message;
this.public = false;
this.status = 400;
}
};
_.forEach(errs, (err) => {
err.prototype = Object.create(Error.prototype);
_.forEach(module.exports, function (error) {
util.inherits(error, Error);
});
export default errs;
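A short consumption sketch for the reworked error classes; the surrounding express error handler is assumed:
import errs from "./lib/error.js";

try {
    throw new errs.ItemNotFoundError(42);
} catch (err) {
    // public errors carry a status code and are safe to surface to the client
    console.log(err.status, err.public, err.message); // 404 true "Not Found - 42"
}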

View File

@@ -0,0 +1,40 @@
const validator = require('../validator');
module.exports = function (req, res, next) {
if (req.headers.origin) {
const originSchema = {
oneOf: [
{
type: 'string',
pattern: '^[a-z\\-]+:\\/\\/(?:[\\w\\-\\.]+(:[0-9]+)?/?)?$'
},
{
type: 'string',
pattern: '^[a-z\\-]+:\\/\\/(?:\\[([a-z0-9]{0,4}\\:?)+\\])?/?(:[0-9]+)?$'
}
]
};
// very relaxed validation....
validator(originSchema, req.headers.origin)
.then(function () {
res.set({
'Access-Control-Allow-Origin': req.headers.origin,
'Access-Control-Allow-Credentials': true,
'Access-Control-Allow-Methods': 'OPTIONS, GET, POST',
'Access-Control-Allow-Headers': 'Content-Type, Cache-Control, Pragma, Expires, Authorization, X-Dataset-Total, X-Dataset-Offset, X-Dataset-Limit',
'Access-Control-Max-Age': 5 * 60,
'Access-Control-Expose-Headers': 'X-Dataset-Total, X-Dataset-Offset, X-Dataset-Limit'
});
next();
})
.catch(next);
} else {
// No origin
next();
}
};
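To make the relaxed patterns concrete, a few origins they accept (illustrative, derived from the regexes above):
// "https://example.com"     matches the first pattern
// "http://localhost:3000/"  matches the first pattern (optional port and slash)
// "http://[::1]:8080"       matches the second pattern (bracketed IPv6)
// any other origin fails validation and the error is passed to next()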

View File

@@ -1,30 +1,15 @@
import Access from "../access.js";
const Access = require('../access');
export default () => {
return async (req, res, next) => {
const token = req.cookies?.token || null;
//if (!token) {
// return res.status(401).json({
// error: {
// message: "Missing token",
// },
// });
//}
try {
res.locals.access = null;
const access = new Access(token);
await access.load();
res.locals.access = access;
next();
} catch {
res.clearCookie("token", { path: "/api" });
return res.status(403).json({
error: {
message: "Invalid or expired token",
},
});
}
module.exports = () => {
return function (req, res, next) {
res.locals.access = null;
let access = new Access(res.locals.token || null);
access.load()
.then(() => {
res.locals.access = access;
next();
})
.catch(next);
};
};
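Wired up the usual express way; the app setup and the module path are assumptions:
import express from "express";
import cookieParser from "cookie-parser";
import accessMiddleware from "./lib/express/access.js";

const app = express();
app.use(cookieParser()); // the middleware reads the token from req.cookies
app.use(accessMiddleware()); // attaches res.locals.access, or replies 403 on a bad token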

View File

@@ -0,0 +1,13 @@
module.exports = function () {
return function (req, res, next) {
if (req.headers.authorization) {
let parts = req.headers.authorization.split(' ');
if (parts && parts[0] === 'Bearer' && parts[1]) {
res.locals.token = parts[1];
}
}
next();
};
};

View File

@@ -1,6 +1,7 @@
import _ from "lodash";
let _ = require('lodash');
module.exports = function (default_sort, default_offset, default_limit, max_limit) {
export default (default_sort, default_offset, default_limit, max_limit) => {
/**
* This will set up the req query params with filtered data and defaults
*
@@ -10,35 +11,34 @@ export default (default_sort, default_offset, default_limit, max_limit) => {
*
*/
return (req, _res, next) => {
req.query.offset =
typeof req.query.offset === "undefined" ? default_offset || 0 : Number.parseInt(req.query.offset, 10);
req.query.limit =
typeof req.query.limit === "undefined" ? default_limit || 50 : Number.parseInt(req.query.limit, 10);
return function (req, res, next) {
req.query.offset = typeof req.query.limit === 'undefined' ? default_offset || 0 : parseInt(req.query.offset, 10);
req.query.limit = typeof req.query.limit === 'undefined' ? default_limit || 50 : parseInt(req.query.limit, 10);
if (max_limit && req.query.limit > max_limit) {
req.query.limit = max_limit;
}
// Sorting
let sort = typeof req.query.sort === "undefined" ? default_sort : req.query.sort;
const myRegexp = /.*\.(asc|desc)$/gi;
const sort_array = [];
let sort = typeof req.query.sort === 'undefined' ? default_sort : req.query.sort;
let myRegexp = /.*\.(asc|desc)$/ig;
let sort_array = [];
sort = sort.split(",");
_.map(sort, (val) => {
const matches = myRegexp.exec(val);
sort = sort.split(',');
_.map(sort, function (val) {
let matches = myRegexp.exec(val);
if (matches !== null) {
const dir = matches[1];
let dir = matches[1];
sort_array.push({
field: val.substr(0, val.length - (dir.length + 1)),
dir: dir.toLowerCase(),
dir: dir.toLowerCase()
});
} else {
sort_array.push({
field: val,
dir: "asc",
dir: 'asc'
});
}
});
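// For example (hypothetical request): ?sort=name.desc,email produces
// sort_array = [{ field: "name", dir: "desc" }, { field: "email", dir: "asc" }]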

View File

@@ -1,8 +1,9 @@
export default (req, res, next) => {
if (req.params.user_id === "me" && res.locals.access) {
req.params.user_id = res.locals.access.token.get("attrs").id;
module.exports = (req, res, next) => {
if (req.params.user_id === 'me' && res.locals.access) {
req.params.user_id = res.locals.access.token.get('attrs').id;
} else {
req.params.user_id = Number.parseInt(req.params.user_id, 10);
req.params.user_id = parseInt(req.params.user_id, 10);
}
next();
};

View File

@@ -1,58 +1,32 @@
import dayjs from "dayjs";
import { ref } from "objection";
import { isPostgres } from "./config.js";
const moment = require('moment');
/**
* Takes an expression such as 30d and returns a dayjs object of that date in the future
*
* Key Shorthand
* ==================
* years y
* quarters Q
* months M
* weeks w
* days d
* hours h
* minutes m
* seconds s
* milliseconds ms
*
* @param {String} expression
* @returns {Object}
*/
const parseDatePeriod = (expression) => {
const matches = expression.match(/^([0-9]+)(y|Q|M|w|d|h|m|s|ms)$/m);
if (matches) {
return dayjs().add(matches[1], matches[2]);
module.exports = {
/**
* Takes an expression such as 30d and returns a moment object of that date in the future
*
* Key Shorthand
* ==================
* years y
* quarters Q
* months M
* weeks w
* days d
* hours h
* minutes m
* seconds s
* milliseconds ms
*
* @param {String} expression
* @returns {Object}
*/
parseDatePeriod: function (expression) {
let matches = expression.match(/^([0-9]+)(y|Q|M|w|d|h|m|s|ms)$/m);
if (matches) {
return moment().add(matches[1], matches[2]);
}
return null;
}
return null;
};
const convertIntFieldsToBool = (obj, fields) => {
fields.forEach((field) => {
if (typeof obj[field] !== "undefined") {
obj[field] = obj[field] === 1;
}
});
return obj;
};
const convertBoolFieldsToInt = (obj, fields) => {
fields.forEach((field) => {
if (typeof obj[field] !== "undefined") {
obj[field] = obj[field] ? 1 : 0;
}
});
return obj;
};
/**
* Casts a column to json if using postgres
*
* @param {string} colName
* @returns {string|Objection.ReferenceBuilder}
*/
const castJsonIfNeed = (colName) => (isPostgres() ? ref(colName).castText() : colName);
export { parseDatePeriod, convertIntFieldsToBool, convertBoolFieldsToInt, castJsonIfNeed };
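A quick usage sketch of the dayjs-based helper:
import { parseDatePeriod } from "./lib/helpers.js";

const expiry = parseDatePeriod("30d"); // a dayjs object 30 days from now
const bad = parseDatePeriod("30x"); // null, the unit is not recognised
console.log(expiry.toISOString(), bad);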

View File

@@ -1,34 +1,33 @@
import { migrate as logger } from "../logger.js";
const migrateName = "identifier_for_migrate";
const migrate_name = 'identifier_for_migrate';
const logger = require('../logger').migrate;
/**
* Migrate
*
* @see http://knexjs.org/#Schema
*
* @param {Object} knex
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
const up = (_knex) => {
logger.info(`[${migrateName}] Migrating Up...`);
exports.up = function (knex, Promise) {
logger.info('[' + migrate_name + '] Migrating Up...');
// Create Table example:
/*
return knex.schema.createTable('notification', (table) => {
/*return knex.schema.createTable('notification', (table) => {
table.increments().primary();
table.string('name').notNull();
table.string('type').notNull();
table.integer('created_on').notNull();
table.integer('modified_on').notNull();
})
.then(function () {
logger.info('[' + migrateName + '] Notification Table created');
});
*/
.then(function () {
logger.info('[' + migrate_name + '] Notification Table created');
});*/
logger.info(`[${migrateName}] Migrating Up Complete`);
logger.info('[' + migrate_name + '] Migrating Up Complete');
return Promise.resolve(true);
};
@@ -36,24 +35,21 @@ const up = (_knex) => {
/**
* Undo Migrate
*
* @param {Object} knex
* @param {Object} knex
* @param {Promise} Promise
* @returns {Promise}
*/
const down = (_knex) => {
logger.info(`[${migrateName}] Migrating Down...`);
exports.down = function (knex, Promise) {
logger.info('[' + migrate_name + '] Migrating Down...');
// Drop table example:
/*
return knex.schema.dropTable('notification')
.then(() => {
logger.info(`[${migrateName}] Notification Table dropped`);
});
*/
/*return knex.schema.dropTable('notification')
.then(() => {
logger.info('[' + migrate_name + '] Notification Table dropped');
});*/
logger.info(`[${migrateName}] Migrating Down Complete`);
logger.info('[' + migrate_name + '] Migrating Down Complete');
return Promise.resolve(true);
};
export { up, down };

View File

@@ -1,110 +1,129 @@
import { execFile as nodeExecFile } from "node:child_process";
import { promisify } from "node:util";
import { dirname } from "node:path";
import { fileURLToPath } from "node:url";
import { Liquid } from "liquidjs";
import crypto from "node:crypto";
import fs from "node:fs";
import _ from "lodash";
import { debug, global as logger } from "../logger.js";
import errs from "./error.js";
const _ = require('lodash');
const exec = require('child_process').exec;
const spawn = require('child_process').spawn;
const execFile = require('child_process').execFile;
const { Liquid } = require('liquidjs');
const logger = require('../logger').global;
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
module.exports = {
const nodeExecFilePromises = promisify(nodeExecFile);
/**
* @param {String} cmd
* @returns {Promise}
*/
exec: function (cmd) {
return new Promise((resolve, reject) => {
exec(cmd, function (err, stdout, /*stderr*/) {
if (err && typeof err === 'object') {
reject(err);
} else {
resolve(stdout.trim());
}
});
});
},
const writeHash = () => {
const envVars = fs.readdirSync(`${__dirname}/../templates`).flatMap((file) => {
const content = fs.readFileSync(`${__dirname}/../templates/${file}`, "utf8");
const matches = content.match(/env\.[A-Z0-9_]+/g) || [];
return matches.map((match) => match.replace("env.", ""));
});
const uniqueEnvVars =
[...new Set(envVars)]
.sort()
.map((varName) => process.env[varName])
.join("") + process.env.TV;
const hash = crypto.createHash("sha512").update(uniqueEnvVars).digest("hex");
fs.writeFileSync("/data/npmplus/env.sha512sum", hash);
};
/**
* @param {String} cmd
* @returns {Promise}
*/
execfg: function (cmd) {
return new Promise((resolve, reject) => {
const childProcess = spawn(cmd, {
shell: true,
detached: true,
stdio: 'inherit' // Use the same stdio as the current process
});
/**
* @param {String} cmd
* @param {Array} args
* @returns {Promise}
*/
const execFile = async (cmd, args) => {
debug(logger, `CMD: ${cmd} ${args ? args.join(" ") : ""}`);
childProcess.on('error', (err) => {
reject(err);
});
try {
const { stdout, stderr } = await nodeExecFilePromises(cmd, args);
return `${stdout || ""}${stderr || ""}`.trim();
} catch (err) {
if (err && typeof err === "object") {
throw new errs.CommandError(`${err.stdout || ""}${err.stderr || ""}`.trim(), 1, err);
}
throw err;
childProcess.on('close', (code) => {
if (code !== 0) {
reject(new Error(`Command '${cmd}' exited with code ${code}`));
} else {
resolve();
}
});
});
},
/**
* @param {String} cmd
* @param {Array} args
* @returns {Promise}
*/
execFile: function (cmd, args) {
logger.debug('CMD: ' + cmd + ' ' + (args ? args.join(' ') : ''));
return new Promise((resolve, reject) => {
execFile(cmd, args, function (err, stdout, /*stderr*/) {
if (err && typeof err === 'object') {
reject(err);
} else {
resolve(stdout.trim());
}
});
});
},
/**
* Used in objection query builder
*
* @param {Array} omissions
* @returns {Function}
*/
omitRow: function (omissions) {
/**
* @param {Object} row
* @returns {Object}
*/
return (row) => {
return _.omit(row, omissions);
};
},
/**
* Used in objection query builder
*
* @param {Array} omissions
* @returns {Function}
*/
omitRows: function (omissions) {
/**
* @param {Array} rows
* @returns {Object}
*/
return (rows) => {
rows.forEach((row, idx) => {
rows[idx] = _.omit(row, omissions);
});
return rows;
};
},
/**
* @returns {Object} Liquid render engine
*/
getRenderEngine: function () {
const renderEngine = new Liquid({
root: __dirname + '/../templates/'
});
/**
* nginxAccessRule expects the object given to have 2 properties:
*
* directive string
* address string
*/
renderEngine.registerFilter('nginxAccessRule', (v) => {
if (typeof v.directive !== 'undefined' && typeof v.address !== 'undefined' && v.directive && v.address) {
return `${v.directive} ${v.address};`;
}
return '';
});
return renderEngine;
}
};
/**
* Used in objection query builder
*
* @param {Array} omissions
* @returns {Function}
*/
const omitRow = (omissions) => {
/**
* @param {Object} row
* @returns {Object}
*/
return (row) => {
return _.omit(row, omissions);
};
};
/**
* Used in objection query builder
*
* @param {Array} omissions
* @returns {Function}
*/
const omitRows = (omissions) => {
/**
* @param {Array} rows
* @returns {Object}
*/
return (rows) => {
rows.forEach((row, idx) => {
rows[idx] = _.omit(row, omissions);
});
return rows;
};
};
/**
* @returns {Object} Liquid render engine
*/
const getRenderEngine = () => {
const renderEngine = new Liquid({
root: `${__dirname}/../templates/`,
});
/**
* nginxAccessRule expects the object given to have 2 properties:
*
* directive string
* address string
*/
renderEngine.registerFilter("nginxAccessRule", (v) => {
if (typeof v.directive !== "undefined" && typeof v.address !== "undefined" && v.directive && v.address) {
return `${v.directive} ${v.address};`;
}
return "";
});
return renderEngine;
};
export default { writeHash, execFile, omitRow, omitRows, getRenderEngine };
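A hedged sketch of the two pieces most callers touch; the command and template values are made up:
import utils from "./lib/utils.js";

// resolves with trimmed stdout+stderr combined, or throws CommandError
const version = await utils.execFile("nginx", ["-v"]); // nginx prints its version to stderr

const engine = utils.getRenderEngine();
const line = await engine.parseAndRender(
    "{{ rule | nginxAccessRule }}",
    { rule: { directive: "allow", address: "10.0.0.0/8" } },
);
// line === "allow 10.0.0.0/8;"; an incomplete rule renders as an empty string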

View File

@@ -1,12 +1,13 @@
import Ajv from "ajv/dist/2020.js";
import errs from "../error.js";
const error = require('../error');
const path = require('path');
const parser = require('@apidevtools/json-schema-ref-parser');
const ajv = new Ajv({
verbose: true,
allErrors: true,
allowUnionTypes: true,
strict: false,
coerceTypes: true,
const ajv = require('ajv')({
verbose: true,
validateSchema: true,
allErrors: false,
format: 'full',
coerceTypes: true
});
/**
@@ -14,28 +15,31 @@ const ajv = new Ajv({
* @param {Object} payload
* @returns {Promise}
*/
const apiValidator = async (schema, payload /*, description*/) => {
if (!schema) {
throw new errs.ValidationError("Schema is undefined");
}
function apiValidator (schema, payload/*, description*/) {
return new Promise(function Promise_apiValidator (resolve, reject) {
if (typeof payload === 'undefined') {
reject(new error.ValidationError('Payload is undefined'));
}
// Can't use falsy check here as valid payload could be `0` or `false`
if (typeof payload === "undefined") {
throw new errs.ValidationError("Payload is undefined");
}
let validate = ajv.compile(schema);
let valid = validate(payload);
const validate = ajv.compile(schema);
if (valid && !validate.errors) {
resolve(payload);
} else {
let message = ajv.errorsText(validate.errors);
let err = new error.ValidationError(message);
err.debug = [validate.errors, payload];
reject(err);
}
});
}
const valid = validate(payload);
apiValidator.loadSchemas = parser
.dereference(path.resolve('schema/index.json'))
.then((schema) => {
ajv.addSchema(schema);
return schema;
});
if (valid && !validate.errors) {
return payload;
}
const message = ajv.errorsText(validate.errors);
const err = new errs.ValidationError(message);
err.debug = { validationErrors: validate.errors, payload };
throw err;
};
export default apiValidator;
module.exports = apiValidator;
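A minimal sketch of the now-async validator; the module path is assumed and the inline schema stands in for the dereferenced OpenAPI schemas the routes normally pass in:
import apiValidator from "./lib/validator/api.js";

const payload = await apiValidator(
    { type: "object", required: ["name"], properties: { name: { type: "string" } } },
    { name: "npmplus" },
);
// resolves with the (possibly type-coerced) payload, throws ValidationError otherwise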

Some files were not shown because too many files have changed in this diff