mirror of
https://github.com/kerberos-io/agent.git
synced 2026-03-03 15:19:05 +00:00
Compare commits
287 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ffa97598b8 | ||
|
|
f5afbf3a63 | ||
|
|
e666695c96 | ||
|
|
55816e4b7b | ||
|
|
016fb51951 | ||
|
|
550a444650 | ||
|
|
4332e43f27 | ||
|
|
fdc3bfb4a4 | ||
|
|
c17d6b7117 | ||
|
|
5d7a8103c0 | ||
|
|
5d7cb98b8f | ||
|
|
f6046c6a6c | ||
|
|
f59f9d71a9 | ||
|
|
ff72f9647d | ||
|
|
fa604b16cf | ||
|
|
0342869733 | ||
|
|
8685ce31a2 | ||
|
|
0e259f0e7a | ||
|
|
5823abed95 | ||
|
|
86acff58f0 | ||
|
|
d3fc5d4c29 | ||
|
|
50bb40938c | ||
|
|
1977d98ad9 | ||
|
|
448d4a946d | ||
|
|
61ac314bb7 | ||
|
|
c1b144ca28 | ||
|
|
e16987bf9d | ||
|
|
9991597984 | ||
|
|
2c0314cea4 | ||
|
|
0584e52b98 | ||
|
|
1fc90eaee2 | ||
|
|
aef3eacbc9 | ||
|
|
2843568473 | ||
|
|
53ffc8cae0 | ||
|
|
86e654fe19 | ||
|
|
46d57f7664 | ||
|
|
963d8672eb | ||
|
|
9b7a62816a | ||
|
|
237134fe0e | ||
|
|
c8730e8f26 | ||
|
|
acbbe8b444 | ||
|
|
f690016aa5 | ||
|
|
396cfe5d8b | ||
|
|
39fe640ccf | ||
|
|
d389c9b0b6 | ||
|
|
b149686db8 | ||
|
|
c4358cbfad | ||
|
|
cfc5bd3dfe | ||
|
|
c29c1b6a92 | ||
|
|
0f45a2a4b4 | ||
|
|
92edcc13c0 | ||
|
|
5392e2ba90 | ||
|
|
79e1f659c7 | ||
|
|
bf35e5efb6 | ||
|
|
c50137f255 | ||
|
|
f12da749b2 | ||
|
|
a166083423 | ||
|
|
b400d4e773 | ||
|
|
120054d3e5 | ||
|
|
620117c31b | ||
|
|
4e371488c1 | ||
|
|
b154b56308 | ||
|
|
6d92817237 | ||
|
|
b8c1855830 | ||
|
|
a9f7ff4b72 | ||
|
|
b3cd080e14 | ||
|
|
bfde87f888 | ||
|
|
c4453bb8b3 | ||
|
|
40f65a30b3 | ||
|
|
5361de63e0 | ||
|
|
3a8552d362 | ||
|
|
d3840103fc | ||
|
|
d12a9f0612 | ||
|
|
c0d74f7e09 | ||
|
|
8ebea9e4c5 | ||
|
|
89269caf92 | ||
|
|
0c83170f51 | ||
|
|
6081cb4be9 | ||
|
|
ea1dbb3087 | ||
|
|
0523208d36 | ||
|
|
919f21b48b | ||
|
|
2c1c10a2ac | ||
|
|
7e3320b252 | ||
|
|
35ccac8b65 | ||
|
|
dad8165d11 | ||
|
|
ba54188de2 | ||
|
|
3b440c9905 | ||
|
|
42b98b7f20 | ||
|
|
ba3312b57c | ||
|
|
223ba255e9 | ||
|
|
a1df2be207 | ||
|
|
d7f225ca73 | ||
|
|
b3cfabb5df | ||
|
|
5310dd4550 | ||
|
|
cde7dbb58a | ||
|
|
65e68231c7 | ||
|
|
5502555869 | ||
|
|
ad6e7e752f | ||
|
|
63af4660ef | ||
|
|
24fc340001 | ||
|
|
78d786b69d | ||
|
|
756aeaa0eb | ||
|
|
055fb67d7a | ||
|
|
bee522a6bf | ||
|
|
3fbf59c622 | ||
|
|
abd8b8b605 | ||
|
|
abdad47bf3 | ||
|
|
d2c24edf5d | ||
|
|
22f4a7f119 | ||
|
|
a25d3d32e4 | ||
|
|
ed68c32e04 | ||
|
|
4114b3839a | ||
|
|
3f73c009fd | ||
|
|
02fb70c76e | ||
|
|
aaddcb854d | ||
|
|
e73c7a6ecc | ||
|
|
1dc2202f37 | ||
|
|
ac710ae1f5 | ||
|
|
f5ea82ff03 | ||
|
|
ef52325240 | ||
|
|
354855feb1 | ||
|
|
c4cd25b588 | ||
|
|
dbb870229e | ||
|
|
a66fe8c054 | ||
|
|
2352431c79 | ||
|
|
49bc168812 | ||
|
|
98f1ebf20a | ||
|
|
65feb6d182 | ||
|
|
58555d352f | ||
|
|
839a177cf0 | ||
|
|
404517ec40 | ||
|
|
035bd18bc2 | ||
|
|
8bf7a0d244 | ||
|
|
607d8fd0d1 | ||
|
|
12807e289c | ||
|
|
3a984f1c73 | ||
|
|
b84e34da06 | ||
|
|
541d151570 | ||
|
|
4ad97e1286 | ||
|
|
a80b375e89 | ||
|
|
91cb390f6e | ||
|
|
90780dae28 | ||
|
|
ddb08e90e1 | ||
|
|
0d95026819 | ||
|
|
79db3a9dfe | ||
|
|
9f63ffd540 | ||
|
|
9c7116a462 | ||
|
|
dd9b4d43ac | ||
|
|
aa63eca24c | ||
|
|
6df97171d9 | ||
|
|
56f7d69b3d | ||
|
|
3e2b29284e | ||
|
|
18ceca7510 | ||
|
|
5a08d1f3de | ||
|
|
18af6db00c | ||
|
|
6d170c8dc0 | ||
|
|
9c4c3c654d | ||
|
|
6952e387f4 | ||
|
|
66c9ae5c27 | ||
|
|
0fb7601dcb | ||
|
|
07c6e680d1 | ||
|
|
b972bc3040 | ||
|
|
969d42dbca | ||
|
|
6680df9382 | ||
|
|
8877157db5 | ||
|
|
ac814dc357 | ||
|
|
4fcb12c3a3 | ||
|
|
7bcc30f4b7 | ||
|
|
481f917fcf | ||
|
|
700a32e4c8 | ||
|
|
b5a72d904e | ||
|
|
cf3e491462 | ||
|
|
6068705c07 | ||
|
|
37beaa64d7 | ||
|
|
8c5b03487b | ||
|
|
360ae0c0db | ||
|
|
6aad8b7b35 | ||
|
|
9ce037fdc0 | ||
|
|
0eb77ccd16 | ||
|
|
fb876bd216 | ||
|
|
865aec88fc | ||
|
|
9792bdf494 | ||
|
|
d836e89e7f | ||
|
|
53a52b3594 | ||
|
|
ba6ce25b21 | ||
|
|
8c9e18475f | ||
|
|
4548d5328b | ||
|
|
da870fe890 | ||
|
|
66b660e688 | ||
|
|
08f8ca78d6 | ||
|
|
1e61e99005 | ||
|
|
c272e1ab5c | ||
|
|
5cff11c0af | ||
|
|
28b213779f | ||
|
|
666ff202ad | ||
|
|
9cb3c9753a | ||
|
|
c4577e94b1 | ||
|
|
9756183d3b | ||
|
|
83c65fe3d8 | ||
|
|
e6717c87cd | ||
|
|
5a3c1d6c9d | ||
|
|
81045ea955 | ||
|
|
9f9fe3bd37 | ||
|
|
84f7f844c9 | ||
|
|
4fde419db9 | ||
|
|
78cad6cf06 | ||
|
|
4763e5a92e | ||
|
|
50939ee4ce | ||
|
|
884bc2acc1 | ||
|
|
11fd041fa9 | ||
|
|
a6d5c2b614 | ||
|
|
9e3d705c6f | ||
|
|
1004731903 | ||
|
|
9f2ec91688 | ||
|
|
185135ed94 | ||
|
|
27e7d98c68 | ||
|
|
79f56771e3 | ||
|
|
a7839147d6 | ||
|
|
834d82d532 | ||
|
|
989f2f5943 | ||
|
|
3af1df5b19 | ||
|
|
acf06e6e63 | ||
|
|
3f43e15cc2 | ||
|
|
c14683ec0d | ||
|
|
213aaa5c15 | ||
|
|
9fb00c32d5 | ||
|
|
57ec08066c | ||
|
|
e0c6375261 | ||
|
|
79205abe29 | ||
|
|
24326558d0 | ||
|
|
3f981c0f2f | ||
|
|
b6eb7b8317 | ||
|
|
4267ae6305 | ||
|
|
0cb40bd93a | ||
|
|
d2a8890a43 | ||
|
|
e5a5a5326b | ||
|
|
61febd55c8 | ||
|
|
3eac752654 | ||
|
|
df4f1863fc | ||
|
|
acee2784d3 | ||
|
|
8ecb2f94a9 | ||
|
|
8657baf641 | ||
|
|
13d1948c9f | ||
|
|
8e8d51b719 | ||
|
|
ca2413363e | ||
|
|
b067758915 | ||
|
|
b2b8485b28 | ||
|
|
c69d635431 | ||
|
|
a305ca36ce | ||
|
|
a6a97b09f0 | ||
|
|
4d17a15633 | ||
|
|
5fdb4b712e | ||
|
|
3d39251ac6 | ||
|
|
9e59cd1596 | ||
|
|
0ada943699 | ||
|
|
ecadf7a4db | ||
|
|
413ed12fe2 | ||
|
|
6195fa5b9c | ||
|
|
d31524ae52 | ||
|
|
472a40a5f6 | ||
|
|
fb9de04002 | ||
|
|
3f29d1c46f | ||
|
|
b67a72ba9a | ||
|
|
8fc9bc264d | ||
|
|
b2589f498d | ||
|
|
b1ff5134f2 | ||
|
|
3551d02d50 | ||
|
|
4c413012a4 | ||
|
|
74ea2f6cdd | ||
|
|
2a7d9b62d4 | ||
|
|
21d81b94dd | ||
|
|
091662ff26 | ||
|
|
803e8f55ef | ||
|
|
14d38ecf08 | ||
|
|
34d945055b | ||
|
|
8c44da8233 | ||
|
|
a8b79947ef | ||
|
|
7c653f809d | ||
|
|
49f1603f40 | ||
|
|
b4369ea932 | ||
|
|
83ba7baa4b | ||
|
|
9339ae30fd | ||
|
|
c18f2bd445 | ||
|
|
319876bbb0 | ||
|
|
442ba97c61 | ||
|
|
00e0b0b547 | ||
|
|
145f478249 |
@@ -1,2 +1,26 @@
|
||||
FROM kerberos/devcontainer:0a50dc9
|
||||
LABEL AUTHOR=Kerberos.io
|
||||
FROM mcr.microsoft.com/devcontainers/go:1.24-bookworm
|
||||
|
||||
# Install node environment
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
nodejs \
|
||||
npm \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install ffmpeg
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ffmpeg \
|
||||
libavcodec-extra \
|
||||
libavutil-dev \
|
||||
libavformat-dev \
|
||||
libavfilter-dev \
|
||||
libavdevice-dev \
|
||||
libswscale-dev \
|
||||
libswresample-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
USER vscode
|
||||
|
||||
# Install go swagger
|
||||
RUN go install github.com/swaggo/swag/cmd/swag@latest
|
||||
@@ -1,33 +1,24 @@
|
||||
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
|
||||
// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/docker-existing-dockerfile
|
||||
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
|
||||
// README at: https://github.com/devcontainers/templates/tree/main/src/python
|
||||
{
|
||||
"name": "A Dockerfile containing FFmpeg, OpenCV, Go and Yarn",
|
||||
// Sets the run context to one level up instead of the .devcontainer folder.
|
||||
"context": "..",
|
||||
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
|
||||
"dockerFile": "./Dockerfile",
|
||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||
"forwardPorts": [
|
||||
3000,
|
||||
80
|
||||
"name": "go:1.24-bookworm",
|
||||
"runArgs": [
|
||||
"--name=agent",
|
||||
"--network=host"
|
||||
],
|
||||
// Uncomment the next line to run commands after the container is created - for example installing curl.
|
||||
"postCreateCommand": "cd ui && yarn install && yarn build && cd ../machinery && go mod download",
|
||||
"features": {
|
||||
"ghcr.io/devcontainers-contrib/features/ansible:1": {}
|
||||
},
|
||||
"dockerFile": "Dockerfile",
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": [
|
||||
"ms-kubernetes-tools.vscode-kubernetes-tools",
|
||||
"GitHub.copilot"
|
||||
"GitHub.copilot",
|
||||
"ms-azuretools.vscode-docker",
|
||||
"mongodb.mongodb-vscode"
|
||||
]
|
||||
}
|
||||
},
|
||||
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
|
||||
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
|
||||
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
|
||||
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
|
||||
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
|
||||
// "remoteUser": "vscode"
|
||||
"forwardPorts": [
|
||||
3000,
|
||||
8080
|
||||
],
|
||||
"postCreateCommand": "cd ui && yarn install && yarn build && cd ../machinery && go mod download"
|
||||
}
|
||||
82
.github/workflows/docker-dev.yml
vendored
82
.github/workflows/docker-dev.yml
vendored
@@ -2,61 +2,57 @@ name: Docker development build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ develop ]
|
||||
branches: [develop]
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
# If contains the keyword "#release" in the commit message.
|
||||
if: ${{ !contains(github.event.head_commit.message, '#release') }}
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Create new and append to latest manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Create new and append to latest manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
build-other:
|
||||
# If contains the keyword "#release" in the commit message.
|
||||
if: ${{ !contains(github.event.head_commit.message, '#release') }}
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
#architecture: [arm64, arm/v7, arm/v6]
|
||||
architecture: [arm64, arm/v7]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
|
||||
54
.github/workflows/docker-nightly.yml
vendored
54
.github/workflows/docker-nightly.yml
vendored
@@ -7,18 +7,16 @@ on:
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
# If contains the keyword "[release]" in the commit message.
|
||||
if: "contains(github.event.head_commit.message, '[release]')"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
run: git clone https://github.com/kerberos-io/agent && cd agent
|
||||
- name: Set up QEMU
|
||||
@@ -28,31 +26,29 @@ jobs:
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: cd agent && docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
run: cd agent && docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: cd agent && docker buildx imagetools create -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
build-other:
|
||||
# If contains the keyword "[release]" in the commit message.
|
||||
if: "contains(github.event.head_commit.message, '[release]')"
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [arm64, arm/v7, arm/v6]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
run: git clone https://github.com/kerberos-io/agent && cd agent
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: cd agent && docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: cd agent && docker buildx imagetools create --append -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
run: git clone https://github.com/kerberos-io/agent && cd agent
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: cd agent && docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: cd agent && docker buildx imagetools create --append -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
|
||||
123
.github/workflows/docker.yml
vendored
123
.github/workflows/docker.yml
vendored
@@ -1,17 +1,19 @@
|
||||
name: Docker master build
|
||||
|
||||
name: Create a new release
|
||||
on:
|
||||
push:
|
||||
# If pushed to master branch.
|
||||
branches: [ master ]
|
||||
release:
|
||||
types: [created]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Tag for the Docker image"
|
||||
required: true
|
||||
default: "test"
|
||||
|
||||
env:
|
||||
REPO: kerberos/agent
|
||||
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
# If contains the keyword "[release]" in the commit message.
|
||||
if: "contains(github.event.head_commit.message, '[release]')"
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
@@ -22,8 +24,8 @@ jobs:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
@@ -37,26 +39,25 @@ jobs:
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}} --push .
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t $REPO:${{ steps.short-sha.outputs.sha }} $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
run: docker buildx imagetools create -t $REPO:${{ github.event.inputs.tag || github.ref_name }} $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
run: docker buildx imagetools create -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
if: github.event.inputs.tag == 'test'
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{steps.short-sha.outputs.sha}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{github.event.inputs.tag || github.ref_name}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
# We'll make a GitHub release and push the build (tar) as an artifact
|
||||
- uses: rickstaa/action-create-tag@v1
|
||||
with:
|
||||
tag: ${{ steps.short-sha.outputs.sha }}
|
||||
message: "Release ${{ steps.short-sha.outputs.sha }}"
|
||||
- name: Create a release
|
||||
- name: Create a release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
latest: true
|
||||
name: ${{ steps.short-sha.outputs.sha }}
|
||||
tag: ${{ steps.short-sha.outputs.sha }}
|
||||
allowUpdates: true
|
||||
name: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
tag: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
generateReleaseNotes: false
|
||||
omitBodyDuringUpdate: true
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
# Taken from GoReleaser's own release workflow.
|
||||
# The available Snapcraft Action has some bugs described in the issue below.
|
||||
@@ -68,10 +69,8 @@ jobs:
|
||||
# mkdir -p $HOME/.cache/snapcraft/download
|
||||
# mkdir -p $HOME/.cache/snapcraft/stage-packages
|
||||
#- name: Use Snapcraft
|
||||
# run: tar -xf agent-${{matrix.architecture}}.tar && snapcraft
|
||||
# run: tar -xf agent-${{matrix.architecture}}.tar && snapcraft
|
||||
build-other:
|
||||
# If contains the keyword "[release]" in the commit message.
|
||||
if: "contains(github.event.head_commit.message, '[release]')"
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
@@ -81,39 +80,41 @@ jobs:
|
||||
architecture: [arm64, arm-v7, arm-v6]
|
||||
#architecture: [arm64, arm-v7]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t $REPO:${{ steps.short-sha.outputs.sha }} $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{steps.short-sha.outputs.sha}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
- name: Create a release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
latest: true
|
||||
allowUpdates: true
|
||||
name: ${{ steps.short-sha.outputs.sha }}
|
||||
tag: ${{ steps.short-sha.outputs.sha }}
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t $REPO:${{ github.event.inputs.tag || github.ref_name }} $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
if: github.event.inputs.tag == 'test'
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{github.event.inputs.tag || github.ref_name}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
- name: Create a release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
latest: true
|
||||
allowUpdates: true
|
||||
name: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
tag: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
generateReleaseNotes: false
|
||||
omitBodyDuringUpdate: true
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
|
||||
44
.github/workflows/go.yml
vendored
44
.github/workflows/go.yml
vendored
@@ -2,37 +2,37 @@ name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ develop, master ]
|
||||
branches: [develop, master]
|
||||
pull_request:
|
||||
branches: [ develop, master ]
|
||||
branches: [develop, master]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: kerberos/base:0a50dc9
|
||||
|
||||
image: kerberos/base:eb6b088
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
#No longer supported Go versions.
|
||||
#go-version: ['1.17', '1.18', '1.19']
|
||||
go-version: ['1.20', '1.21']
|
||||
#go-version: ['1.17', '1.18', '1.19', '1.20', '1.21']
|
||||
go-version: ["1.24"]
|
||||
|
||||
steps:
|
||||
- name: Set up Go ${{ matrix.go-version }}
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up git ownershi
|
||||
run: git config --system --add safe.directory /__w/agent/agent
|
||||
- name: Get dependencies
|
||||
run: cd machinery && go mod download
|
||||
- name: Build
|
||||
run: cd machinery && go build -v ./...
|
||||
- name: Vet
|
||||
run: cd machinery && go vet -v ./...
|
||||
- name: Test
|
||||
run: cd machinery && go test -v ./...
|
||||
- name: Set up Go ${{ matrix.go-version }}
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up git ownershi
|
||||
run: git config --system --add safe.directory /__w/agent/agent
|
||||
- name: Get dependencies
|
||||
run: cd machinery && go mod download
|
||||
- name: Build
|
||||
run: cd machinery && go build -v ./...
|
||||
- name: Vet
|
||||
run: cd machinery && go vet -v ./...
|
||||
- name: Test
|
||||
run: cd machinery && go test -v ./...
|
||||
|
||||
19
.github/workflows/pr-description.yaml
vendored
Normal file
19
.github/workflows/pr-description.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
name: Autofill PR description
|
||||
|
||||
on: pull_request
|
||||
|
||||
jobs:
|
||||
openai-pr-description:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Autofill PR description if empty using OpenAI
|
||||
uses: cedricve/azureopenai-pr-description@master
|
||||
with:
|
||||
github_token: ${{ secrets.TOKEN }}
|
||||
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
|
||||
azure_openai_api_key: ${{ secrets.AZURE_OPENAI_API_KEY }}
|
||||
azure_openai_endpoint: ${{ secrets.AZURE_OPENAI_ENDPOINT }}
|
||||
azure_openai_version: ${{ secrets.AZURE_OPENAI_VERSION }}
|
||||
overwrite_description: true
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -10,5 +10,6 @@ machinery/data/recordings
|
||||
machinery/data/snapshots
|
||||
machinery/test*
|
||||
machinery/init-dev.sh
|
||||
machinery/.env
|
||||
machinery/.env.local
|
||||
machinery/vendor
|
||||
deployments/docker/private-docker-compose.yaml
|
||||
19
.travis.yml
19
.travis.yml
@@ -1,19 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
- tip
|
||||
|
||||
before_install:
|
||||
- cd machinery
|
||||
- go mod download
|
||||
|
||||
script:
|
||||
- go vet
|
||||
- go test -race -coverprofile=coverage.txt -covermode=atomic
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
33
.vscode/launch.json
vendored
Normal file
33
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch Golang",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}/machinery/main.go",
|
||||
"args": [
|
||||
"-action",
|
||||
"run",
|
||||
"-port",
|
||||
"8080"
|
||||
],
|
||||
"envFile": "${workspaceFolder}/machinery/.env.local",
|
||||
"buildFlags": "--tags dynamic",
|
||||
},
|
||||
{
|
||||
"name": "Launch React",
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"cwd": "${workspaceFolder}/ui",
|
||||
"runtimeExecutable": "yarn",
|
||||
"runtimeArgs": [
|
||||
"start"
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
21
Dockerfile
21
Dockerfile
@@ -1,5 +1,6 @@
|
||||
|
||||
FROM kerberos/base:0a50dc9 AS build-machinery
|
||||
ARG BASE_IMAGE_VERSION=70ec57e
|
||||
FROM kerberos/base:${BASE_IMAGE_VERSION} AS build-machinery
|
||||
LABEL AUTHOR=Kerberos.io
|
||||
|
||||
ENV GOROOT=/usr/local/go
|
||||
@@ -43,8 +44,7 @@ RUN cd /go/src/github.com/kerberos-io/agent/machinery && \
|
||||
mkdir -p /agent/data/log && \
|
||||
mkdir -p /agent/data/recordings && \
|
||||
mkdir -p /agent/data/capture-test && \
|
||||
mkdir -p /agent/data/config && \
|
||||
rm -rf /go/src/gitlab.com/
|
||||
mkdir -p /agent/data/config
|
||||
|
||||
####################################
|
||||
# Let's create a /dist folder containing just the files necessary for runtime.
|
||||
@@ -58,18 +58,6 @@ RUN cp -r /agent ./
|
||||
|
||||
RUN /dist/agent/main version
|
||||
|
||||
###############################################
|
||||
# Build Bento4 -> we want fragmented mp4 files
|
||||
|
||||
ENV BENTO4_VERSION 1.6.0-639
|
||||
RUN cd /tmp && git clone https://github.com/axiomatic-systems/Bento4 && cd Bento4 && \
|
||||
git checkout tags/v${BENTO4_VERSION} && \
|
||||
cd Build && \
|
||||
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||
make && \
|
||||
mv /tmp/Bento4/Build/mp4fragment /dist/agent/ && \
|
||||
rm -rf /tmp/Bento4
|
||||
|
||||
FROM node:18.14.0-alpine3.16 AS build-ui
|
||||
|
||||
RUN apk update && apk upgrade --available && sync
|
||||
@@ -111,7 +99,6 @@ RUN apk update && apk add ca-certificates curl libstdc++ libc6-compat --no-cache
|
||||
# Try running agent
|
||||
|
||||
RUN mv /agent/* /home/agent/
|
||||
RUN cp /home/agent/mp4fragment /usr/local/bin/
|
||||
RUN /home/agent/main version
|
||||
|
||||
#######################
|
||||
@@ -148,4 +135,4 @@ HEALTHCHECK CMD curl --fail http://localhost:80 || exit 1
|
||||
# Leeeeettttt'ssss goooooo!!!
|
||||
# Run the shizzle from the right working directory.
|
||||
WORKDIR /home/agent
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
203
README.md
203
README.md
@@ -17,11 +17,14 @@
|
||||
<a href="LICENSE"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
|
||||
[](https://brianmacdonald.github.io/Ethonate/address#0xf4a759C9436E2280Ea9cdd23d3144D95538fF4bE)
|
||||
<a target="_blank" href="https://twitter.com/kerberosio?ref_src=twsrc%5Etfw"><img src="https://img.shields.io/twitter/url.svg?label=Follow%20%40kerberosio&style=social&url=https%3A%2F%2Ftwitter.com%2Fkerberosio" alt="Twitter Widget"></a>
|
||||
[](https://discord.gg/Bj77Vqfp2G)
|
||||
[](https://snapcraft.io/kerberosio)
|
||||
|
||||
[](https://joinslack.kerberos.io/)
|
||||
|
||||
[**Docker Hub**](https://hub.docker.com/r/kerberos/agent) | [**Documentation**](https://doc.kerberos.io) | [**Website**](https://kerberos.io) | [**View Demo**](https://demo.kerberos.io)
|
||||
|
||||
> Before you continue, this repository discusses one of the components of the Kerberos.io stack, the Kerberos Agent, in depth. If you are [looking for an end-to-end deployment guide have a look here](https://github.com/kerberos-io/deployment).
|
||||
|
||||
Kerberos Agent is an isolated and scalable video (surveillance) management agent made available as Open Source under the MIT License. This means that all the source code is available for you or your company, and you can use, transform and distribute the source code; as long you keep a reference of the original license. Kerberos Agent can be used for commercial usage (which was not the case for v2). Read more [about the license here](LICENSE).
|
||||
|
||||

|
||||
@@ -30,7 +33,7 @@ Kerberos Agent is an isolated and scalable video (surveillance) management agent
|
||||
|
||||
- An IP camera which supports a RTSP H264 or H265 encoded stream,
|
||||
- (or) a USB camera, Raspberry Pi camera or other camera, that [you can transform to a valid RTSP H264 or H265 stream](https://github.com/kerberos-io/camera-to-rtsp).
|
||||
- Any hardware (ARMv6, ARMv7, ARM64, AMD) that can run a binary or container, for example: a Raspberry Pi, NVidia Jetson, Intel NUC, a VM, Bare metal machine or a full blown Kubernetes cluster.
|
||||
- Any hardware (ARMv6, ARMv7, ARM64, AMD64) that can run a binary or container, for example: a Raspberry Pi, NVidia Jetson, Intel NUC, a VM, Bare metal machine or a full blown Kubernetes cluster.
|
||||
|
||||
## :video_camera: Is my camera working?
|
||||
|
||||
@@ -64,8 +67,8 @@ There are a myriad of cameras out there (USB, IP and other cameras), and it migh
|
||||
|
||||
1. [Contribute with Codespaces](#contribute-with-codespaces)
|
||||
2. [Develop and build](#develop-and-build)
|
||||
3. [Building from source](#building-from-source)
|
||||
4. [Building for Docker](#building-for-docker)
|
||||
3. [Building from source](#building-from-source)
|
||||
4. [Building for Docker](#building-for-docker)
|
||||
|
||||
### Varia
|
||||
|
||||
@@ -75,17 +78,17 @@ There are a myriad of cameras out there (USB, IP and other cameras), and it migh
|
||||
|
||||
## Quickstart - Docker
|
||||
|
||||
The easiest to get your Kerberos Agent up and running is to use our public image on [Docker hub](https://hub.docker.com/r/kerberos/agent). Once you have selected a specific tag, run below `docker` command, which will open the web interface of your Kerberos agent on port `80`, and off you go. For a more configurable and persistent deployment have a look at [Running and automating a Kerberos Agent](#running-and-automating-a-kerberos-agent).
|
||||
The easiest way to get your Kerberos Agent up and running is to use our public image on [Docker hub](https://hub.docker.com/r/kerberos/agent). Once you have selected a specific tag, run `docker` command below, which will open the web interface of your Kerberos agent on port `80`, and off you go. For a more configurable and persistent deployment have a look at [Running and automating a Kerberos Agent](#running-and-automating-a-kerberos-agent).
|
||||
|
||||
docker run -p 80:80 --name mycamera -d --restart=always kerberos/agent:latest
|
||||
|
||||
If you want to connect to an USB or Raspberry Pi camera, [you'll need to run our side car container](https://github.com/kerberos-io/camera-to-rtsp) which proxy the camera to an RTSP stream. In that case you'll want to configure the Kerberos Agent container to run in the host network, so it can connect directly to the RTSP sidecar.
|
||||
If you want to connect to a USB or Raspberry Pi camera, [you'll need to run our side car container](https://github.com/kerberos-io/camera-to-rtsp) which proxies the camera to an RTSP stream. In that case you'll want to configure the Kerberos Agent container to run in the host network, so it can connect directly to the RTSP sidecar.
|
||||
|
||||
docker run --network=host --name mycamera -d --restart=always kerberos/agent:latest
|
||||
|
||||
## Quickstart - Balena
|
||||
|
||||
Run Kerberos Agent with [Balena Cloud](https://www.balena.io/) super powers. Monitor your Kerberos Agent with seamless remote access, over the air updates, an encrypted public `https` endpoint and many more. Checkout our application `video-surveillance` on [Balena Hub](https://hub.balena.io/apps/2064752/video-surveillance), and create your first or fleet of Kerberos Agent(s).
|
||||
Run Kerberos Agent with [Balena Cloud](https://www.balena.io/) super powers. Monitor your Kerberos Agent with seamless remote access, over the air updates, an encrypted public `https` endpoint and much more. Checkout our application `video-surveillance` on [Balena Hub](https://hub.balena.io/apps/2064752/video-surveillance), and create your first or fleet of Kerberos Agent(s).
|
||||
|
||||
[](https://dashboard.balena-cloud.com/deploy?repoUrl=https://github.com/kerberos-io/balena-agent)
|
||||
|
||||
@@ -101,15 +104,17 @@ Once installed you can find your Kerberos Agent configration at `/var/snap/kerbe
|
||||
|
||||
## A world of Kerberos Agents
|
||||
|
||||
The Kerberos Agent is an isolated and scalable video (surveillance) management agent with a strong focus on user experience, scalability, resilience, extension and integration. Next to the Kerberos Agent, Kerberos.io provides many other tools such as [Kerberos Factory](https://github.com/kerberos-io/factory), [Kerberos Vault](https://github.com/kerberos-io/vault) and [Kerberos Hub](https://github.com/kerberos-io/hub) to provide additional capabilities: bring your own cloud, bring your own storage, central overview, live streaming, machine learning etc.
|
||||
The Kerberos Agent is an isolated and scalable video (surveillance) management agent with a strong focus on user experience, scalability, resilience, extension and integration. Next to the Kerberos Agent, Kerberos.io provides many other tools such as [Kerberos Factory](https://github.com/kerberos-io/factory), [Kerberos Vault](https://github.com/kerberos-io/vault), and [Kerberos Hub](https://github.com/kerberos-io/hub) to provide additional capabilities: bring your own cloud, bring your own storage, central overview, live streaming, machine learning, etc.
|
||||
|
||||
As mentioned above Kerberos.io applies the concept of agents. An agent is running next to (or on) your camera, and is processing a single camera feed. It applies motion based or continuous recording and make those recordings available through a user friendly web interface. A Kerberos Agent allows you to connect to other cloud services or integrates with custom applications. Kerberos Agent is used for personal usage and scales to enterprise production level deployments.
|
||||
[](https://github.com/kerberos-io/deployment)
|
||||
|
||||
As mentioned above Kerberos.io applies the concept of agents. An agent is running next to (or on) your camera, and is processing a single camera feed. It applies motion based or continuous recording and makes those recordings available through a user friendly web interface. A Kerberos Agent allows you to connect to other cloud services or integrate with custom applications. Kerberos Agent is used for personal applications and scales to enterprise production level deployments. Learn more about the [deployment strategies here](<(https://github.com/kerberos-io/deployment)>).
|
||||
|
||||
This repository contains everything you'll need to know about our core product, Kerberos Agent. Below you'll find a brief list of features and functions.
|
||||
|
||||
- Low memory and CPU usage.
|
||||
- Simplified and modern user interface.
|
||||
- Multi architecture (ARMv7, ARMv8, amd64, etc).).
|
||||
- Multi architecture (ARMv6, ARMv7, ARM64, AMD64)
|
||||
- Multi stream, for example recording in H265, live streaming and motion detection in H264.
|
||||
- Multi camera support: IP Cameras (H264 and H265), USB cameras and Raspberry Pi Cameras [through a RTSP proxy](https://github.com/kerberos-io/camera-to-rtsp).
|
||||
- Single camera per instance (e.g. one container per camera).
|
||||
@@ -129,7 +134,7 @@ This repository contains everything you'll need to know about our core product,
|
||||
|
||||
## How to run and deploy a Kerberos Agent
|
||||
|
||||
As described before a Kerberos Agent is a container, which can be deployed through various ways and automation tools such as `docker`, `docker compose`, `kubernetes` and the list goes on. To simplify your life we have come with concrete and working examples of deployments to help you speed up your Kerberos.io journey.
|
||||
A Kerberos Agent, as previously mentioned, is a container. You can deploy it using various methods and automation tools, including `docker`, `docker compose`, `kubernetes` and more. To streamline your Kerberos.io experience, we provide concrete deployment examples to speed up your Kerberos.io journey”
|
||||
|
||||
We have documented the different deployment models [in the `deployments` directory](https://github.com/kerberos-io/agent/tree/master/deployments) of this repository. There you'll learn and find how to deploy using:
|
||||
|
||||
@@ -143,7 +148,7 @@ We have documented the different deployment models [in the `deployments` directo
|
||||
- [Balena](https://github.com/kerberos-io/agent/tree/master/deployments#8-balena)
|
||||
- [Snap](https://github.com/kerberos-io/agent/tree/master/deployments#9-snap)
|
||||
|
||||
By default your Kerberos Agents will store all its configuration and recordings inside the container. To help you automate and have a more consistent data governance, you can attach volumes to configure and persist data of your Kerberos Agents, and/or configure each Kerberos Agent through environment variables.
|
||||
By default, your Kerberos Agents store all configuration and recordings within the container. To help you automate and have a more consistent data governance, you can attach volumes to configure and persist data of your Kerberos Agents and/or configure each Kerberos Agent through environment variables.
|
||||
|
||||
## Access the Kerberos Agent
|
||||
|
||||
@@ -158,23 +163,23 @@ The default username and password for the Kerberos Agent is:
|
||||
|
||||
## Configure and persist with volume mounts
|
||||
|
||||
An example of how to mount a host directory is shown below using `docker`, but is applicable for [all the deployment models and tools described above](#running-and-automating-a-kerberos-agent).
|
||||
An example of how to mount a host directory is shown below using `docker`, but is applicable for [all of the deployment models and tools described above](#running-and-automating-a-kerberos-agent).
|
||||
|
||||
You attach a volume to your container by leveraging the `-v` option. To mount your own configuration file and recordings folder, execute as following:
|
||||
You attach a volume to your container by leveraging the `-v` option. To mount your own configuration file and recordings folder, run the following commands:
|
||||
|
||||
docker run -p 80:80 --name mycamera \
|
||||
-v $(pwd)/agent/config:/home/agent/data/config \
|
||||
-v $(pwd)/agent/recordings:/home/agent/data/recordings \
|
||||
-d --restart=always kerberos/agent:latest
|
||||
|
||||
More example [can be found in the deployment section](https://github.com/kerberos-io/agent/tree/master/deployments) for each deployment and automation tool. Please note to verify the permissions of the directory/volume you are attaching. More information in [this issue](https://github.com/kerberos-io/agent/issues/80).
|
||||
More examples for each deployment and automation tool [can be found in the deployment section](https://github.com/kerberos-io/agent/tree/master/deployments). Be sure to verify the permissions of the directory/volume you are attaching. More information in [this issue](https://github.com/kerberos-io/agent/issues/80).
|
||||
|
||||
chmod -R 755 kerberos-agent/
|
||||
chown 100:101 kerberos-agent/ -R
|
||||
|
||||
## Configure with environment variables
|
||||
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments easier when leveraging `docker compose` or `kubernetes` deployments much easier and scalable. Using this approach we simplify automation through `ansible` and `terraform`.
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deploying with `docker compose` or `kubernetes` much easier and more scalable. Using this approach, we simplify automation through `ansible` and `terraform`.
|
||||
|
||||
docker run -p 80:80 --name mycamera \
|
||||
-e AGENT_NAME=mycamera \
|
||||
@@ -185,77 +190,88 @@ Next to attaching the configuration file, it is also possible to override the co
|
||||
|
||||
| Name | Description | Default Value |
|
||||
| --------------------------------------- | ----------------------------------------------------------------------------------------------- | ------------------------------ |
|
||||
| `LOG_LEVEL` | Level for logging, could be "info", "warning", "debug", "error" or "fatal". | "info" |
|
||||
| `LOG_OUTPUT` | Logging output format "json" or "text". | "text" |
|
||||
| `AGENT_MODE` | You can choose to run this in 'release' for production, and or 'demo' for showcasing. | "release" |
|
||||
| `AGENT_TLS_INSECURE` | Specify if you want to use `InsecureSkipVerify` for the internal HTTP client. | "false" |
|
||||
| `AGENT_USERNAME` | The username used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_PASSWORD` | The password used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_KEY` | A unique identifier for your Kerberos Agent, this is auto-generated but can be overriden. | "" |
|
||||
| `AGENT_NAME` | The agent friendly-name. | "agent" |
|
||||
| `AGENT_TIMEZONE` | Timezone which is used for converting time. | "Africa/Ceuta" |
|
||||
| `AGENT_REMOVE_AFTER_UPLOAD` | When enabled, recordings uploaded successfully to a storage will be removed from disk. | "true" |
|
||||
| `AGENT_OFFLINE` | Makes sure no external connection is made. | "false" |
|
||||
| `AGENT_AUTO_CLEAN` | Cleans up the recordings directory. | "true" |
|
||||
| `AGENT_AUTO_CLEAN_MAX_SIZE` | If `AUTO_CLEAN` enabled, set the max size of the recordings directory in (MB). | "100" |
|
||||
| `AGENT_TIME` | Enable the timetable for Kerberos Agent | "false" |
|
||||
| `AGENT_TIMETABLE` | A (weekly) time table to specify when to make recordings "start1,end1,start2,end2;start1.. | "" |
|
||||
| `AGENT_REGION_POLYGON` | A single polygon set for motion detection: "x1,y1;x2,y2;x3,y3;... | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_RTSP` | Full-HD RTSP endpoint to the camera you're targetting. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_SUB_RTSP` | Sub-stream RTSP endpoint used for livestreaming (WebRTC). | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF` | Mark as a compliant ONVIF device. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR` | ONVIF endpoint/address running on the camera. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME` | ONVIF username to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD` | ONVIF password to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_MOTION` | Toggle for enabling or disabling motion. | "true" |
|
||||
| `AGENT_CAPTURE_LIVEVIEW` | Toggle for enabling or disabling liveview. | "true" |
|
||||
| `AGENT_CAPTURE_SNAPSHOTS` | Toggle for enabling or disabling snapshot generation. | "true" |
|
||||
| `AGENT_CAPTURE_RECORDING` | Toggle for enabling making recordings. | "true" |
|
||||
| `AGENT_CAPTURE_CONTINUOUS` | Toggle for enabling continuous "true" or motion "false". | "false" |
|
||||
| `AGENT_CAPTURE_PRERECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) before after motion event. | "10" |
|
||||
| `AGENT_CAPTURE_POSTRECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) after motion event. | "20" |
|
||||
| `AGENT_CAPTURE_MAXLENGTH` | The maximum length of a single recording (seconds). | "30" |
|
||||
| `AGENT_CAPTURE_PIXEL_CHANGE` | If `CONTINUOUS` set to `false`, the number of pixel require to change before motion triggers. | "150" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED` | Set the format of the recorded MP4 to fragmented (suitable for HLS). | "false" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED_DURATION` | If `AGENT_CAPTURE_FRAGMENTED` set to `true`, define the duration (seconds) of a fragment. | "8" |
|
||||
| `AGENT_MQTT_URI` | A MQTT broker endpoint that is used for bi-directional communication (live view, onvif, etc) | "tcp://mqtt.kerberos.io:1883" |
|
||||
| `AGENT_MQTT_USERNAME` | Username of the MQTT broker. | "" |
|
||||
| `AGENT_MQTT_PASSWORD` | Password of the MQTT broker. | "" |
|
||||
| `AGENT_STUN_URI` | When using WebRTC, you'll need to provide a STUN server. | "stun:turn.kerberos.io:8443" |
|
||||
| `AGENT_TURN_URI` | When using WebRTC, you'll need to provide a TURN server. | "turn:turn.kerberos.io:8443" |
|
||||
| `AGENT_TURN_USERNAME` | TURN username used for WebRTC. | "username1" |
|
||||
| `AGENT_TURN_PASSWORD` | TURN password used for WebRTC. | "password1" |
|
||||
| `AGENT_CLOUD` | Store recordings in Kerberos Hub (s3), Kerberos Vault (kstorage) or Dropbox (dropbox). | "s3" |
|
||||
| `AGENT_HUB_ENCRYPTION` | Turning on/off encryption of traffic from your Kerberos Agent to Kerberos Hub. | "true" |
|
||||
| `AGENT_HUB_URI` | The Kerberos Hub API, defaults to our Kerberos Hub SAAS. | "https://api.hub.domain.com" |
|
||||
| `AGENT_HUB_KEY` | The access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_PRIVATE_KEY` | The secret access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_REGION` | The Kerberos Hub region, to which you want to upload. | "" |
|
||||
| `AGENT_HUB_SITE` | The site ID of a site you've created in your Kerberos Hub account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
|
||||
| `AGENT_KERBEROSVAULT_ACCESS_KEY` | The access key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECRET_KEY` | The secret key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_PROVIDER` | A Kerberos Vault provider you have created (optional). | "" |
|
||||
| `AGENT_KERBEROSVAULT_DIRECTORY` | The directory, in the provider, where the recordings will be stored in. | "" |
|
||||
| `AGENT_DROPBOX_ACCESS_TOKEN` | The Access Token from your Dropbox app, that is used to leverage the Dropbox SDK. | "" |
|
||||
| `AGENT_DROPBOX_DIRECTORY` | The directory, in the provider, where the recordings will be stored in. | "" |
|
||||
| `AGENT_ENCRYPTION` | Enable 'true' or disable 'false' end-to-end encryption for MQTT messages. | "false" |
|
||||
| `AGENT_ENCRYPTION_RECORDINGS` | Enable 'true' or disable 'false' end-to-end encryption for recordings. | "false" |
|
||||
| `AGENT_ENCRYPTION_FINGERPRINT` | The fingerprint of the keypair (public/private keys), so you know which one to use. | "" |
|
||||
| `AGENT_ENCRYPTION_PRIVATE_KEY` | The private key (assymetric/RSA) to decryptand sign requests send over MQTT. | "" |
|
||||
| `AGENT_ENCRYPTION_SYMMETRIC_KEY` | The symmetric key (AES) to encrypt and decrypt request send over MQTT. | "" |
|
||||
| `LOG_LEVEL` | Level for logging, could be "info", "warning", "debug", "error" or "fatal". | "info" |
|
||||
| `LOG_OUTPUT` | Logging output format "json" or "text". | "text" |
|
||||
| `AGENT_MODE` | You can choose to run this in 'release' for production, and or 'demo' for showcasing. | "release" |
|
||||
| `AGENT_TLS_INSECURE` | Specify if you want to use `InsecureSkipVerify` for the internal HTTP client. | "false" |
|
||||
| `AGENT_USERNAME` | The username used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_PASSWORD` | The password used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_KEY` | A unique identifier for your Kerberos Agent, this is auto-generated but can be overriden. | "" |
|
||||
| `AGENT_NAME` | The agent friendly-name. | "agent" |
|
||||
| `AGENT_TIMEZONE` | Timezone which is used for converting time. | "Africa/Ceuta" |
|
||||
| `AGENT_REMOVE_AFTER_UPLOAD` | When enabled, recordings uploaded successfully to a storage will be removed from disk. | "true" |
|
||||
| `AGENT_OFFLINE` | Makes sure no external connection is made. | "false" |
|
||||
| `AGENT_AUTO_CLEAN` | Cleans up the recordings directory. | "true" |
|
||||
| `AGENT_AUTO_CLEAN_MAX_SIZE` | If `AUTO_CLEAN` enabled, set the max size of the recordings directory (in MB). | "100" |
|
||||
| `AGENT_TIME` | Enable the timetable for Kerberos Agent | "false" |
|
||||
| `AGENT_TIMETABLE` | A (weekly) time table to specify when to make recordings "start1,end1,start2,end2;start1.. | "" |
|
||||
| `AGENT_REGION_POLYGON` | A single polygon set for motion detection: "x1,y1;x2,y2;x3,y3;... | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_RTSP` | Full-HD RTSP endpoint to the camera you're targetting. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_SUB_RTSP` | Sub-stream RTSP endpoint used for livestreaming (WebRTC). | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF` | Mark as a compliant ONVIF device. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR` | ONVIF endpoint/address running on the camera. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME` | ONVIF username to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD` | ONVIF password to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_MOTION` | Toggle for enabling or disabling motion. | "true" |
|
||||
| `AGENT_CAPTURE_LIVEVIEW` | Toggle for enabling or disabling liveview. | "true" |
|
||||
| `AGENT_CAPTURE_SNAPSHOTS` | Toggle for enabling or disabling snapshot generation. | "true" |
|
||||
| `AGENT_CAPTURE_RECORDING` | Toggle for enabling making recordings. | "true" |
|
||||
| `AGENT_CAPTURE_CONTINUOUS` | Toggle for enabling continuous "true" or motion "false". | "false" |
|
||||
| `AGENT_CAPTURE_PRERECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) before/after motion event. | "10" |
|
||||
| `AGENT_CAPTURE_POSTRECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) after motion event. | "20" |
|
||||
| `AGENT_CAPTURE_MAXLENGTH` | The maximum length of a single recording (seconds). | "30" |
|
||||
| `AGENT_CAPTURE_PIXEL_CHANGE` | If `CONTINUOUS` set to `false`, the number of pixel require to change before motion triggers. | "150" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED` | Set the format of the recorded MP4 to fragmented (suitable for HLS). | "false" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED_DURATION` | If `AGENT_CAPTURE_FRAGMENTED` set to `true`, define the duration (seconds) of a fragment. | "8" |
|
||||
| `AGENT_MQTT_URI` | An MQTT broker endpoint that is used for bi-directional communication (live view, onvif, etc) | "tcp://mqtt.kerberos.io:1883" |
|
||||
| `AGENT_MQTT_USERNAME` | Username of the MQTT broker. | "" |
|
||||
| `AGENT_MQTT_PASSWORD` | Password of the MQTT broker. | "" |
|
||||
| `AGENT_REALTIME_PROCESSING` | If `AGENT_REALTIME_PROCESSING` set to `true`, the agent will send key frames to the topic | "" |
|
||||
| `AGENT_REALTIME_PROCESSING_TOPIC` | The topic to which keyframes will be sent in base64 encoded format. | "" |
|
||||
| `AGENT_STUN_URI` | When using WebRTC, you'll need to provide a STUN server. | "stun:turn.kerberos.io:8443" |
|
||||
| `AGENT_FORCE_TURN` | Force using a TURN server, by generating relay candidates only. | "false" |
|
||||
| `AGENT_TURN_URI` | When using WebRTC, you'll need to provide a TURN server. | "turn:turn.kerberos.io:8443" |
|
||||
| `AGENT_TURN_USERNAME` | TURN username used for WebRTC. | "username1" |
|
||||
| `AGENT_TURN_PASSWORD` | TURN password used for WebRTC. | "password1" |
|
||||
| `AGENT_CLOUD` | Store recordings in Kerberos Hub (s3), Kerberos Vault (kstorage), or Dropbox (dropbox). | "s3" |
|
||||
| `AGENT_HUB_ENCRYPTION` | Turning on/off encryption of traffic from your Kerberos Agent to Kerberos Hub. | "true" |
|
||||
| `AGENT_HUB_URI` | The Kerberos Hub API, defaults to our Kerberos Hub SAAS. | "https://api.hub.domain.com" |
|
||||
| `AGENT_HUB_KEY` | The access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_PRIVATE_KEY` | The secret access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_REGION` | The Kerberos Hub region, to which you want to upload. | "" |
|
||||
| `AGENT_HUB_SITE` | The site ID of a site you've created in your Kerberos Hub account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
|
||||
| `AGENT_KERBEROSVAULT_ACCESS_KEY` | The access key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECRET_KEY` | The secret key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_PROVIDER` | A Kerberos Vault provider you have created (optional). | "" |
|
||||
| `AGENT_KERBEROSVAULT_DIRECTORY` | The directory, in the Kerberos vault, where the recordings will be stored. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY` | The access key of a secondary Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY` | The secret key of a secondary Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_PROVIDER` | A secondary Kerberos Vault provider you have created (optional). | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY` | The directory, in the secondary Kerberos vault, where the recordings will be stored. | "" |
|
||||
| `AGENT_DROPBOX_ACCESS_TOKEN` | The Access Token from your Dropbox app, that is used to leverage the Dropbox SDK. | "" |
|
||||
| `AGENT_DROPBOX_DIRECTORY` | The directory, in Dropbox, where the recordings will be stored. | "" |
|
||||
| `AGENT_ENCRYPTION` | Enable 'true' or disable 'false' end-to-end encryption for MQTT messages. | "false" |
|
||||
| `AGENT_ENCRYPTION_RECORDINGS` | Enable 'true' or disable 'false' end-to-end encryption for recordings. | "false" |
|
||||
| `AGENT_ENCRYPTION_FINGERPRINT` | The fingerprint of the keypair (public/private keys), so you know which one to use. | "" |
|
||||
| `AGENT_ENCRYPTION_PRIVATE_KEY` | The private key (assymetric/RSA) to decrypt and sign requests send over MQTT. | "" |
|
||||
| `AGENT_ENCRYPTION_SYMMETRIC_KEY` | The symmetric key (AES) to encrypt and decrypt requests sent over MQTT. | "" |
|
||||
| `AGENT_SIGNING` | Enable 'true' or disable 'false' for signing recordings. | "true" |
|
||||
| `AGENT_SIGNING_PRIVATE_KEY` | The private key (RSA) to sign the recordings fingerprint to validate origin. | "" - uses default one if empty |
|
||||
|
||||
|
||||
## Encryption
|
||||
|
||||
You can encrypt your recordings and outgoing MQTT messages with your own AES and RSA keys by enabling the encryption settings. Once enabled all your recordings will be encrypted using AES-256-CBC and your symmetric key. You can either use the default `openssl` toolchain to decrypt the recordings with your AES key, as following:
|
||||
You can encrypt your recordings and outgoing MQTT messages with your own AES and RSA keys by enabling the encryption settings. Once enabled, all your recordings will be encrypted using AES-256-CBC and your symmetric key. You can use the default `openssl` toolchain to decrypt the recordings with your AES key, as following:
|
||||
|
||||
openssl aes-256-cbc -d -md md5 -in encrypted.mp4 -out decrypted.mp4 -k your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
|
||||
|
||||
, and additionally you can decrypt a folder of recordings, using the Kerberos Agent binary as following:
|
||||
Or you can decrypt a folder of recordings, using the Kerberos Agent binary as following:
|
||||
|
||||
go run main.go -action decrypt ./data/recordings your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
|
||||
|
||||
or for a single file:
|
||||
Or for a single file:
|
||||
|
||||
go run main.go -action decrypt ./data/recordings/video.mp4 your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
|
||||
|
||||
@@ -264,8 +280,9 @@ or for a single file:
|
||||
If we talk about video encoders and decoders (codecs) there are 2 major video codecs on the market: H264 and H265. Taking into account your use case, you might use one over the other. We will provide an (not complete) overview of the advantages and disadvantages of each codec in the field of video surveillance and video analytics. If you would like to know more, you should look for additional resources on the internet (or if you like to read physical items, books still exists nowadays).
|
||||
|
||||
- H264 (also known as AVC or MPEG-4 Part 10)
|
||||
|
||||
- Is the most common one and most widely supported for IP cameras.
|
||||
- Supported in the majority of browsers, operating system and third-party applications.
|
||||
- Supported in the majority of browsers, operating system, and third-party applications.
|
||||
- Can be embedded in commercial and 3rd party applications.
|
||||
- Different levels of compression (high, medium, low, ..)
|
||||
- Better quality / compression ratio, shows less artifacts at medium compression ratios.
|
||||
@@ -279,14 +296,14 @@ If we talk about video encoders and decoders (codecs) there are 2 major video co
|
||||
- H265 shows artifacts in motion based environments (which is less with H264).
|
||||
- Recording the same video (resolution, duration and FPS) in H264 and H265 will result in approx 50% the file size.
|
||||
- Not supported in technologies such as WebRTC
|
||||
|
||||
|
||||
Conclusion: depending on the use case you might choose one over the other, and you can use both at the same time. For example you can use H264 (main stream) for livestreaming, and H265 (sub stream) for recording. If you wish to play recordings in a cross-platform and cross-browser environment, you might opt for H264 for better support.
|
||||
|
||||
## Contribute with Codespaces
|
||||
|
||||
One of the major blockers for letting you contribute to an Open Source project is to setup your local development machine. Why? Because you might have already some tools and libraries installed that are used for other projects, and the libraries you would need for Kerberos Agent, for example FFmpeg, might require a different version. Welcome to the dependency hell..
|
||||
One of the major blockers for letting you contribute to an Open Source project is to set up your local development machine. Why? Because you might already have some tools and libraries installed that are used for other projects, and the libraries you would need for Kerberos Agent, for example FFmpeg, might require a different version. Welcome to dependency hell...
|
||||
|
||||
By leveraging codespaces, which the Kerberos Agent repo supports, you will be able to setup the required development environment in a few minutes. By opening the `<> Code` tab on the top of the page, you will be able to create a codespace, [using the Kerberos Devcontainer](https://github.com/kerberos-io/devcontainer) base image. This image requires all the relevant dependencies: FFmpeg, OpenCV, Golang, Node, Yarn, etc.
|
||||
By leveraging codespaces, which the Kerberos Agent repo supports, you will be able to set up the required development environment in a few minutes. By opening the `<> Code` tab on the top of the page, you will be able to create a codespace, [using the Kerberos Devcontainer](https://github.com/kerberos-io/devcontainer) base image. This image requires all the relevant dependencies: FFmpeg, OpenCV, Golang, Node, Yarn, etc.
|
||||
|
||||

|
||||
|
||||
@@ -313,7 +330,7 @@ On opening of the GitHub Codespace, some dependencies will be installed. Once th
|
||||
WS_URL: `${websocketprotocol}//${externalHost}/ws`,
|
||||
};
|
||||
|
||||
Go and open two terminals one for the `ui` project and one for the `machinery` project.
|
||||
Go and open two terminals: one for the `ui` project and one for the `machinery` project.
|
||||
|
||||
1. Terminal A:
|
||||
|
||||
@@ -329,11 +346,11 @@ Once executed, a popup will show up mentioning `portforwarding`. You should see
|
||||
|
||||

|
||||
|
||||
As mentioned above, copy the hostname of the `machinery` DNS name, and past it in the `ui/src/config.json` file. Once done reload, the `ui` page in your browser, and you should be able to access the login page with the default credentials `root` and `root`.
|
||||
As mentioned above, copy the hostname of the `machinery` DNS name, and paste it in the `ui/src/config.json` file. Once done, reload the `ui` page in your browser, and you should be able to access the login page with the default credentials `root` and `root`.
|
||||
|
||||
## Develop and build
|
||||
|
||||
Kerberos Agent is divided in two parts a `machinery` and `web`. Both parts live in this repository in their relative folders. For development or running the application on your local machine, you have to run both the `machinery` and the `web` as described below. When running in production everything is shipped as only one artifact, read more about this at [Building for production](#building-for-production).
|
||||
The Kerberos Agent is divided in two parts: a `machinery` and `web` part. Both parts live in this repository in their relative folders. For development or running the application on your local machine, you have to run both the `machinery` and the `web` as described below. When running in production everything is shipped as only one artifact, read more about this at [Building for production](#building-for-production).
|
||||
|
||||
### UI
|
||||
|
||||
@@ -347,13 +364,13 @@ This will start a webserver and launches the web app on port `3000`.
|
||||
|
||||

|
||||
|
||||
Once signed in you'll see the dashboard page showing up. After successfull configuration of your agent, you'll should see a live view and possible events recorded to disk.
|
||||
Once signed in you'll see the dashboard page. After successful configuration of your agent, you should see a live view and possible events recorded to disk.
|
||||
|
||||

|
||||
|
||||
### Machinery
|
||||
|
||||
The `machinery` is a **Golang** project which delivers two functions: it acts as the Kerberos Agent which is doing all the heavy lifting with camera processing and other kinds of logic, on the other hand it acts as a webserver (Rest API) that allows communication from the web (React) or any other custom application. The API is documented using `swagger`.
|
||||
The `machinery` is a **Golang** project which delivers two functions: it acts as the Kerberos Agent which is doing all the heavy lifting with camera processing and other kinds of logic and on the other hand it acts as a webserver (Rest API) that allows communication from the web (React) or any other custom application. The API is documented using `swagger`.
|
||||
|
||||
You can simply run the `machinery` using following commands.
|
||||
|
||||
@@ -361,13 +378,13 @@ You can simply run the `machinery` using following commands.
|
||||
cd machinery
|
||||
go run main.go -action run -port 80
|
||||
|
||||
This will launch the Kerberos Agent and run a webserver on port `80`. You can change the port by your own preference. We strongly support the usage of [Goland](https://www.jetbrains.com/go/) or [Visual Studio Code](https://code.visualstudio.com/), as it comes with all the debugging and linting features builtin.
|
||||
This will launch the Kerberos Agent and run a webserver on port `80`. You can change the port by your own preference. We strongly support the usage of [Goland](https://www.jetbrains.com/go/) or [Visual Studio Code](https://code.visualstudio.com/), as it comes with all the debugging and linting features built in.
|
||||
|
||||

|
||||
|
||||
## Building from source
|
||||
|
||||
Running Kerberos Agent in production only require a single binary to run. Nevertheless, we have two parts, the `machinery` and the `web`, we merge them during build time. So this is what happens.
|
||||
Running Kerberos Agent in production only requires a single binary to run. Nevertheless, we have two parts: the `machinery` and the `web`, we merge them during build time. So this is what happens.
|
||||
|
||||
### UI
|
||||
|
||||
@@ -378,7 +395,7 @@ To build the Kerberos Agent web app, you simply have to run the `build` command
|
||||
|
||||
### Machinery
|
||||
|
||||
Building the `machinery` is also super easy 🚀, by using `go build` you can create a single binary which ships it all; thank you Golang. After building you will endup with a binary called `main`, this is what contains everything you need to run Kerberos Agent.
|
||||
Building the `machinery` is also super easy 🚀, by using `go build` you can create a single binary which ships it all; thank you Golang. After building you will end up with a binary called `main`, this is what contains everything you need to run Kerberos Agent.
|
||||
|
||||
Remember the build step of the `web` part, during build time we move the build directory to the `machinery` directory. Inside the `machinery` web server [we reference the](https://github.com/kerberos-io/agent/blob/master/machinery/src/routers/http/Server.go#L44) `build` directory. This makes it possible to just a have single web server that runs it all.
|
||||
|
||||
@@ -387,8 +404,8 @@ Remember the build step of the `web` part, during build time we move the build d
|
||||
|
||||
## Building for Docker
|
||||
|
||||
Inside the root of this `agent` repository, you will find a `Dockerfile`. This file contains the instructions for building and shipping **Kerberos Agent**. Important to note is that start from a prebuild base image, `kerberos/base:xxx`.
|
||||
This base image contains already a couple of tools, such as Golang, FFmpeg and OpenCV. We do this for faster compilation times.
|
||||
Inside the root of this `agent` repository, you will find a `Dockerfile`. This file contains the instructions for building and shipping a **Kerberos Agent**. Important to note is that you start from a prebuilt base image, `kerberos/base:xxx`.
|
||||
This base image already contains a couple of tools, such as Golang, FFmpeg and OpenCV. We do this for faster compilation times.
|
||||
|
||||
By running the `docker build` command, you will create the Kerberos Agent Docker image. After building you can simply run the image as a Docker container.
|
||||
|
||||
@@ -404,7 +421,7 @@ Read more about this [at the FAQ](#faq) below.
|
||||
|
||||
## Contributors
|
||||
|
||||
This project exists thanks to all the people who contribute.
|
||||
This project exists thanks to all the people who contribute. Bravo!
|
||||
|
||||
<a href="https://github.com/kerberos-io/agent/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=kerberos-io/agent" />
|
||||
|
||||
2958
assets/img/edge-deployment-agent.svg
Normal file
2958
assets/img/edge-deployment-agent.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 696 KiB |
10
build.sh
10
build.sh
@@ -1,10 +0,0 @@
|
||||
export version=0.0.1
|
||||
export name=agent
|
||||
|
||||
docker build -t $name .
|
||||
|
||||
docker tag $name kerberos/$name:$version
|
||||
docker push kerberos/$name:$version
|
||||
|
||||
docker tag $name kerberos/$name:latest
|
||||
docker push kerberos/$name:latest
|
||||
@@ -36,12 +36,12 @@ You attach a volume to your container by leveraging the `-v` option. To mount yo
|
||||
|
||||
docker run -p 80:80 --name mycamera \
|
||||
-v $(pwd)/agent/config:/home/agent/data/config \
|
||||
-v $(pwd)/agent/recordings:/home/agent/data/recordings\
|
||||
-d --restart=alwayskerberos/agent:latest
|
||||
-v $(pwd)/agent/recordings:/home/agent/data/recordings \
|
||||
-d --restart=always kerberos/agent:latest
|
||||
|
||||
### Override with environment variables
|
||||
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments easier when leveraging `docker compose` or `kubernetes` deployments much easier and scalable. Using this approach we simplify automation through `ansible` and `terraform`. You'll find [the full list of environment variables on the main README.md file](https://github.com/kerberos-io/agent#override-with-environment-variables).
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments when leveraging `docker compose` or `kubernetes` much easier and more scalable. Using this approach we simplify automation through `ansible` and `terraform`. You'll find [the full list of environment variables on the main README.md file](https://github.com/kerberos-io/agent#override-with-environment-variables).
|
||||
|
||||
### 2. Running multiple containers with Docker compose
|
||||
|
||||
|
||||
@@ -1,35 +1,38 @@
|
||||
version: "3.9"
|
||||
x-common-variables: &common-variables
|
||||
# Add variables here to add them to all agents
|
||||
AGENT_HUB_KEY: "xxxxx" # The access key linked to your account in Kerberos Hub.
|
||||
AGENT_HUB_PRIVATE_KEY: "xxxxx" # The secret access key linked to your account in Kerberos Hub.
|
||||
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
|
||||
services:
|
||||
kerberos-agent1:
|
||||
image: "kerberos/agent:latest"
|
||||
ports:
|
||||
- "8081:80"
|
||||
environment:
|
||||
- AGENT_NAME=agent1
|
||||
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
|
||||
- AGENT_HUB_KEY=xxx
|
||||
- AGENT_HUB_PRIVATE_KEY=xxx
|
||||
- AGENT_CAPTURE_CONTINUOUS=true
|
||||
- AGENT_CAPTURE_PRERECORDING=10
|
||||
- AGENT_CAPTURE_POSTRECORDING=10
|
||||
- AGENT_CAPTURE_MAXLENGTH=60
|
||||
- AGENT_CAPTURE_PIXEL_CHANGE=150
|
||||
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
|
||||
<<: *common-variables
|
||||
AGENT_NAME: agent1
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP: rtsp://username:password@x.x.x.x/Streaming/Channels/101 # Hikvision camera RTSP url example
|
||||
AGENT_KEY: "1"
|
||||
kerberos-agent2:
|
||||
image: "kerberos/agent:latest"
|
||||
ports:
|
||||
- "8082:80"
|
||||
environment:
|
||||
- AGENT_NAME=agent2
|
||||
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
|
||||
- AGENT_HUB_KEY=yyy
|
||||
- AGENT_HUB_PRIVATE_KEY=yyy
|
||||
<<: *common-variables
|
||||
AGENT_NAME: agent2
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP: rtsp://username:password@x.x.x.x/channel1 # Linksys camera RTSP url example
|
||||
AGENT_KEY: "2"
|
||||
kerberos-agent3:
|
||||
image: "kerberos/agent:latest"
|
||||
ports:
|
||||
- "8083:80"
|
||||
environment:
|
||||
- AGENT_NAME=agent3
|
||||
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
|
||||
- AGENT_HUB_KEY=zzz
|
||||
- AGENT_HUB_PRIVATE_KEY=zzz
|
||||
<<: *common-variables
|
||||
AGENT_NAME: agent3
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP: rtsp://username:password@x.x.x.x/cam/realmonitor?channel=1&subtype=1 # Dahua camera RTSP url example
|
||||
AGENT_KEY: "3"
|
||||
networks:
|
||||
default:
|
||||
name: cluster-net
|
||||
external: true
|
||||
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: agent
|
||||
image: kerberos/agent:latest
|
||||
image: kerberos/agent:3.2.3
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
@@ -50,4 +50,4 @@ spec:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: agent
|
||||
app: agent
|
||||
|
||||
BIN
machinery/.DS_Store
vendored
Normal file
BIN
machinery/.DS_Store
vendored
Normal file
Binary file not shown.
31
machinery/.env
Normal file
31
machinery/.env
Normal file
@@ -0,0 +1,31 @@
|
||||
AGENT_NAME=camera-name
|
||||
AGENT_KEY=uniq-camera-id
|
||||
AGENT_TIMEZONE=Europe/Brussels
|
||||
#AGENT_CAPTURE_CONTINUOUS=true
|
||||
#AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://fake.kerberos.io/stream
|
||||
#AGENT_CAPTURE_IPCAMERA_SUB_RTSP=rtsp://fake.kerberos.io/stream
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR=x.x.x.x
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME=xxx
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD=xxx
|
||||
AGENT_HUB_URI=https://api.cloud.kerberos.io
|
||||
AGENT_HUB_KEY=AKIXxxx4JBEI
|
||||
AGENT_HUB_PRIVATE_KEY=DIOXxxxAlYpaxxxxXioL0txxx
|
||||
AGENT_HUB_SITE=681xxxxxxx9bcda5
|
||||
|
||||
# By default will send to Hub (=S3), if you wish to send to Kerberos Vault, set to "kstorage"
|
||||
AGENT_CLOUD=s3
|
||||
AGENT_KERBEROSVAULT_URI=
|
||||
AGENT_KERBEROSVAULT_PROVIDER=
|
||||
AGENT_KERBEROSVAULT_DIRECTORY=
|
||||
AGENT_KERBEROSVAULT_ACCESS_KEY=
|
||||
AGENT_KERBEROSVAULT_SECRET_KEY=
|
||||
AGENT_KERBEROSVAULT_MAX_RETRIES=10
|
||||
AGENT_KERBEROSVAULT_TIMEOUT=120
|
||||
AGENT_KERBEROSVAULT_SECONDARY_URI=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_PROVIDER=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY=
|
||||
|
||||
# Open telemetry tracing endpoint
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT=
|
||||
18
machinery/.vscode/launch.json
vendored
18
machinery/.vscode/launch.json
vendored
@@ -1,18 +0,0 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch Package",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "main.go",
|
||||
"args": ["-action", "run"],
|
||||
"envFile": "${workspaceFolder}/.env",
|
||||
"buildFlags": "--tags dynamic",
|
||||
},
|
||||
]
|
||||
}
|
||||
@@ -26,6 +26,7 @@
|
||||
"recording": "true",
|
||||
"snapshots": "true",
|
||||
"liveview": "true",
|
||||
"liveview_chunking": "false",
|
||||
"motion": "true",
|
||||
"postrecording": 20,
|
||||
"prerecording": 10,
|
||||
@@ -98,11 +99,13 @@
|
||||
"region": "eu-west-1"
|
||||
},
|
||||
"kstorage": {},
|
||||
"kstorage_secondary": {},
|
||||
"dropbox": {},
|
||||
"mqtturi": "tcp://mqtt.kerberos.io:1883",
|
||||
"mqtt_username": "",
|
||||
"mqtt_password": "",
|
||||
"stunuri": "stun:turn.kerberos.io:8443",
|
||||
"turn_force": "false",
|
||||
"turnuri": "turn:turn.kerberos.io:8443",
|
||||
"turn_username": "username1",
|
||||
"turn_password": "password1",
|
||||
@@ -113,5 +116,8 @@
|
||||
"hub_private_key": "",
|
||||
"hub_site": "",
|
||||
"condition_uri": "",
|
||||
"encryption": {}
|
||||
}
|
||||
"encryption": {},
|
||||
"signing": {},
|
||||
"realtimeprocessing": "false",
|
||||
"realtimeprocessing_topic": ""
|
||||
}
|
||||
@@ -1,5 +1,4 @@
|
||||
// Package docs GENERATED BY SWAG; DO NOT EDIT
|
||||
// This file was generated by swaggo/swag
|
||||
// Package docs Code generated by swaggo/swag. DO NOT EDIT
|
||||
package docs
|
||||
|
||||
import "github.com/swaggo/swag"
|
||||
@@ -388,7 +387,7 @@ const docTemplate = `{
|
||||
"operationId": "snapshot-base64",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -403,7 +402,7 @@ const docTemplate = `{
|
||||
"operationId": "snapshot-jpeg",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -476,7 +475,7 @@ const docTemplate = `{
|
||||
"operationId": "config",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -500,7 +499,7 @@ const docTemplate = `{
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -515,7 +514,7 @@ const docTemplate = `{
|
||||
"operationId": "dashboard",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -530,7 +529,7 @@ const docTemplate = `{
|
||||
"operationId": "days",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -590,7 +589,7 @@ const docTemplate = `{
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -803,6 +802,9 @@ const docTemplate = `{
|
||||
"description": "obsolete",
|
||||
"type": "string"
|
||||
},
|
||||
"hub_encryption": {
|
||||
"type": "string"
|
||||
},
|
||||
"hub_key": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -839,6 +841,12 @@ const docTemplate = `{
|
||||
"offline": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing_topic": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"$ref": "#/definitions/models.Region"
|
||||
},
|
||||
@@ -863,6 +871,9 @@ const docTemplate = `{
|
||||
"timezone": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_force": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_password": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -957,9 +968,18 @@ const docTemplate = `{
|
||||
"rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_fps": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_height": {
|
||||
"type": "integer"
|
||||
},
|
||||
"sub_rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_width": {
|
||||
"type": "integer"
|
||||
},
|
||||
"width": {
|
||||
"type": "integer"
|
||||
}
|
||||
@@ -1166,6 +1186,8 @@ var SwaggerInfo = &swag.Spec{
|
||||
Description: "This is the API for using and configure Kerberos Agent.",
|
||||
InfoInstanceName: "swagger",
|
||||
SwaggerTemplate: docTemplate,
|
||||
LeftDelim: "{{",
|
||||
RightDelim: "}}",
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -380,7 +380,7 @@
|
||||
"operationId": "snapshot-base64",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -395,7 +395,7 @@
|
||||
"operationId": "snapshot-jpeg",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -468,7 +468,7 @@
|
||||
"operationId": "config",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -492,7 +492,7 @@
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -507,7 +507,7 @@
|
||||
"operationId": "dashboard",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -522,7 +522,7 @@
|
||||
"operationId": "days",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -582,7 +582,7 @@
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -795,6 +795,9 @@
|
||||
"description": "obsolete",
|
||||
"type": "string"
|
||||
},
|
||||
"hub_encryption": {
|
||||
"type": "string"
|
||||
},
|
||||
"hub_key": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -831,6 +834,12 @@
|
||||
"offline": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing_topic": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"$ref": "#/definitions/models.Region"
|
||||
},
|
||||
@@ -855,6 +864,9 @@
|
||||
"timezone": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_force": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_password": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -949,9 +961,18 @@
|
||||
"rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_fps": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_height": {
|
||||
"type": "integer"
|
||||
},
|
||||
"sub_rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_width": {
|
||||
"type": "integer"
|
||||
},
|
||||
"width": {
|
||||
"type": "integer"
|
||||
}
|
||||
|
||||
@@ -95,6 +95,8 @@ definitions:
|
||||
heartbeaturi:
|
||||
description: obsolete
|
||||
type: string
|
||||
hub_encryption:
|
||||
type: string
|
||||
hub_key:
|
||||
type: string
|
||||
hub_private_key:
|
||||
@@ -119,6 +121,10 @@ definitions:
|
||||
type: string
|
||||
offline:
|
||||
type: string
|
||||
realtimeprocessing:
|
||||
type: string
|
||||
realtimeprocessing_topic:
|
||||
type: string
|
||||
region:
|
||||
$ref: '#/definitions/models.Region'
|
||||
remove_after_upload:
|
||||
@@ -135,6 +141,8 @@ definitions:
|
||||
type: array
|
||||
timezone:
|
||||
type: string
|
||||
turn_force:
|
||||
type: string
|
||||
turn_password:
|
||||
type: string
|
||||
turn_username:
|
||||
@@ -196,8 +204,14 @@ definitions:
|
||||
type: string
|
||||
rtsp:
|
||||
type: string
|
||||
sub_fps:
|
||||
type: string
|
||||
sub_height:
|
||||
type: integer
|
||||
sub_rtsp:
|
||||
type: string
|
||||
sub_width:
|
||||
type: integer
|
||||
width:
|
||||
type: integer
|
||||
type: object
|
||||
@@ -564,7 +578,7 @@ paths:
|
||||
operationId: snapshot-base64
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get a snapshot from the camera in base64.
|
||||
tags:
|
||||
- camera
|
||||
@@ -574,7 +588,7 @@ paths:
|
||||
operationId: snapshot-jpeg
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get a snapshot from the camera in jpeg format.
|
||||
tags:
|
||||
- camera
|
||||
@@ -624,7 +638,7 @@ paths:
|
||||
operationId: config
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get the current configuration.
|
||||
tags:
|
||||
- config
|
||||
@@ -640,7 +654,7 @@ paths:
|
||||
$ref: '#/definitions/models.Config'
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Update the current configuration.
|
||||
tags:
|
||||
- config
|
||||
@@ -650,7 +664,7 @@ paths:
|
||||
operationId: dashboard
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get all information showed on the dashboard.
|
||||
tags:
|
||||
- general
|
||||
@@ -660,7 +674,7 @@ paths:
|
||||
operationId: days
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get all days stored in the recordings directory.
|
||||
tags:
|
||||
- general
|
||||
@@ -698,7 +712,7 @@ paths:
|
||||
$ref: '#/definitions/models.EventFilter'
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get the latest recordings (events) from the recordings directory.
|
||||
tags:
|
||||
- general
|
||||
|
||||
193
machinery/go.mod
193
machinery/go.mod
@@ -1,152 +1,139 @@
|
||||
module github.com/kerberos-io/agent/machinery
|
||||
|
||||
go 1.20
|
||||
go 1.24.2
|
||||
|
||||
//replace github.com/kerberos-io/joy4 v1.0.63 => ../../../../github.com/kerberos-io/joy4
|
||||
|
||||
//replace github.com/kerberos-io/onvif v0.0.10 => ../../../../github.com/kerberos-io/onvif
|
||||
replace google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250519155744-55703ea1f237
|
||||
|
||||
require (
|
||||
github.com/Eyevinn/mp4ff v0.48.0
|
||||
github.com/InVisionApp/conjungo v1.1.0
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1
|
||||
github.com/bluenviron/gortsplib/v4 v4.6.1
|
||||
github.com/bluenviron/mediacommon v1.5.1
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.3
|
||||
github.com/bluenviron/gortsplib/v4 v4.14.1
|
||||
github.com/bluenviron/mediacommon v1.14.0
|
||||
github.com/cedricve/go-onvif v0.0.0-20200222191200-567e8ce298f6
|
||||
github.com/dromara/carbon/v2 v2.6.8
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
|
||||
github.com/eclipse/paho.mqtt.golang v1.4.2
|
||||
github.com/elastic/go-sysinfo v1.9.0
|
||||
github.com/gin-contrib/cors v1.4.0
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/contrib v0.0.0-20221130124618-7e01895a63f2
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/eclipse/paho.mqtt.golang v1.5.0
|
||||
github.com/elastic/go-sysinfo v1.15.3
|
||||
github.com/gin-contrib/cors v1.7.5
|
||||
github.com/gin-contrib/pprof v1.5.3
|
||||
github.com/gin-gonic/contrib v0.0.0-20250521004450-2b1292699c15
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/gofrs/uuid v4.4.0+incompatible
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3
|
||||
github.com/golang-module/carbon/v2 v2.2.3
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/kellydunn/golang-geo v0.7.0
|
||||
github.com/kerberos-io/joy4 v1.0.64
|
||||
github.com/kerberos-io/onvif v0.0.14
|
||||
github.com/kerberos-io/onvif v1.0.0
|
||||
github.com/minio/minio-go/v6 v6.0.57
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/pion/rtp v1.8.3
|
||||
github.com/pion/webrtc/v3 v3.1.50
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/swaggo/files v1.0.0
|
||||
github.com/swaggo/gin-swagger v1.5.3
|
||||
github.com/swaggo/swag v1.8.9
|
||||
github.com/pion/interceptor v0.1.40
|
||||
github.com/pion/rtp v1.8.19
|
||||
github.com/pion/webrtc/v4 v4.1.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/swaggo/files v1.0.1
|
||||
github.com/swaggo/gin-swagger v1.6.0
|
||||
github.com/swaggo/swag v1.16.4
|
||||
github.com/tevino/abool v1.2.0
|
||||
github.com/yapingcat/gomedia v0.0.0-20231203152327-9078d4068ce7
|
||||
github.com/zaf/g711 v0.0.0-20220109202201-cf0017bf0359
|
||||
go.mongodb.org/mongo-driver v1.7.5
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.46.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
github.com/zaf/g711 v1.4.0
|
||||
go.mongodb.org/mongo-driver v1.17.3
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
|
||||
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
|
||||
github.com/DataDog/gostackparse v0.5.0 // indirect
|
||||
github.com/DataDog/sketches-go v1.2.1 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beevik/etree v1.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.9.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/bluenviron/mediacommon/v2 v2.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.13.2 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.4 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/clbanning/mxj v1.8.4 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/elastic/go-windows v1.0.0 // indirect
|
||||
github.com/elgs/gostrgen v0.0.0-20220325073726-0c3e00d082f6 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/elastic/go-windows v1.0.2 // indirect
|
||||
github.com/elgs/gostrgen v0.0.0-20161222160715-9d61ae07eeae // indirect
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
|
||||
github.com/gin-contrib/sse v1.0.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.6 // indirect
|
||||
github.com/go-openapi/spec v0.20.4 // indirect
|
||||
github.com/go-openapi/swag v0.19.15 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/go-playground/validator/v10 v10.26.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/pprof v0.0.0-20210423192551-a2663126120b // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/icholy/digest v0.1.23 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.0 // indirect
|
||||
github.com/juju/errors v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.16.7 // indirect
|
||||
github.com/klauspost/cpuid v1.2.3 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/kylelemons/go-gypsy v1.0.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/lib/pq v1.10.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.0 // indirect
|
||||
github.com/minio/sha256-simd v0.1.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/onsi/gomega v1.27.4 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/philhofer/fwd v1.1.1 // indirect
|
||||
github.com/pion/datachannel v1.5.5 // indirect
|
||||
github.com/pion/dtls/v2 v2.1.5 // indirect
|
||||
github.com/pion/ice/v2 v2.2.12 // indirect
|
||||
github.com/pion/interceptor v0.1.11 // indirect
|
||||
github.com/pion/logging v0.2.2 // indirect
|
||||
github.com/pion/mdns v0.0.5 // indirect
|
||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||
github.com/nxadm/tail v1.4.11 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.12 // indirect
|
||||
github.com/pion/sctp v1.8.5 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.6 // indirect
|
||||
github.com/pion/srtp/v2 v2.0.10 // indirect
|
||||
github.com/pion/stun v0.3.5 // indirect
|
||||
github.com/pion/transport v0.14.1 // indirect
|
||||
github.com/pion/turn/v2 v2.0.8 // indirect
|
||||
github.com/pion/udp v0.1.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/tinylib/msgp v1.1.6 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.5 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.0.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.2 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
github.com/ziutek/mymysql v1.5.4 // indirect
|
||||
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.16.0 // indirect
|
||||
golang.org/x/net v0.19.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
|
||||
golang.org/x/tools v0.7.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/appengine v1.6.6 // indirect
|
||||
google.golang.org/grpc v1.32.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
golang.org/x/arch v0.16.0 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/tools v0.30.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/grpc v1.72.1 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/ini.v1 v1.42.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
||||
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
|
||||
)
|
||||
|
||||
1946
machinery/go.sum
1946
machinery/go.sum
File diff suppressed because it is too large
Load Diff
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
@@ -11,47 +12,61 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/onvif"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||
|
||||
configService "github.com/kerberos-io/agent/machinery/src/config"
|
||||
"github.com/kerberos-io/agent/machinery/src/routers"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/profiler"
|
||||
)
|
||||
|
||||
var VERSION = "3.0.0"
|
||||
var VERSION = utils.VERSION
|
||||
|
||||
func startTracing(agentKey string, otelEndpoint string) (*trace.TracerProvider, error) {
|
||||
serviceName := "agent-" + agentKey
|
||||
headers := map[string]string{
|
||||
"content-type": "application/json",
|
||||
}
|
||||
|
||||
exporter, err := otlptrace.New(
|
||||
context.Background(),
|
||||
otlptracehttp.NewClient(
|
||||
otlptracehttp.WithEndpoint(otelEndpoint),
|
||||
otlptracehttp.WithHeaders(headers),
|
||||
otlptracehttp.WithInsecure(),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating new exporter: %w", err)
|
||||
}
|
||||
|
||||
tracerprovider := trace.NewTracerProvider(
|
||||
trace.WithBatcher(
|
||||
exporter,
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
trace.WithBatchTimeout(trace.DefaultScheduleDelay*time.Millisecond),
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
),
|
||||
trace.WithResource(
|
||||
resource.NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey.String(serviceName),
|
||||
attribute.String("environment", "develop"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
otel.SetTracerProvider(tracerprovider)
|
||||
|
||||
return tracerprovider, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
// You might be interested in debugging the agent.
|
||||
if os.Getenv("DATADOG_AGENT_ENABLED") == "true" {
|
||||
if os.Getenv("DATADOG_AGENT_K8S_ENABLED") == "true" {
|
||||
tracer.Start()
|
||||
defer tracer.Stop()
|
||||
} else {
|
||||
service := os.Getenv("DATADOG_AGENT_SERVICE")
|
||||
environment := os.Getenv("DATADOG_AGENT_ENVIRONMENT")
|
||||
log.Log.Info("Starting Datadog Agent with service: " + service + " and environment: " + environment)
|
||||
rules := []tracer.SamplingRule{tracer.RateRule(1)}
|
||||
tracer.Start(
|
||||
tracer.WithSamplingRules(rules),
|
||||
tracer.WithService(service),
|
||||
tracer.WithEnv(environment),
|
||||
)
|
||||
defer tracer.Stop()
|
||||
err := profiler.Start(
|
||||
profiler.WithService(service),
|
||||
profiler.WithEnv(environment),
|
||||
profiler.WithProfileTypes(
|
||||
profiler.CPUProfile,
|
||||
profiler.HeapProfile,
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
log.Log.Fatal(err.Error())
|
||||
}
|
||||
defer profiler.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// Start the show ;)
|
||||
// We'll parse the flags (named variables), and start the agent.
|
||||
@@ -114,7 +129,7 @@ func main() {
|
||||
|
||||
case "run":
|
||||
{
|
||||
// Print Kerberos.io ASCII art
|
||||
// Print Agent ASCII art
|
||||
utils.PrintASCIIArt()
|
||||
|
||||
// Print the environment variables which include "AGENT_" as prefix.
|
||||
@@ -127,12 +142,29 @@ func main() {
|
||||
configuration.Name = name
|
||||
configuration.Port = port
|
||||
|
||||
// Open this configuration either from Kerberos Agent or Kerberos Factory.
|
||||
// Open this configuration either from Agent or Factory.
|
||||
configService.OpenConfig(configDirectory, &configuration)
|
||||
|
||||
// We will override the configuration with the environment variables
|
||||
configService.OverrideWithEnvironmentVariables(&configuration)
|
||||
|
||||
// Start OpenTelemetry tracing
|
||||
if otelEndpoint := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"); otelEndpoint == "" {
|
||||
log.Log.Info("main.Main(): No OpenTelemetry endpoint provided, skipping tracing")
|
||||
} else {
|
||||
log.Log.Info("main.Main(): Starting OpenTelemetry tracing with endpoint: " + otelEndpoint)
|
||||
agentKey := configuration.Config.Key
|
||||
traceProvider, err := startTracing(agentKey, otelEndpoint)
|
||||
if err != nil {
|
||||
log.Log.Error("traceprovider: " + err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if err := traceProvider.Shutdown(context.Background()); err != nil {
|
||||
log.Log.Error("traceprovider: " + err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Printing final configuration
|
||||
utils.PrintConfiguration(&configuration)
|
||||
|
||||
@@ -175,7 +207,7 @@ func main() {
|
||||
HandleBootstrap: make(chan string, 1),
|
||||
}
|
||||
|
||||
go components.Bootstrap(configDirectory, &configuration, &communication, &capture)
|
||||
go components.Bootstrap(ctx, configDirectory, &configuration, &communication, &capture)
|
||||
|
||||
// Start the REST API.
|
||||
routers.StartWebserver(configDirectory, &configuration, &communication, &capture)
|
||||
|
||||
@@ -38,16 +38,16 @@ func (c *Capture) SetBackChannelClient(rtspUrl string) *Golibrtsp {
|
||||
// RTSPClient is a interface that abstracts the RTSP client implementation.
|
||||
type RTSPClient interface {
|
||||
// Connect to the RTSP server.
|
||||
Connect(ctx context.Context) error
|
||||
Connect(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Connect to a backchannel RTSP server.
|
||||
ConnectBackChannel(ctx context.Context) error
|
||||
ConnectBackChannel(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
Start(ctx context.Context, streamType string, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) error
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
StartBackChannel(ctx context.Context) (err error)
|
||||
StartBackChannel(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Decode a packet into a image.
|
||||
DecodePacket(pkt packets.Packet) (image.YCbCr, error)
|
||||
@@ -59,7 +59,7 @@ type RTSPClient interface {
|
||||
WritePacket(pkt packets.Packet) error
|
||||
|
||||
// Close the connection to the RTSP server.
|
||||
Close() error
|
||||
Close(ctx context.Context) error
|
||||
|
||||
// Get a list of streams from the RTSP server.
|
||||
GetStreams() ([]packets.Stream, error)
|
||||
|
||||
@@ -33,8 +33,11 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
"github.com/pion/rtp"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
var tracer = otel.Tracer("github.com/kerberos-io/agent/machinery/src/capture")
|
||||
|
||||
// Implements the RTSPClient interface.
|
||||
type Golibrtsp struct {
|
||||
RTSPClient
|
||||
@@ -63,7 +66,12 @@ type Golibrtsp struct {
|
||||
AudioG711Index int8
|
||||
AudioG711Media *description.Media
|
||||
AudioG711Forma *format.G711
|
||||
AudioG711Decoder *rtpsimpleaudio.Decoder
|
||||
AudioG711Decoder *rtplpcm.Decoder
|
||||
|
||||
AudioOpusIndex int8
|
||||
AudioOpusMedia *description.Media
|
||||
AudioOpusForma *format.Opus
|
||||
AudioOpusDecoder *rtpsimpleaudio.Decoder
|
||||
|
||||
HasBackChannel bool
|
||||
AudioG711IndexBackChannel int8
|
||||
@@ -76,10 +84,47 @@ type Golibrtsp struct {
|
||||
AudioMPEG4Decoder *rtpmpeg4audio.Decoder
|
||||
|
||||
Streams []packets.Stream
|
||||
|
||||
// FPS calculation fields
|
||||
lastFrameTime time.Time
|
||||
frameTimeBuffer []time.Duration
|
||||
frameBufferSize int
|
||||
frameBufferIndex int
|
||||
fpsMutex sync.Mutex
|
||||
|
||||
// I-frame interval tracking fields
|
||||
packetsSinceLastKeyframe int
|
||||
lastKeyframePacketCount int
|
||||
keyframeIntervals []int
|
||||
keyframeBufferSize int
|
||||
keyframeBufferIndex int
|
||||
keyframeMutex sync.Mutex
|
||||
}
|
||||
|
||||
// Init function
|
||||
var H264FrameDecoder *Decoder
|
||||
var H265FrameDecoder *Decoder
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
// setup H264 -> raw frames decoder
|
||||
H264FrameDecoder, err = newDecoder("H264")
|
||||
if err != nil {
|
||||
log.Log.Error("capture.golibrtsp.init(): " + err.Error())
|
||||
}
|
||||
|
||||
// setup H265 -> raw frames decoder
|
||||
H265FrameDecoder, err = newDecoder("H265")
|
||||
if err != nil {
|
||||
log.Log.Error("capture.golibrtsp.init(): " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Connect to the RTSP server.
|
||||
func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) Connect(ctx context.Context, ctxOtel context.Context) (err error) {
|
||||
|
||||
_, span := tracer.Start(ctxOtel, "Connect")
|
||||
defer span.End()
|
||||
|
||||
transport := gortsplib.TransportTCP
|
||||
g.Client = gortsplib.Client{
|
||||
@@ -107,8 +152,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Iniatlise the mutex.
|
||||
// Initialize the mutex and FPS calculation.
|
||||
g.VideoDecoderMutex = &sync.Mutex{}
|
||||
g.initFPSCalculation()
|
||||
|
||||
// find the H264 media and format
|
||||
var formaH264 *format.H264
|
||||
@@ -124,43 +170,53 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(H264): " + err.Error())
|
||||
} else {
|
||||
// Get SPS from the SDP
|
||||
// Get SPS and PPS from the SDP
|
||||
// Calculate the width and height of the video
|
||||
var sps h264.SPS
|
||||
err = sps.Unmarshal(formaH264.SPS)
|
||||
if err != nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(H264): " + err.Error())
|
||||
return
|
||||
errSPS := sps.Unmarshal(formaH264.SPS)
|
||||
// It might be that the SPS is not available yet, so we'll proceed,
|
||||
// but try to fetch it later on.
|
||||
if errSPS != nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(H264): " + errSPS.Error())
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
SPS: []byte{},
|
||||
PPS: []byte{},
|
||||
Width: 0,
|
||||
Height: 0,
|
||||
FPS: 0,
|
||||
IsBackChannel: false,
|
||||
})
|
||||
} else {
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
SPS: formaH264.SPS,
|
||||
PPS: formaH264.PPS,
|
||||
Width: sps.Width(),
|
||||
Height: sps.Height(),
|
||||
FPS: sps.FPS(),
|
||||
IsBackChannel: false,
|
||||
})
|
||||
}
|
||||
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
SPS: formaH264.SPS,
|
||||
PPS: formaH264.PPS,
|
||||
Width: sps.Width(),
|
||||
Height: sps.Height(),
|
||||
FPS: sps.FPS(),
|
||||
IsBackChannel: false,
|
||||
})
|
||||
|
||||
// Set the index for the video
|
||||
g.VideoH264Index = int8(len(g.Streams)) - 1
|
||||
|
||||
// setup RTP/H264 -> H264 decoder
|
||||
rtpDec, err := formaH264.CreateDecoder()
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(H264): " + err.Error())
|
||||
}
|
||||
g.VideoH264Decoder = rtpDec
|
||||
|
||||
// setup H264 -> raw frames decoder
|
||||
frameDec, err := newDecoder("H264")
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
}
|
||||
g.VideoH264FrameDecoder = frameDec
|
||||
g.VideoH264FrameDecoder = H264FrameDecoder
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,8 +242,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Info("capture.golibrtsp.Connect(H265): " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH265.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
@@ -206,16 +263,11 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// setup RTP/H265 -> H265 decoder
|
||||
rtpDec, err := formaH265.CreateDecoder()
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(H265): " + err.Error())
|
||||
}
|
||||
g.VideoH265Decoder = rtpDec
|
||||
|
||||
// setup H265 -> raw frames decoder
|
||||
frameDec, err := newDecoder("H265")
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
}
|
||||
g.VideoH265FrameDecoder = frameDec
|
||||
g.VideoH265FrameDecoder = H265FrameDecoder
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,8 +292,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Error("capture.golibrtsp.Connect(G711): " + err.Error())
|
||||
} else {
|
||||
g.AudioG711Decoder = audiortpDec
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "PCM_MULAW",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -254,6 +307,42 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Look for audio stream.
|
||||
// find the Opus media and format
|
||||
audioFormaOpus, audioMediOpus := FindOPUS(desc, false)
|
||||
g.AudioOpusMedia = audioMediOpus
|
||||
g.AudioOpusForma = audioFormaOpus
|
||||
if audioMediOpus == nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(Opus): " + "audio media not found")
|
||||
} else {
|
||||
// setup a audio media
|
||||
_, err = g.Client.Setup(desc.BaseURL, audioMediOpus, 0, 0)
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(Opus): " + err.Error())
|
||||
} else {
|
||||
// create decoder
|
||||
audiortpDec, err := audioFormaOpus.CreateDecoder()
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(Opus): " + err.Error())
|
||||
} else {
|
||||
g.AudioOpusDecoder = audiortpDec
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "OPUS",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
IsBackChannel: false,
|
||||
})
|
||||
|
||||
// Set the index for the audio
|
||||
g.AudioOpusIndex = int8(len(g.Streams)) - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Look for audio stream.
|
||||
// find the AAC media and format
|
||||
audioFormaMPEG4, audioMediMPEG4 := FindMPEG4Audio(desc, false)
|
||||
@@ -268,11 +357,15 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(MPEG4): " + err.Error())
|
||||
} else {
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "AAC",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
IsBackChannel: false,
|
||||
SampleRate: audioFormaMPEG4.Config.SampleRate,
|
||||
Channels: audioFormaMPEG4.Config.ChannelCount,
|
||||
})
|
||||
|
||||
// Set the index for the audio
|
||||
@@ -292,7 +385,11 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context, ctxRunAgent context.Context) (err error) {
|
||||
|
||||
_, span := tracer.Start(ctxRunAgent, "ConnectBackChannel")
|
||||
defer span.End()
|
||||
|
||||
// Transport TCP
|
||||
transport := gortsplib.TransportTCP
|
||||
g.Client = gortsplib.Client{
|
||||
@@ -337,7 +434,9 @@ func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
g.HasBackChannel = false
|
||||
} else {
|
||||
g.HasBackChannel = true
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "PCM_MULAW",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -357,8 +456,9 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
// called when a MULAW audio RTP packet arrives
|
||||
if g.AudioG711Media != nil && g.AudioG711Forma != nil {
|
||||
g.Client.OnPacketRTP(g.AudioG711Media, g.AudioG711Forma, func(rtppkt *rtp.Packet) {
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.AudioG711Media, rtppkt)
|
||||
// decode timestamp
|
||||
pts2, ok := g.Client.PacketPTS2(g.AudioG711Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
return
|
||||
@@ -375,8 +475,10 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
IsKeyFrame: false,
|
||||
Packet: rtppkt,
|
||||
Data: op,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CompositionTime: pts2,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
Idx: g.AudioG711Index,
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -391,6 +493,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
g.Client.OnPacketRTP(g.AudioMPEG4Media, g.AudioMPEG4Forma, func(rtppkt *rtp.Packet) {
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.AudioMPEG4Media, rtppkt)
|
||||
pts2, ok := g.Client.PacketPTS2(g.AudioMPEG4Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Error("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
return
|
||||
@@ -414,8 +517,10 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
IsKeyFrame: false,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CompositionTime: pts2,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
Idx: g.AudioG711Index,
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -428,6 +533,9 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
// called when a video RTP packet arrives for H264
|
||||
var filteredAU [][]byte
|
||||
if g.VideoH264Media != nil && g.VideoH264Forma != nil {
|
||||
|
||||
//dtsExtractor := h264.NewDTSExtractor2()
|
||||
|
||||
g.Client.OnPacketRTP(g.VideoH264Media, g.VideoH264Forma, func(rtppkt *rtp.Packet) {
|
||||
|
||||
// This will check if we need to stop the thread,
|
||||
@@ -442,6 +550,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.VideoH264Media, rtppkt)
|
||||
pts2, ok := g.Client.PacketPTS2(g.VideoH264Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
return
|
||||
@@ -468,6 +577,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
// Check if we have a keyframe.
|
||||
nonIDRPresent := false
|
||||
idrPresent := false
|
||||
|
||||
for _, nalu := range au {
|
||||
typ := h264.NALUType(nalu[0] & 0x1F)
|
||||
switch typ {
|
||||
@@ -477,6 +587,37 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
idrPresent = true
|
||||
case h264.NALUTypeNonIDR:
|
||||
nonIDRPresent = true
|
||||
case h264.NALUTypeSPS:
|
||||
// Read out sps
|
||||
var sps h264.SPS
|
||||
errSPS := sps.Unmarshal(nalu)
|
||||
if errSPS == nil {
|
||||
// Debug SPS information
|
||||
g.debugSPSInfo(&sps, streamType)
|
||||
|
||||
// Get width
|
||||
g.Streams[g.VideoH264Index].Width = sps.Width()
|
||||
if streamType == "main" {
|
||||
configuration.Config.Capture.IPCamera.Width = sps.Width()
|
||||
} else if streamType == "sub" {
|
||||
configuration.Config.Capture.IPCamera.SubWidth = sps.Width()
|
||||
}
|
||||
// Get height
|
||||
g.Streams[g.VideoH264Index].Height = sps.Height()
|
||||
if streamType == "main" {
|
||||
configuration.Config.Capture.IPCamera.Height = sps.Height()
|
||||
} else if streamType == "sub" {
|
||||
configuration.Config.Capture.IPCamera.SubHeight = sps.Height()
|
||||
}
|
||||
// Get FPS using enhanced method
|
||||
fps := g.getEnhancedFPS(&sps, g.VideoH264Index)
|
||||
g.Streams[g.VideoH264Index].FPS = fps
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.Start(%s): Final FPS=%.2f", streamType, fps))
|
||||
g.VideoH264Forma.SPS = nalu
|
||||
|
||||
}
|
||||
case h264.NALUTypePPS:
|
||||
g.VideoH264Forma.PPS = nalu
|
||||
}
|
||||
filteredAU = append(filteredAU, nalu)
|
||||
}
|
||||
@@ -496,14 +637,31 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
IsKeyFrame: idrPresent,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
CompositionTime: pts2,
|
||||
Idx: g.VideoH264Index,
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
Codec: "H264",
|
||||
}
|
||||
|
||||
// Track keyframe intervals
|
||||
keyframeInterval := g.trackKeyframeInterval(idrPresent)
|
||||
if idrPresent && keyframeInterval > 0 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
gopDuration := float64(keyframeInterval) / g.Streams[g.VideoH265Index].FPS
|
||||
gopSize := int(avgInterval) // Store GOP size in a separate variable
|
||||
g.Streams[g.VideoH264Index].GopSize = gopSize
|
||||
log.Log.Info(fmt.Sprintf("capture.golibrtsp.Start(%s): Keyframe interval=%d packets, Avg=%.1f, GOP=%.1fs, GOPSize=%d",
|
||||
streamType, keyframeInterval, avgInterval, gopDuration, gopSize))
|
||||
preRecording := configuration.Config.Capture.PreRecording
|
||||
if preRecording > 0 && int(gopDuration) > 0 {
|
||||
queue.SetMaxGopCount(int(preRecording)/int(gopDuration) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
pkt.Data = pkt.Data[4:]
|
||||
if pkt.IsKeyFrame {
|
||||
annexbNALUStartCode := func() []byte { return []byte{0x00, 0x00, 0x00, 0x01} }
|
||||
@@ -560,6 +718,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.VideoH265Media, rtppkt)
|
||||
pts2, ok := g.Client.PacketPTS2(g.VideoH265Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
return
|
||||
@@ -623,14 +782,31 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
IsKeyFrame: isRandomAccess,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
CompositionTime: pts2,
|
||||
Idx: g.VideoH265Index,
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
Codec: "H265",
|
||||
}
|
||||
|
||||
// Track keyframe intervals for H265
|
||||
keyframeInterval := g.trackKeyframeInterval(isRandomAccess)
|
||||
if isRandomAccess && keyframeInterval > 0 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
gopDuration := float64(keyframeInterval) / g.Streams[g.VideoH265Index].FPS
|
||||
gopSize := int(avgInterval) // Store GOP size in a separate variable
|
||||
g.Streams[g.VideoH265Index].GopSize = gopSize
|
||||
log.Log.Info(fmt.Sprintf("capture.golibrtsp.Start(%s): Keyframe interval=%d packets, Avg=%.1f, GOP=%.1fs, GOPSize=%d",
|
||||
streamType, keyframeInterval, avgInterval, gopDuration, gopSize))
|
||||
preRecording := configuration.Config.Capture.PreRecording
|
||||
if preRecording > 0 && int(gopDuration) > 0 {
|
||||
queue.SetMaxGopCount(int(preRecording)/int(gopDuration) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
queue.WritePacket(pkt)
|
||||
|
||||
// This will check if we need to stop the thread,
|
||||
@@ -673,7 +849,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
}
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
func (g *Golibrtsp) StartBackChannel(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) StartBackChannel(ctx context.Context, ctxRunAgent context.Context) (err error) {
|
||||
log.Log.Info("capture.golibrtsp.StartBackChannel(): started")
|
||||
// Wait for a second, so we can be sure the stream is playing.
|
||||
time.Sleep(1 * time.Second)
|
||||
@@ -755,8 +931,8 @@ func (g *Golibrtsp) DecodePacketRaw(pkt packets.Packet) (image.Gray, error) {
|
||||
}
|
||||
|
||||
// Get a list of streams from the RTSP server.
|
||||
func (j *Golibrtsp) GetStreams() ([]packets.Stream, error) {
|
||||
return j.Streams, nil
|
||||
func (g *Golibrtsp) GetStreams() ([]packets.Stream, error) {
|
||||
return g.Streams, nil
|
||||
}
|
||||
|
||||
// Get a list of video streams from the RTSP server.
|
||||
@@ -782,15 +958,22 @@ func (g *Golibrtsp) GetAudioStreams() ([]packets.Stream, error) {
|
||||
}
|
||||
|
||||
// Close the connection to the RTSP server.
|
||||
func (g *Golibrtsp) Close() error {
|
||||
func (g *Golibrtsp) Close(ctxOtel context.Context) error {
|
||||
|
||||
_, span := tracer.Start(ctxOtel, "Close")
|
||||
defer span.End()
|
||||
|
||||
// Close the demuxer.
|
||||
g.Client.Close()
|
||||
if g.VideoH264Decoder != nil {
|
||||
g.VideoH264FrameDecoder.Close()
|
||||
}
|
||||
if g.VideoH265FrameDecoder != nil {
|
||||
g.VideoH265FrameDecoder.Close()
|
||||
}
|
||||
|
||||
// We will have created the decoders globally, so we don't need to close them here.
|
||||
|
||||
//if g.VideoH264Decoder != nil {
|
||||
// g.VideoH264FrameDecoder.Close()
|
||||
//}
|
||||
//if g.VideoH265FrameDecoder != nil {
|
||||
// g.VideoH265FrameDecoder.Close()
|
||||
//}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -948,6 +1131,21 @@ func FindPCMU(desc *description.Session, isBackChannel bool) (*format.G711, *des
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func FindOPUS(desc *description.Session, isBackChannel bool) (*format.Opus, *description.Media) {
|
||||
for _, media := range desc.Medias {
|
||||
if media.IsBackChannel == isBackChannel {
|
||||
for _, forma := range media.Formats {
|
||||
if opus, ok := forma.(*format.Opus); ok {
|
||||
if opus.ChannelCount > 0 {
|
||||
return opus, media
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func FindMPEG4Audio(desc *description.Session, isBackChannel bool) (*format.MPEG4Audio, *description.Media) {
|
||||
for _, media := range desc.Medias {
|
||||
if media.IsBackChannel == isBackChannel {
|
||||
@@ -966,7 +1164,7 @@ func WriteMPEG4Audio(forma *format.MPEG4Audio, aus [][]byte) ([]byte, error) {
|
||||
pkts := make(mpeg4audio.ADTSPackets, len(aus))
|
||||
for i, au := range aus {
|
||||
pkts[i] = &mpeg4audio.ADTSPacket{
|
||||
Type: forma.Config.Type,
|
||||
Type: mpeg4audio.ObjectType(forma.Config.Type),
|
||||
SampleRate: forma.Config.SampleRate,
|
||||
ChannelCount: forma.Config.ChannelCount,
|
||||
AU: au,
|
||||
@@ -978,3 +1176,185 @@ func WriteMPEG4Audio(forma *format.MPEG4Audio, aus [][]byte) ([]byte, error) {
|
||||
}
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
// Initialize FPS calculation buffers
|
||||
func (g *Golibrtsp) initFPSCalculation() {
|
||||
g.frameBufferSize = 30 // Store last 30 frame intervals
|
||||
g.frameTimeBuffer = make([]time.Duration, g.frameBufferSize)
|
||||
g.frameBufferIndex = 0
|
||||
g.lastFrameTime = time.Time{}
|
||||
|
||||
// Initialize I-frame interval tracking
|
||||
g.keyframeBufferSize = 10 // Store last 10 keyframe intervals
|
||||
g.keyframeIntervals = make([]int, g.keyframeBufferSize)
|
||||
g.keyframeBufferIndex = 0
|
||||
g.packetsSinceLastKeyframe = 0
|
||||
g.lastKeyframePacketCount = 0
|
||||
}
|
||||
|
||||
// Calculate FPS from frame timestamps
|
||||
func (g *Golibrtsp) calculateFPSFromTimestamps() float64 {
|
||||
g.fpsMutex.Lock()
|
||||
defer g.fpsMutex.Unlock()
|
||||
|
||||
if g.lastFrameTime.IsZero() {
|
||||
g.lastFrameTime = time.Now()
|
||||
return 0
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
interval := now.Sub(g.lastFrameTime)
|
||||
g.lastFrameTime = now
|
||||
|
||||
// Store the interval
|
||||
g.frameTimeBuffer[g.frameBufferIndex] = interval
|
||||
g.frameBufferIndex = (g.frameBufferIndex + 1) % g.frameBufferSize
|
||||
|
||||
// Calculate average FPS from stored intervals
|
||||
var totalInterval time.Duration
|
||||
validSamples := 0
|
||||
|
||||
for _, interval := range g.frameTimeBuffer {
|
||||
if interval > 0 {
|
||||
totalInterval += interval
|
||||
validSamples++
|
||||
}
|
||||
}
|
||||
|
||||
if validSamples == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
avgInterval := totalInterval / time.Duration(validSamples)
|
||||
if avgInterval == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return float64(time.Second) / float64(avgInterval)
|
||||
}
|
||||
|
||||
// Get enhanced FPS information from SPS with fallback
|
||||
func (g *Golibrtsp) getEnhancedFPS(sps *h264.SPS, streamIndex int8) float64 {
|
||||
// First try to get FPS from SPS
|
||||
spsFPS := sps.FPS()
|
||||
|
||||
// Check if SPS FPS is reasonable (between 1 and 120 fps)
|
||||
if spsFPS > 0 && spsFPS <= 120 {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.getEnhancedFPS(): SPS FPS: %.2f", spsFPS))
|
||||
return spsFPS
|
||||
}
|
||||
|
||||
// Fallback to timestamp-based calculation
|
||||
timestampFPS := g.calculateFPSFromTimestamps()
|
||||
if timestampFPS > 0 && timestampFPS <= 120 {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.getEnhancedFPS(): Timestamp FPS: %.2f", timestampFPS))
|
||||
return timestampFPS
|
||||
}
|
||||
|
||||
// Return SPS FPS even if it seems unreasonable, or default
|
||||
if spsFPS > 0 {
|
||||
return spsFPS
|
||||
}
|
||||
|
||||
return 25.0 // Default fallback FPS
|
||||
}
|
||||
|
||||
// Track I-frame intervals by counting packets between keyframes
|
||||
func (g *Golibrtsp) trackKeyframeInterval(isKeyframe bool) int {
|
||||
g.keyframeMutex.Lock()
|
||||
defer g.keyframeMutex.Unlock()
|
||||
|
||||
g.packetsSinceLastKeyframe++
|
||||
|
||||
if isKeyframe {
|
||||
// Store the interval since the last keyframe
|
||||
if g.lastKeyframePacketCount > 0 {
|
||||
interval := g.packetsSinceLastKeyframe
|
||||
g.keyframeIntervals[g.keyframeBufferIndex] = interval
|
||||
g.keyframeBufferIndex = (g.keyframeBufferIndex + 1) % g.keyframeBufferSize
|
||||
}
|
||||
|
||||
// Reset counter for next interval
|
||||
g.lastKeyframePacketCount = g.packetsSinceLastKeyframe
|
||||
g.packetsSinceLastKeyframe = 0
|
||||
|
||||
return g.lastKeyframePacketCount
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Get average keyframe interval (GOP size)
|
||||
func (g *Golibrtsp) getAverageKeyframeInterval() float64 {
|
||||
g.keyframeMutex.Lock()
|
||||
defer g.keyframeMutex.Unlock()
|
||||
|
||||
var totalInterval int
|
||||
validSamples := 0
|
||||
|
||||
for _, interval := range g.keyframeIntervals {
|
||||
if interval > 0 {
|
||||
totalInterval += interval
|
||||
validSamples++
|
||||
}
|
||||
}
|
||||
|
||||
if validSamples == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return float64(totalInterval) / float64(validSamples)
|
||||
}
|
||||
|
||||
// Calculate GOP size in seconds based on FPS and keyframe interval
|
||||
func (g *Golibrtsp) getGOPDuration(fps float64) float64 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
if avgInterval > 0 && fps > 0 {
|
||||
return avgInterval / fps
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Get detailed SPS timing information
|
||||
func (g *Golibrtsp) getSPSTimingInfo(sps *h264.SPS) (hasVUI bool, timeScale uint32, numUnitsInTick uint32, fps float64) {
|
||||
// Try to get FPS from SPS
|
||||
fps = sps.FPS()
|
||||
|
||||
// Note: The gortsplib SPS struct may not expose VUI parameters directly
|
||||
// but we can still work with the calculated FPS
|
||||
if fps > 0 {
|
||||
hasVUI = true
|
||||
// These are estimated values based on common patterns
|
||||
if fps == 25.0 {
|
||||
timeScale = 50
|
||||
numUnitsInTick = 1
|
||||
} else if fps == 30.0 {
|
||||
timeScale = 60
|
||||
numUnitsInTick = 1
|
||||
} else if fps == 24.0 {
|
||||
timeScale = 48
|
||||
numUnitsInTick = 1
|
||||
} else {
|
||||
// Generic calculation
|
||||
timeScale = uint32(fps * 2)
|
||||
numUnitsInTick = 1
|
||||
}
|
||||
}
|
||||
|
||||
return hasVUI, timeScale, numUnitsInTick, fps
|
||||
}
|
||||
|
||||
// Debug SPS information
|
||||
func (g *Golibrtsp) debugSPSInfo(sps *h264.SPS, streamType string) {
|
||||
hasVUI, timeScale, numUnitsInTick, fps := g.getSPSTimingInfo(sps)
|
||||
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): Width=%d, Height=%d",
|
||||
streamType, sps.Width(), sps.Height()))
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): HasVUI=%t, FPS=%.2f",
|
||||
streamType, hasVUI, fps))
|
||||
|
||||
if hasVUI {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): TimeScale=%d, NumUnitsInTick=%d",
|
||||
streamType, timeScale, numUnitsInTick))
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,8 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
"github.com/yapingcat/gomedia/go-mp4"
|
||||
"github.com/kerberos-io/agent/machinery/src/video"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
func CleanupRecordingDirectory(configDirectory string, configuration *models.Configuration) {
|
||||
@@ -63,22 +64,46 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
} else {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(): started")
|
||||
|
||||
recordingPeriod := config.Capture.PostRecording // number of seconds to record.
|
||||
maxRecordingPeriod := config.Capture.MaxLengthRecording // maximum number of seconds to record.
|
||||
preRecording := config.Capture.PreRecording * 1000
|
||||
postRecording := config.Capture.PostRecording * 1000 // number of seconds to record.
|
||||
maxRecordingPeriod := config.Capture.MaxLengthRecording * 1000 // maximum number of seconds to record.
|
||||
|
||||
// Synchronise the last synced time
|
||||
now := time.Now().Unix()
|
||||
startRecording := now
|
||||
timestamp := now
|
||||
// We will calculate the maxRecordingPeriod based on the preRecording and postRecording values.
|
||||
if maxRecordingPeriod == 0 {
|
||||
// If maxRecordingPeriod is not set, we will use the preRecording and postRecording values
|
||||
maxRecordingPeriod = preRecording + postRecording
|
||||
}
|
||||
|
||||
// For continuous and motion based recording we will use a single file.
|
||||
var file *os.File
|
||||
if maxRecordingPeriod < preRecording+postRecording {
|
||||
log.Log.Error("capture.main.HandleRecordStream(): maxRecordingPeriod is less than preRecording + postRecording, this is not allowed. Setting maxRecordingPeriod to preRecording + postRecording.")
|
||||
maxRecordingPeriod = preRecording + postRecording
|
||||
}
|
||||
|
||||
if config.FriendlyName != "" {
|
||||
config.Name = config.FriendlyName
|
||||
}
|
||||
|
||||
// Get the audio and video codec from the camera.
|
||||
// We only expect one audio and one video codec.
|
||||
// If there are multiple audio or video streams, we will use the first one.
|
||||
audioCodec := ""
|
||||
videoCodec := ""
|
||||
audioStreams, _ := rtspClient.GetAudioStreams()
|
||||
videoStreams, _ := rtspClient.GetVideoStreams()
|
||||
if len(audioStreams) > 0 {
|
||||
audioCodec = audioStreams[0].Name
|
||||
config.Capture.IPCamera.SampleRate = audioStreams[0].SampleRate
|
||||
config.Capture.IPCamera.Channels = audioStreams[0].Channels
|
||||
}
|
||||
if len(videoStreams) > 0 {
|
||||
videoCodec = videoStreams[0].Name
|
||||
}
|
||||
|
||||
// Check if continuous recording.
|
||||
if config.Capture.Continuous == "true" {
|
||||
|
||||
//var cws *cacheWriterSeeker
|
||||
var myMuxer *mp4.Movmuxer
|
||||
var mp4Video *video.MP4
|
||||
var videoTrack uint32
|
||||
var audioTrack uint32
|
||||
var name string
|
||||
@@ -86,15 +111,15 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// Do not do anything!
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): start recording")
|
||||
|
||||
now = time.Now().Unix()
|
||||
timestamp = now
|
||||
start := false
|
||||
|
||||
// If continuous record the full length
|
||||
recordingPeriod = maxRecordingPeriod
|
||||
postRecording = maxRecordingPeriod
|
||||
// Recording file name
|
||||
fullName := ""
|
||||
|
||||
var startRecording int64 = 0 // start recording timestamp in milliseconds
|
||||
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
@@ -110,20 +135,21 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
|
||||
nextPkt, cursorError = recordingCursor.ReadPacket()
|
||||
|
||||
now := time.Now().Unix()
|
||||
now := time.Now().UnixMilli()
|
||||
|
||||
if start && // If already recording and current frame is a keyframe and we should stop recording
|
||||
nextPkt.IsKeyFrame && (timestamp+recordingPeriod-now <= 0 || now-startRecording >= maxRecordingPeriod) {
|
||||
nextPkt.IsKeyFrame && (startRecording+postRecording-now <= 0 || now-startRecording > maxRecordingPeriod-500) {
|
||||
|
||||
// Write the last packet
|
||||
ttime := convertPTS(pkt.Time)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
// Write the last packet
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
// Write the last packet
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
@@ -132,21 +158,44 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
}
|
||||
|
||||
// This will write the trailer a well.
|
||||
if err := myMuxer.WriteTrailer(); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
|
||||
// Close mp4
|
||||
mp4Video.Close(&config)
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): recording finished: file save: " + name)
|
||||
|
||||
// Cleanup muxer
|
||||
start = false
|
||||
file.Close()
|
||||
file = nil
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
// Update the name with the duration in milliseconds.
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
"0-0-0-0" + "_" + // region coordinates, we
|
||||
"-1" + "_" + // token
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -193,7 +242,6 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
|
||||
start = true
|
||||
timestamp = now
|
||||
|
||||
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
|
||||
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
|
||||
@@ -204,13 +252,17 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// - Number of changes
|
||||
// - Token
|
||||
|
||||
startRecording = time.Now().Unix() // we mark the current time when the record started.ss
|
||||
s := strconv.FormatInt(startRecording, 10) + "_" +
|
||||
"6" + "-" +
|
||||
"967003" + "_" +
|
||||
config.Name + "_" +
|
||||
"200-200-400-400" + "_0_" +
|
||||
"769"
|
||||
startRecording = pkt.CurrentTime
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" + // start timestamp in seconds
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" + // length of milliseconds
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" + // milliseconds
|
||||
config.Name + "_" + // device name
|
||||
"0-0-0-0" + "_" + // region coordinates, we will not use this for continuous recording
|
||||
"0" + "_" + // token
|
||||
"0" + "_" //+ // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
@@ -218,49 +270,61 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// Running...
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): recording started")
|
||||
|
||||
file, err = os.Create(fullName)
|
||||
if err == nil {
|
||||
//cws = newCacheWriterSeeker(4096)
|
||||
myMuxer, _ = mp4.CreateMp4Muxer(file)
|
||||
// We choose between H264 and H265
|
||||
if pkt.Codec == "H264" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264)
|
||||
} else if pkt.Codec == "H265" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265)
|
||||
}
|
||||
// For an MP4 container, AAC is the only audio codec supported.
|
||||
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
|
||||
} else {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
// Get width and height from the camera.
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
|
||||
// Get SPS and PPS NALUs from the camera.
|
||||
spsNALUS := configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
ppsNALUS := configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
vpsNALUS := configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
|
||||
// Create a video file, and set the dimensions.
|
||||
mp4Video = video.NewMP4(fullName, spsNALUS, ppsNALUS, vpsNALUS)
|
||||
mp4Video.SetWidth(width)
|
||||
mp4Video.SetHeight(height)
|
||||
|
||||
if videoCodec == "H264" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H264")
|
||||
} else if videoCodec == "H265" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H265")
|
||||
}
|
||||
if audioCodec == "AAC" {
|
||||
audioTrack = mp4Video.AddAudioTrack("AAC")
|
||||
} else if audioCodec == "PCM_MULAW" {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
|
||||
ttime := convertPTS(pkt.Time)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
// TODO: transcode to AAC, some work to do..
|
||||
// We might need to use ffmpeg to transcode the audio to AAC.
|
||||
// For now we will skip the audio track.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
}
|
||||
|
||||
recordingStatus = "started"
|
||||
|
||||
} else if start {
|
||||
ttime := convertPTS(pkt.Time)
|
||||
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
// New method using new mp4 library
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
@@ -269,7 +333,6 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pkt = nextPkt
|
||||
}
|
||||
|
||||
@@ -277,21 +340,43 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// If this happens we need to check to properly close the recording.
|
||||
if cursorError != nil {
|
||||
if recordingStatus == "started" {
|
||||
// This will write the trailer a well.
|
||||
if err := myMuxer.WriteTrailer(); err != nil {
|
||||
log.Log.Error(err.Error())
|
||||
}
|
||||
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): Recording finished: file save: " + name)
|
||||
|
||||
// Cleanup muxer
|
||||
start = false
|
||||
file.Close()
|
||||
file = nil
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
// Update the name with the duration in milliseconds.
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
"0-0-0-0" + "_" + // region coordinates, we
|
||||
"-1" + "_" + // token
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -329,33 +414,44 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Start motion based recording ")
|
||||
|
||||
var lastDuration time.Duration
|
||||
var lastRecordingTime int64
|
||||
var lastRecordingTime int64 = 0 // last recording timestamp in milliseconds
|
||||
var displayTime int64 = 0 // display time in milliseconds
|
||||
|
||||
//var cws *cacheWriterSeeker
|
||||
var myMuxer *mp4.Movmuxer
|
||||
var videoTrack uint32
|
||||
var audioTrack uint32
|
||||
|
||||
for motion := range communication.HandleMotion {
|
||||
|
||||
timestamp = time.Now().Unix()
|
||||
startRecording = time.Now().Unix() // we mark the current time when the record started.
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var nextPkt packets.Packet
|
||||
recordingCursor := queue.Oldest() // Start from the latest packet in the queue)
|
||||
|
||||
// If we have prerecording we will substract the number of seconds.
|
||||
// Taking into account FPS = GOP size (Keyfram interval)
|
||||
if config.Capture.PreRecording > 0 {
|
||||
now := time.Now().UnixMilli()
|
||||
motionTimestamp := now
|
||||
|
||||
// Might be that recordings are coming short after each other.
|
||||
// Therefore we do some math with the current time and the last recording time.
|
||||
start := false
|
||||
|
||||
timeBetweenNowAndLastRecording := startRecording - lastRecordingTime
|
||||
if timeBetweenNowAndLastRecording > int64(config.Capture.PreRecording) {
|
||||
startRecording = startRecording - int64(config.Capture.PreRecording) + 1
|
||||
} else {
|
||||
startRecording = startRecording - timeBetweenNowAndLastRecording
|
||||
}
|
||||
if cursorError == nil {
|
||||
pkt, cursorError = recordingCursor.ReadPacket()
|
||||
}
|
||||
|
||||
displayTime = pkt.CurrentTime
|
||||
startRecording := pkt.CurrentTime
|
||||
|
||||
// We have more packets in the queue (which might still be older than where we close the previous recording).
|
||||
// In that case we will use the last recording time to determine the start time of the recording, otherwise
|
||||
// we will have duplicate frames in the recording.
|
||||
if startRecording < lastRecordingTime {
|
||||
displayTime = lastRecordingTime
|
||||
startRecording = lastRecordingTime
|
||||
}
|
||||
|
||||
// If startRecording is 0, we will continue as it might be we are in a state of restarting the agent.
|
||||
if startRecording == 0 {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): startRecording is 0, we will continue as it might be we are in a state of restarting the agent.")
|
||||
continue
|
||||
}
|
||||
|
||||
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
|
||||
@@ -367,43 +463,56 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// - Number of changes
|
||||
// - Token
|
||||
|
||||
s := strconv.FormatInt(startRecording, 10) + "_" +
|
||||
"6" + "-" +
|
||||
"967003" + "_" +
|
||||
config.Name + "_" +
|
||||
"200-200-400-400" + "_" +
|
||||
strconv.Itoa(numberOfChanges) + "_" +
|
||||
"769"
|
||||
displayTimeSeconds := displayTime / 1000 // convert to seconds
|
||||
displayTimeMilliseconds := displayTime % 1000 // convert to milliseconds
|
||||
motionRectangleString := "0-0-0-0"
|
||||
if motion.Rectangle.X != 0 || motion.Rectangle.Y != 0 ||
|
||||
motion.Rectangle.Width != 0 || motion.Rectangle.Height != 0 {
|
||||
motionRectangleString = strconv.Itoa(motion.Rectangle.X) + "-" + strconv.Itoa(motion.Rectangle.Y) + "-" +
|
||||
strconv.Itoa(motion.Rectangle.Width) + "-" + strconv.Itoa(motion.Rectangle.Height)
|
||||
}
|
||||
|
||||
// Get the number of changes from the motion detection.
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
|
||||
s := strconv.FormatInt(displayTimeSeconds, 10) + "_" + // start timestamp in seconds
|
||||
strconv.Itoa(len(strconv.FormatInt(displayTimeMilliseconds, 10))) + "-" + // length of milliseconds
|
||||
strconv.FormatInt(displayTimeMilliseconds, 10) + "_" + // milliseconds
|
||||
config.Name + "_" + // device name
|
||||
motionRectangleString + "_" + // region coordinates, we will not use this for continuous recording
|
||||
strconv.Itoa(numberOfChanges) + "_" + // number of changes
|
||||
"0" // + "_" + // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
name := s + ".mp4"
|
||||
fullName := configDirectory + "/data/recordings/" + name
|
||||
|
||||
// Running...
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started")
|
||||
file, _ = os.Create(fullName)
|
||||
myMuxer, _ = mp4.CreateMp4Muxer(file)
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started (" + name + ")" + " at " + strconv.FormatInt(displayTimeSeconds, 10) + " unix")
|
||||
|
||||
// Check which video codec we need to use.
|
||||
videoSteams, _ := rtspClient.GetVideoStreams()
|
||||
for _, stream := range videoSteams {
|
||||
if stream.Name == "H264" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264)
|
||||
} else if stream.Name == "H265" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265)
|
||||
}
|
||||
// Get width and height from the camera.
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
|
||||
// Get SPS and PPS NALUs from the camera.
|
||||
spsNALUS := configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
ppsNALUS := configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
vpsNALUS := configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
|
||||
// Create a video file, and set the dimensions.
|
||||
mp4Video := video.NewMP4(fullName, spsNALUS, ppsNALUS, vpsNALUS)
|
||||
mp4Video.SetWidth(width)
|
||||
mp4Video.SetHeight(height)
|
||||
|
||||
if videoCodec == "H264" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H264")
|
||||
} else if videoCodec == "H265" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H265")
|
||||
}
|
||||
// For an MP4 container, AAC is the only audio codec supported.
|
||||
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
|
||||
start := false
|
||||
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var nextPkt packets.Packet
|
||||
recordingCursor := queue.DelayedGopCount(int(config.Capture.PreRecording + 1))
|
||||
|
||||
if cursorError == nil {
|
||||
pkt, cursorError = recordingCursor.ReadPacket()
|
||||
if audioCodec == "AAC" {
|
||||
audioTrack = mp4Video.AddAudioTrack("AAC")
|
||||
} else if audioCodec == "PCM_MULAW" {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
|
||||
for cursorError == nil {
|
||||
@@ -413,69 +522,91 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + cursorError.Error())
|
||||
}
|
||||
|
||||
now := time.Now().Unix()
|
||||
now = time.Now().UnixMilli()
|
||||
select {
|
||||
case motion := <-communication.HandleMotion:
|
||||
timestamp = now
|
||||
motionTimestamp = now
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): motion detected while recording. Expanding recording.")
|
||||
numberOfChanges = motion.NumberOfChanges
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Received message with recording data, detected changes to save: " + strconv.Itoa(numberOfChanges))
|
||||
default:
|
||||
}
|
||||
|
||||
if (timestamp+recordingPeriod-now < 0 || now-startRecording > maxRecordingPeriod) && nextPkt.IsKeyFrame {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(timestamp, 10) + ", recordingPeriod: " + strconv.FormatInt(recordingPeriod, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
|
||||
if (motionTimestamp+postRecording-now < 0 || now-startRecording > maxRecordingPeriod-500) && nextPkt.IsKeyFrame {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): timestamp+postRecording-now < 0 - " + strconv.FormatInt(motionTimestamp+postRecording-now, 10) + " < 0")
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): now-startRecording > maxRecordingPeriod-500 - " + strconv.FormatInt(now-startRecording, 10) + " > " + strconv.FormatInt(maxRecordingPeriod-500, 10))
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(motionTimestamp, 10) + ", postRecording: " + strconv.FormatInt(postRecording, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
|
||||
break
|
||||
}
|
||||
if pkt.IsKeyFrame && !start && pkt.Time >= lastDuration {
|
||||
if pkt.IsKeyFrame && !start && pkt.CurrentTime >= startRecording {
|
||||
// We start the recording if we have a keyframe and the last duration is 0 or less than the current packet time.
|
||||
// It could be start we start from the beginning of the recording.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): write frames")
|
||||
start = true
|
||||
}
|
||||
if start {
|
||||
|
||||
ttime := convertPTS(pkt.Time)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): add video sample")
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): add audio sample")
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
// TODO: transcode to AAC, some work to do..
|
||||
// We might need to use ffmpeg to transcode the audio to AAC.
|
||||
// For now we will skip the audio track.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
}
|
||||
|
||||
// We will sync to file every keyframe.
|
||||
if pkt.IsKeyFrame {
|
||||
err := file.Sync()
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
} else {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): synced file " + name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pkt = nextPkt
|
||||
}
|
||||
|
||||
// This will write the trailer a well.
|
||||
myMuxer.WriteTrailer()
|
||||
// Update the last duration and last recording time.
|
||||
// This is used to determine if we need to start a new recording.
|
||||
lastRecordingTime = pkt.CurrentTime
|
||||
|
||||
// This will close the recording and write the last packet.
|
||||
mp4Video.Close(&config)
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): file save: " + name)
|
||||
|
||||
lastDuration = pkt.Time
|
||||
lastRecordingTime = time.Now().Unix()
|
||||
file.Close()
|
||||
file = nil
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name with the duration in milliseconds.
|
||||
s := strconv.FormatInt(displayTimeSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(displayTimeMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(displayTimeMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
motionRectangleString + "_" +
|
||||
strconv.Itoa(numberOfChanges) + "_" + // number of changes
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -523,6 +654,10 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func VerifyCamera(c *gin.Context) {
|
||||
|
||||
// Start OpenTelemetry tracing
|
||||
ctxVerifyCamera, span := tracer.Start(context.Background(), "VerifyCamera", trace.WithSpanKind(trace.SpanKindServer))
|
||||
defer span.End()
|
||||
|
||||
var cameraStreams models.CameraStreams
|
||||
err := c.BindJSON(&cameraStreams)
|
||||
|
||||
@@ -548,12 +683,11 @@ func VerifyCamera(c *gin.Context) {
|
||||
Url: rtspUrl,
|
||||
}
|
||||
|
||||
err := rtspClient.Connect(ctx)
|
||||
err := rtspClient.Connect(ctx, ctxVerifyCamera)
|
||||
if err == nil {
|
||||
|
||||
// Get the streams from the rtsp client.
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
|
||||
videoIdx := -1
|
||||
audioIdx := -1
|
||||
for i, stream := range streams {
|
||||
@@ -564,7 +698,7 @@ func VerifyCamera(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
err := rtspClient.Close()
|
||||
err := rtspClient.Close(ctxVerifyCamera)
|
||||
if err == nil {
|
||||
if videoIdx > -1 {
|
||||
c.JSON(200, models.APIResponse{
|
||||
@@ -611,7 +745,9 @@ func Base64Image(captureDevice *Capture, communication *models.Communication) st
|
||||
|
||||
// We'll try to have a keyframe, if not we'll return an empty string.
|
||||
var encodedImage string
|
||||
for {
|
||||
// Try for 3 times in a row.
|
||||
count := 0
|
||||
for count < 3 {
|
||||
if queue != nil && cursor != nil && rtspClient != nil {
|
||||
pkt, err := cursor.ReadPacket()
|
||||
if err == nil {
|
||||
@@ -624,8 +760,10 @@ func Base64Image(captureDevice *Capture, communication *models.Communication) st
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
encodedImage = base64.StdEncoding.EncodeToString(bytes)
|
||||
break
|
||||
} else {
|
||||
count++
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
@@ -652,15 +790,22 @@ func JpegImage(captureDevice *Capture, communication *models.Communication) imag
|
||||
|
||||
// We'll try to have a keyframe, if not we'll return an empty string.
|
||||
var image image.YCbCr
|
||||
for {
|
||||
// Try for 3 times in a row.
|
||||
count := 0
|
||||
for count < 3 {
|
||||
if queue != nil && cursor != nil && rtspClient != nil {
|
||||
pkt, err := cursor.ReadPacket()
|
||||
if err == nil {
|
||||
if !pkt.IsKeyFrame {
|
||||
continue
|
||||
}
|
||||
image, _ = (*rtspClient).DecodePacket(pkt)
|
||||
break
|
||||
image, err = (*rtspClient).DecodePacket(pkt)
|
||||
if err != nil {
|
||||
count++
|
||||
continue
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
break
|
||||
@@ -672,3 +817,7 @@ func JpegImage(captureDevice *Capture, communication *models.Communication) imag
|
||||
func convertPTS(v time.Duration) uint64 {
|
||||
return uint64(v.Milliseconds())
|
||||
}
|
||||
|
||||
/*func convertPTS2(v int64) uint64 {
|
||||
return uint64(v) / 100
|
||||
}*/
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/dromara/carbon/v2"
|
||||
"github.com/elastic/go-sysinfo"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/golang-module/carbon/v2"
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
|
||||
@@ -131,7 +131,7 @@ func HandleUpload(configDirectory string, configuration *models.Configuration, c
|
||||
log.Log.Error("HandleUpload: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
delay = 20 * time.Second // slow down
|
||||
delay = 5 * time.Second // slow down
|
||||
if err != nil {
|
||||
log.Log.Error("HandleUpload: " + err.Error())
|
||||
}
|
||||
@@ -229,15 +229,17 @@ func HandleHeartBeat(configuration *models.Configuration, communication *models.
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
config := configuration.Config
|
||||
|
||||
// Get a pull point address
|
||||
var pullPointAddress string
|
||||
if config.Capture.IPCamera.ONVIFXAddr != "" {
|
||||
kerberosAgentVersion := utils.VERSION
|
||||
|
||||
// Create a loop pull point address, which we will use to retrieve async events
|
||||
// As you'll read below camera manufactures are having different implementations of events.
|
||||
var pullPointAddressLoopState string
|
||||
if configuration.Config.Capture.IPCamera.ONVIFXAddr != "" {
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
if err == nil {
|
||||
pullPointAddress, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
pullPointAddressLoopState, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
@@ -251,7 +253,6 @@ loop:
|
||||
|
||||
// We'll check ONVIF capabilitites anyhow.. Verify if we have PTZ, presets and inputs/outputs.
|
||||
// For the inputs we will keep track of a the inputs and outputs state.
|
||||
|
||||
onvifEnabled := "false"
|
||||
onvifZoom := "false"
|
||||
onvifPanTilt := "false"
|
||||
@@ -262,6 +263,7 @@ loop:
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
if err == nil {
|
||||
// We will try to retrieve the PTZ configurations from the device.
|
||||
onvifEnabled = "true"
|
||||
configurations, err := onvif.GetPTZConfigurationsFromDevice(device)
|
||||
if err == nil {
|
||||
@@ -296,8 +298,22 @@ loop:
|
||||
|
||||
// We will also fetch some events, to know the status of the inputs and outputs.
|
||||
// More event types might be added.
|
||||
if pullPointAddress != "" {
|
||||
events, err := onvif.GetEventMessages(device, pullPointAddress)
|
||||
// -- We have two differen pull point subscriptions, one for the initials events and one for the loop.
|
||||
// -- Some cameras do send recurrent events, others don't.
|
||||
// a. For some older Hikvision models, events are send repeatedly (if input is high) with the strong state (set to false).
|
||||
// - In this scenarion we are using a polling mechanism and set a timestamp to understand if the input is still active.
|
||||
// b. For some newer Hikvision models, Avigilon, events are send only once (if state is set active).
|
||||
// - In this scenario we are creating a new subscription to retrieve the initial (current) state of the inputs and outputs.
|
||||
|
||||
// Get a new pull point address, to get the initiatal state of the inputs and outputs.
|
||||
pullPointAddressInitialState, err := onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
if pullPointAddressInitialState != "" {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Fetching events from pullPointAddressInitialState")
|
||||
events, err := onvif.GetEventMessages(device, pullPointAddressInitialState)
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Completed fetching events from pullPointAddressInitialState")
|
||||
if err == nil && len(events) > 0 {
|
||||
onvifEventsList, err = json.Marshal(events)
|
||||
if err != nil {
|
||||
@@ -307,9 +323,28 @@ loop:
|
||||
} else if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while getting events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
// Try to unsubscribe and subscribe again.
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddress)
|
||||
pullPointAddress, err = onvif.CreatePullPointSubscription(device)
|
||||
} else if len(events) == 0 {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): no events found.")
|
||||
onvifEventsList = []byte("[]")
|
||||
}
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddressInitialState)
|
||||
}
|
||||
|
||||
// We do a second run an a long-living subscription to get the events asynchronously.
|
||||
if pullPointAddressLoopState != "" {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Fetching events from pullPointAddressLoopState")
|
||||
events, err := onvif.GetEventMessages(device, pullPointAddressLoopState)
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Completed fetching events from pullPointAddressLoopState")
|
||||
if err == nil && len(events) > 0 {
|
||||
onvifEventsList, err = json.Marshal(events)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while marshalling events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while getting events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
pullPointAddressLoopState, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
@@ -319,15 +354,55 @@ loop:
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): no pull point address found.")
|
||||
onvifEventsList = []byte("[]")
|
||||
|
||||
// Try again
|
||||
pullPointAddress, err = onvif.CreatePullPointSubscription(device)
|
||||
pullPointAddressLoopState, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// It also might be that events are not supported by the camera, in that case we will try to get the digital inputs and outputs.
|
||||
// Through the `device` API, the `GetDigitalInputs` and `GetDigitalOutputs` functions are called.
|
||||
// The disadvantage of this approach is that we don't have the state of the inputs and outputs (which is crazy..)
|
||||
|
||||
if pullPointAddressInitialState == "" && pullPointAddressLoopState == "" {
|
||||
var events []onvif.ONVIFEvents
|
||||
outputs, err := onvif.GetRelayOutputs(device)
|
||||
if err != nil {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): error while getting relay outputs: " + err.Error())
|
||||
} else {
|
||||
for _, output := range outputs.RelayOutputs {
|
||||
event := onvif.ONVIFEvents{
|
||||
Key: string(output.Token),
|
||||
Value: "false",
|
||||
Type: "output",
|
||||
Timestamp: time.Now().Unix(),
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
}
|
||||
|
||||
inputs, err := onvif.GetDigitalInputs(device)
|
||||
if err != nil {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): error while getting digital inputs: " + err.Error())
|
||||
} else {
|
||||
for _, input := range inputs.DigitalInputs {
|
||||
event := onvif.ONVIFEvents{
|
||||
Key: string(input.Token),
|
||||
Value: "false",
|
||||
Type: "input",
|
||||
Timestamp: time.Now().Unix(),
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal the events
|
||||
onvifEventsList, err = json.Marshal(events)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while marshalling events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while connecting to ONVIF device: " + err.Error())
|
||||
onvifPresetsList = []byte("[]")
|
||||
@@ -395,6 +470,16 @@ loop:
|
||||
hasBackChannel = "true"
|
||||
}
|
||||
|
||||
hub_encryption := "false"
|
||||
if config.HubEncryption == "true" {
|
||||
hub_encryption = "true"
|
||||
}
|
||||
|
||||
e2e_encryption := "false"
|
||||
if config.Encryption != nil && config.Encryption.Enabled == "true" {
|
||||
e2e_encryption = "true"
|
||||
}
|
||||
|
||||
// We will formated the uptime to a human readable format
|
||||
// this will be used on Kerberos Hub: Uptime -> 1 day and 2 hours.
|
||||
uptimeFormatted := uptimeStart.Format("2006-01-02 15:04:05")
|
||||
@@ -411,7 +496,9 @@ loop:
|
||||
|
||||
var object = fmt.Sprintf(`{
|
||||
"key" : "%s",
|
||||
"version" : "3.0.0",
|
||||
"version" : "%s",
|
||||
"hub_encryption": "%s",
|
||||
"e2e_encryption": "%s",
|
||||
"release" : "%s",
|
||||
"cpuid" : "%s",
|
||||
"clouduser" : "%s",
|
||||
@@ -447,12 +534,11 @@ loop:
|
||||
"docker" : true,
|
||||
"kios" : false,
|
||||
"raspberrypi" : false
|
||||
}`, config.Key, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, onvifEventsList, cameraConnected, hasBackChannel)
|
||||
}`, config.Key, kerberosAgentVersion, hub_encryption, e2e_encryption, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, onvifEventsList, cameraConnected, hasBackChannel)
|
||||
|
||||
// Get the private key to encrypt the data using symmetric encryption: AES.
|
||||
HubEncryption := config.HubEncryption
|
||||
privateKey := config.HubPrivateKey
|
||||
if HubEncryption == "true" && privateKey != "" {
|
||||
if hub_encryption == "true" && privateKey != "" {
|
||||
// Encrypt the data using AES.
|
||||
encrypted, err := encryption.AesEncrypt([]byte(object), privateKey)
|
||||
if err != nil {
|
||||
@@ -499,7 +585,7 @@ loop:
|
||||
|
||||
var object = fmt.Sprintf(`{
|
||||
"key" : "%s",
|
||||
"version" : "3.0.0",
|
||||
"version" : "%s",
|
||||
"release" : "%s",
|
||||
"cpuid" : "%s",
|
||||
"clouduser" : "%s",
|
||||
@@ -533,7 +619,7 @@ loop:
|
||||
"docker" : true,
|
||||
"kios" : false,
|
||||
"raspberrypi" : false
|
||||
}`, config.Key, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, cameraConnected)
|
||||
}`, config.Key, kerberosAgentVersion, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, cameraConnected)
|
||||
|
||||
var jsonStr = []byte(object)
|
||||
buffy := bytes.NewBuffer(jsonStr)
|
||||
@@ -561,11 +647,11 @@ loop:
|
||||
}
|
||||
}
|
||||
|
||||
if pullPointAddress != "" {
|
||||
if pullPointAddressLoopState != "" {
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
if err == nil {
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddress)
|
||||
if err != nil {
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddressLoopState)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -586,6 +672,7 @@ func HandleLiveStreamSD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
// Check if we need to enable the live stream
|
||||
if config.Capture.Liveview != "false" {
|
||||
|
||||
deviceId := config.Key
|
||||
hubKey := ""
|
||||
if config.Cloud == "s3" && config.S3 != nil && config.S3.Publickey != "" {
|
||||
hubKey = config.S3.Publickey
|
||||
@@ -620,24 +707,77 @@ func HandleLiveStreamSD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
img, err := rtspClient.DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
encoded := base64.StdEncoding.EncodeToString(bytes)
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = encoded
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
chunking := config.Capture.LiveviewChunking
|
||||
|
||||
if chunking == "true" {
|
||||
|
||||
// Split encoded image into chunks of 2kb
|
||||
// This is to prevent the MQTT message to be too large.
|
||||
// By default, bytes are not encoded to base64 here; you are splitting the raw JPEG/PNG bytes.
|
||||
// However, in MQTT and web contexts, binary data may not be handled well, so base64 is often used.
|
||||
// To avoid base64 encoding, just send the raw []byte chunks as you do here.
|
||||
// If you want to avoid base64, make sure the receiver can handle binary payloads.
|
||||
|
||||
chunkSize := 25 * 1024 // 25KB chunks
|
||||
var chunks [][]byte
|
||||
for i := 0; i < len(bytes); i += chunkSize {
|
||||
end := i + chunkSize
|
||||
if end > len(bytes) {
|
||||
end = len(bytes)
|
||||
}
|
||||
chunk := bytes[i:end]
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
log.Log.Infof("cloud.HandleLiveStreamSD(): Sending %d chunks of size %d bytes.", len(chunks), chunkSize)
|
||||
|
||||
timestamp := time.Now().Unix()
|
||||
for i, chunk := range chunks {
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["id"] = timestamp
|
||||
valueMap["chunk"] = chunk
|
||||
valueMap["chunkIndex"] = i
|
||||
valueMap["chunkSize"] = chunkSize
|
||||
valueMap["chunkCount"] = len(chunks)
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Version: "v1.0.0",
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: deviceId,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey+"/"+deviceId, 1, false, payload)
|
||||
log.Log.Infof("cloud.HandleLiveStreamSD(): sent chunk %d/%d to MQTT topic kerberos/hub/%s/%s", i+1, len(chunks), hubKey, deviceId)
|
||||
time.Sleep(33 * time.Millisecond) // Sleep to avoid flooding the MQTT broker with messages
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = bytes
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
time.Sleep(1000 * time.Millisecond) // Sleep to avoid flooding the MQTT broker with messages
|
||||
}
|
||||
|
||||
} else {
|
||||
@@ -681,6 +821,78 @@ func HandleLiveStreamHD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
}
|
||||
}
|
||||
|
||||
func HandleRealtimeProcessing(processingCursor *packets.QueueCursor, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, rtspClient capture.RTSPClient) {
|
||||
|
||||
log.Log.Debug("cloud.RealtimeProcessing(): started")
|
||||
|
||||
config := configuration.Config
|
||||
|
||||
// If offline made is enabled, we will stop the thread.
|
||||
if config.Offline == "true" {
|
||||
log.Log.Debug("cloud.RealtimeProcessing(): stopping as Offline is enabled.")
|
||||
} else {
|
||||
|
||||
// Check if we need to enable the realtime processing
|
||||
if config.RealtimeProcessing == "true" {
|
||||
|
||||
hubKey := ""
|
||||
if config.Cloud == "s3" && config.S3 != nil && config.S3.Publickey != "" {
|
||||
hubKey = config.S3.Publickey
|
||||
} else if config.Cloud == "kstorage" && config.KStorage != nil && config.KStorage.CloudKey != "" {
|
||||
hubKey = config.KStorage.CloudKey
|
||||
}
|
||||
// This is the new way ;)
|
||||
if config.HubKey != "" {
|
||||
hubKey = config.HubKey
|
||||
}
|
||||
|
||||
// We will publish the keyframes to the MQTT topic.
|
||||
realtimeProcessingTopic := "kerberos/keyframes/" + hubKey
|
||||
if config.RealtimeProcessingTopic != "" {
|
||||
realtimeProcessingTopic = config.RealtimeProcessingTopic
|
||||
}
|
||||
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
|
||||
for cursorError == nil {
|
||||
pkt, cursorError = processingCursor.ReadPacket()
|
||||
if len(pkt.Data) == 0 || !pkt.IsKeyFrame {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Log.Info("cloud.RealtimeProcessing(): Sending base64 encoded images to MQTT.")
|
||||
img, err := rtspClient.DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
encoded := base64.StdEncoding.EncodeToString(bytes)
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = encoded
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-keyframe",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish(realtimeProcessingTopic, 0, false, payload)
|
||||
} else {
|
||||
log.Log.Info("cloud.RealtimeProcessing(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
log.Log.Debug("cloud.RealtimeProcessing(): stopping as Liveview is disabled.")
|
||||
}
|
||||
}
|
||||
|
||||
log.Log.Debug("cloud.HandleLiveStreamSD(): finished")
|
||||
}
|
||||
|
||||
// VerifyHub godoc
|
||||
// @Router /api/hub/verify [post]
|
||||
// @ID verify-hub
|
||||
@@ -1004,3 +1216,184 @@ func VerifyPersistence(c *gin.Context, configDirectory string) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// VerifySecondaryPersistence godoc
|
||||
// @Router /api/persistence/secondary/verify [post]
|
||||
// @ID verify-persistence
|
||||
// @Security Bearer
|
||||
// @securityDefinitions.apikey Bearer
|
||||
// @in header
|
||||
// @name Authorization
|
||||
// @Tags persistence
|
||||
// @Param config body models.Config true "Config"
|
||||
// @Summary Will verify the secondary persistence.
|
||||
// @Description Will verify the secondary persistence.
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func VerifySecondaryPersistence(c *gin.Context, configDirectory string) {
|
||||
|
||||
var config models.Config
|
||||
err := c.BindJSON(&config)
|
||||
if err != nil || config.Cloud != "" {
|
||||
|
||||
if config.Cloud == "kstorage" || config.Cloud == "kerberosvault" {
|
||||
|
||||
if config.KStorageSecondary == nil {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): please fill-in the required Kerberos Vault credentials."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
|
||||
} else {
|
||||
|
||||
uri := config.KStorageSecondary.URI
|
||||
accessKey := config.KStorageSecondary.AccessKey
|
||||
secretAccessKey := config.KStorageSecondary.SecretAccessKey
|
||||
directory := config.KStorageSecondary.Directory
|
||||
provider := config.KStorageSecondary.Provider
|
||||
|
||||
if err == nil && uri != "" && accessKey != "" && secretAccessKey != "" {
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", uri+"/ping", nil)
|
||||
if err == nil {
|
||||
req.Header.Add("X-Kerberos-Storage-AccessKey", accessKey)
|
||||
req.Header.Add("X-Kerberos-Storage-SecretAccessKey", secretAccessKey)
|
||||
resp, err := client.Do(req)
|
||||
|
||||
if err == nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
|
||||
if provider != "" || directory != "" {
|
||||
|
||||
// Generate a random name.
|
||||
timestamp := time.Now().Unix()
|
||||
fileName := strconv.FormatInt(timestamp, 10) +
|
||||
"_6-967003_" + config.Name + "_200-200-400-400_24_769.mp4"
|
||||
|
||||
// Open test-480p.mp4
|
||||
file, err := os.Open(configDirectory + "/data/test-480p.mp4")
|
||||
if err != nil {
|
||||
msg := "cloud.VerifyPersistence(kerberosvault): error reading test-480p.mp4: " + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
req, err := http.NewRequest("POST", uri+"/storage", file)
|
||||
if err == nil {
|
||||
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", config.HubKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", accessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", secretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", directory)
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Upload allowed using the credentials provided (" + accessKey + ", " + secretAccessKey + ")"
|
||||
log.Log.Info(msg)
|
||||
c.JSON(200, models.APIResponse{
|
||||
Data: body,
|
||||
})
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying your persistence settings. Make sure your provider is the same as the storage provider in your Kerberos Vault, and the relevant storage provider is configured properly."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Upload of fake recording failed: " + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while creating /storage POST request." + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Provider and/or directory is missing from the request."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying storage credentials: " + string(body)
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying storage credentials:" + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying storage credentials:" + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): please fill-in the required Kerberos Vault credentials."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(): No persistence was specified, so do not know what to verify:" + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,14 +3,20 @@ package cloud
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
)
|
||||
|
||||
// We will count the number of retries we have done.
|
||||
// If we have done more than "kstorageRetryPolicy" retries, we will stop, and start sending to the secondary storage.
|
||||
var kstorageRetryCount = 0
|
||||
var kstorageRetryTimeout = time.Now().Unix()
|
||||
|
||||
func UploadKerberosVault(configuration *models.Configuration, fileName string) (bool, bool, error) {
|
||||
|
||||
config := configuration.Config
|
||||
@@ -19,7 +25,7 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
config.KStorage.SecretAccessKey == "" ||
|
||||
config.KStorage.Directory == "" ||
|
||||
config.KStorage.URI == "" {
|
||||
err := "UploadKerberosVault: Kerberos Vault not properly configured."
|
||||
err := "UploadKerberosVault: Kerberos Vault not properly configured"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
@@ -42,64 +48,147 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
defer file.Close()
|
||||
}
|
||||
if err != nil {
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore."
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
|
||||
publicKey := config.KStorage.CloudKey
|
||||
// This is the new way ;)
|
||||
if config.HubKey != "" {
|
||||
publicKey = config.HubKey
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorage.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorage.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorage.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
|
||||
// We need to check if we are in a retry timeout.
|
||||
if kstorageRetryTimeout <= time.Now().Unix() {
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorage.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorage.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorage.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
|
||||
return false, true, nil
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
kstorageRetryCount = 0
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
// We increase the retry count, and set the timeout.
|
||||
// If we have reached the retry policy, we set the timeout.
|
||||
// This means we will not retry for the next 5 minutes.
|
||||
if kstorageRetryCount < config.KStorage.MaxRetries {
|
||||
kstorageRetryCount = (kstorageRetryCount + 1)
|
||||
}
|
||||
if kstorageRetryCount == config.KStorage.MaxRetries {
|
||||
kstorageRetryTimeout = time.Now().Add(time.Duration(config.KStorage.Timeout) * time.Second).Unix()
|
||||
}
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// We might need to check if we can upload to our secondary storage.
|
||||
if config.KStorageSecondary.AccessKey == "" ||
|
||||
config.KStorageSecondary.SecretAccessKey == "" ||
|
||||
config.KStorageSecondary.Directory == "" ||
|
||||
config.KStorageSecondary.URI == "" {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Secondary Kerberos Vault not properly configured.")
|
||||
} else {
|
||||
|
||||
if kstorageRetryCount < config.KStorage.MaxRetries {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Do not upload to secondary storage, we are still in retry policy.")
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Uploading to Secondary Kerberos Vault (" + config.KStorageSecondary.URI + ")")
|
||||
|
||||
file, err = os.OpenFile(fullname, os.O_RDWR, 0755)
|
||||
if file != nil {
|
||||
defer file.Close()
|
||||
}
|
||||
if err != nil {
|
||||
err := "UploadKerberosVault (Secondary): Upload Failed, file doesn't exists anymore"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.KStorageSecondary.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault (Secondary): error reading request, " + config.KStorageSecondary.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorageSecondary.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorageSecondary.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorageSecondary.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorageSecondary.Directory)
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Upload Finished to secondary, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Upload Failed to secondary, " + resp.Status + ", " + string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errorMessage := "UploadKerberosVault: Upload Failed, " + err.Error()
|
||||
log.Log.Info(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
"github.com/gin-gonic/gin"
|
||||
"go.opentelemetry.io/otel"
|
||||
|
||||
"github.com/kerberos-io/agent/machinery/src/capture"
|
||||
"github.com/kerberos-io/agent/machinery/src/cloud"
|
||||
@@ -23,9 +24,15 @@ import (
|
||||
"github.com/tevino/abool"
|
||||
)
|
||||
|
||||
func Bootstrap(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
var tracer = otel.Tracer("github.com/kerberos-io/agent/machinery/src/components")
|
||||
|
||||
func Bootstrap(ctx context.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
|
||||
log.Log.Debug("components.Kerberos.Bootstrap(): bootstrapping the kerberos agent.")
|
||||
|
||||
bootstrapContext := context.Background()
|
||||
_, span := tracer.Start(bootstrapContext, "Bootstrap")
|
||||
|
||||
// We will keep track of the Kerberos Agent up time
|
||||
// This is send to Kerberos Hub in a heartbeat.
|
||||
uptimeStart := time.Now()
|
||||
@@ -78,6 +85,8 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
// Configure a MQTT client which helps for a bi-directional communication
|
||||
mqttClient := routers.ConfigureMQTT(configDirectory, configuration, communication)
|
||||
|
||||
span.End()
|
||||
|
||||
// Run the agent and fire up all the other
|
||||
// goroutines which do image capture, motion detection, onvif, etc.
|
||||
for {
|
||||
@@ -114,6 +123,9 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
|
||||
func RunAgent(configDirectory string, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, uptimeStart time.Time, cameraSettings *models.Camera, captureDevice *capture.Capture) string {
|
||||
|
||||
ctx := context.Background()
|
||||
ctxRunAgent, span := tracer.Start(ctx, "RunAgent")
|
||||
|
||||
log.Log.Info("components.Kerberos.RunAgent(): Creating camera and processing threads.")
|
||||
config := configuration.Config
|
||||
|
||||
@@ -124,10 +136,10 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
rtspUrl := config.Capture.IPCamera.RTSP
|
||||
rtspClient := captureDevice.SetMainClient(rtspUrl)
|
||||
if rtspUrl != "" {
|
||||
err := rtspClient.Connect(context.Background())
|
||||
err := rtspClient.Connect(ctx, ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP stream: " + err.Error())
|
||||
rtspClient.Close()
|
||||
rtspClient.Close(ctxRunAgent)
|
||||
rtspClient = nil
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
@@ -145,7 +157,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
videoStreams, err := rtspClient.GetVideoStreams()
|
||||
if err != nil || len(videoStreams) == 0 {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): no video stream found, might be the wrong codec (we only support H264 for the moment)")
|
||||
rtspClient.Close()
|
||||
rtspClient.Close(ctxRunAgent)
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
@@ -161,6 +173,12 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
configuration.Config.Capture.IPCamera.Width = width
|
||||
configuration.Config.Capture.IPCamera.Height = height
|
||||
|
||||
// Set the SPS and PPS values in the configuration.
|
||||
configuration.Config.Capture.IPCamera.SPSNALUs = [][]byte{videoStream.SPS}
|
||||
configuration.Config.Capture.IPCamera.PPSNALUs = [][]byte{videoStream.PPS}
|
||||
configuration.Config.Capture.IPCamera.VPSNALUs = [][]byte{videoStream.VPS}
|
||||
|
||||
// Define queues for the main and sub stream.
|
||||
var queue *packets.Queue
|
||||
var subQueue *packets.Queue
|
||||
|
||||
@@ -182,19 +200,19 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
rtspSubClient := captureDevice.SetSubClient(subRtspUrl)
|
||||
captureDevice.RTSPSubClient = rtspSubClient
|
||||
|
||||
err := rtspSubClient.Connect(context.Background())
|
||||
err := rtspSubClient.Connect(ctx, ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP sub stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP sub stream: " + rtspUrl)
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP sub stream: " + subRtspUrl)
|
||||
|
||||
// Get the video streams from the RTSP server.
|
||||
videoSubStreams, err = rtspSubClient.GetVideoStreams()
|
||||
if err != nil || len(videoSubStreams) == 0 {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): no video sub stream found, might be the wrong codec (we only support H264 for the moment)")
|
||||
rtspSubClient.Close()
|
||||
rtspSubClient.Close(ctxRunAgent)
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
@@ -206,42 +224,8 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
height := videoSubStream.Height
|
||||
|
||||
// Set config values as well
|
||||
configuration.Config.Capture.IPCamera.Width = width
|
||||
configuration.Config.Capture.IPCamera.Height = height
|
||||
}
|
||||
|
||||
if cameraSettings.RTSP != rtspUrl ||
|
||||
cameraSettings.SubRTSP != subRtspUrl ||
|
||||
cameraSettings.Width != width ||
|
||||
cameraSettings.Height != height {
|
||||
|
||||
// TODO: this condition is used to reset the decoder when the camera settings change.
|
||||
// The main idea is that you only set the decoder once, and then reuse it on each restart (no new memory allocation).
|
||||
// However the stream settings of the camera might have been changed, and so the decoder might need to be reloaded.
|
||||
// .... Not used for the moment ....
|
||||
|
||||
if cameraSettings.RTSP != "" && cameraSettings.SubRTSP != "" && cameraSettings.Initialized {
|
||||
//decoder.Close()
|
||||
//if subStreamEnabled {
|
||||
// subDecoder.Close()
|
||||
//}
|
||||
}
|
||||
|
||||
// At some routines we will need to decode the image.
|
||||
// Make sure its properly locked as we only have a single decoder.
|
||||
log.Log.Info("components.Kerberos.RunAgent(): camera settings changed, reloading decoder")
|
||||
//capture.GetVideoDecoder(decoder, streams)
|
||||
//if subStreamEnabled {
|
||||
// capture.GetVideoDecoder(subDecoder, subStreams)
|
||||
//}
|
||||
|
||||
cameraSettings.RTSP = rtspUrl
|
||||
cameraSettings.SubRTSP = subRtspUrl
|
||||
cameraSettings.Width = width
|
||||
cameraSettings.Height = height
|
||||
cameraSettings.Initialized = true
|
||||
} else {
|
||||
log.Log.Info("components.Kerberos.RunAgent(): camera settings did not change, keeping decoder")
|
||||
configuration.Config.Capture.IPCamera.SubWidth = width
|
||||
configuration.Config.Capture.IPCamera.SubHeight = height
|
||||
}
|
||||
|
||||
// We are creating a queue to store the RTSP frames in, these frames will be
|
||||
@@ -251,28 +235,28 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
// Set the maximum GOP count, this is used to determine the pre-recording time.
|
||||
log.Log.Info("components.Kerberos.RunAgent(): SetMaxGopCount was set with: " + strconv.Itoa(int(config.Capture.PreRecording)+1))
|
||||
queue.SetMaxGopCount(int(config.Capture.PreRecording) + 1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
|
||||
queue.SetMaxGopCount(1) // We will adjust this later on, when we have the GOP size.
|
||||
queue.WriteHeader(videoStreams)
|
||||
go rtspClient.Start(context.Background(), "main", queue, configuration, communication)
|
||||
go rtspClient.Start(ctx, "main", queue, configuration, communication)
|
||||
|
||||
// Main stream is connected and ready to go.
|
||||
communication.MainStreamConnected = true
|
||||
|
||||
// Try to create backchannel
|
||||
rtspBackChannelClient := captureDevice.SetBackChannelClient(rtspUrl)
|
||||
err = rtspBackChannelClient.ConnectBackChannel(context.Background())
|
||||
err = rtspBackChannelClient.ConnectBackChannel(ctx, ctxRunAgent)
|
||||
if err == nil {
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP backchannel stream: " + rtspUrl)
|
||||
go rtspBackChannelClient.StartBackChannel(context.Background())
|
||||
go rtspBackChannelClient.StartBackChannel(ctx, ctxRunAgent)
|
||||
}
|
||||
|
||||
rtspSubClient := captureDevice.RTSPSubClient
|
||||
if subStreamEnabled && rtspSubClient != nil {
|
||||
subQueue = packets.NewQueue()
|
||||
communication.SubQueue = subQueue
|
||||
subQueue.SetMaxGopCount(1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
|
||||
subQueue.SetMaxGopCount(1) // GOP time frame is set to 1 for motion detection and livestreaming.
|
||||
subQueue.WriteHeader(videoSubStreams)
|
||||
go rtspSubClient.Start(context.Background(), "sub", subQueue, configuration, communication)
|
||||
go rtspSubClient.Start(ctx, "sub", subQueue, configuration, communication)
|
||||
|
||||
// Sub stream is connected and ready to go.
|
||||
communication.SubStreamConnected = true
|
||||
@@ -288,7 +272,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
}
|
||||
|
||||
// Handle livestream HD (high resolution over WEBRTC)
|
||||
communication.HandleLiveHDHandshake = make(chan models.RequestHDStreamPayload, 1)
|
||||
communication.HandleLiveHDHandshake = make(chan models.RequestHDStreamPayload, 10)
|
||||
if subStreamEnabled {
|
||||
livestreamHDCursor := subQueue.Latest()
|
||||
go cloud.HandleLiveStreamHD(livestreamHDCursor, configuration, communication, mqttClient, rtspSubClient)
|
||||
@@ -301,7 +285,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
go capture.HandleRecordStream(queue, configDirectory, configuration, communication, rtspClient)
|
||||
|
||||
// Handle processing of motion
|
||||
communication.HandleMotion = make(chan models.MotionDataPartial, 1)
|
||||
communication.HandleMotion = make(chan models.MotionDataPartial, 10)
|
||||
if subStreamEnabled {
|
||||
motionCursor := subQueue.Latest()
|
||||
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, rtspSubClient)
|
||||
@@ -310,14 +294,23 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, rtspClient)
|
||||
}
|
||||
|
||||
// Handle realtime processing if enabled.
|
||||
if subStreamEnabled {
|
||||
realtimeProcessingCursor := subQueue.Latest()
|
||||
go cloud.HandleRealtimeProcessing(realtimeProcessingCursor, configuration, communication, mqttClient, rtspClient)
|
||||
} else {
|
||||
realtimeProcessingCursor := queue.Latest()
|
||||
go cloud.HandleRealtimeProcessing(realtimeProcessingCursor, configuration, communication, mqttClient, rtspClient)
|
||||
}
|
||||
|
||||
// Handle Upload to cloud provider (Kerberos Hub, Kerberos Vault and others)
|
||||
go cloud.HandleUpload(configDirectory, configuration, communication)
|
||||
|
||||
// Handle ONVIF actions
|
||||
communication.HandleONVIF = make(chan models.OnvifAction, 1)
|
||||
communication.HandleONVIF = make(chan models.OnvifAction, 10)
|
||||
go onvif.HandleONVIFActions(configuration, communication)
|
||||
|
||||
communication.HandleAudio = make(chan models.AudioDataPartial, 1)
|
||||
communication.HandleAudio = make(chan models.AudioDataPartial, 10)
|
||||
if rtspBackChannelClient.HasBackChannel {
|
||||
communication.HasBackChannel = true
|
||||
go WriteAudioToBackchannel(communication, rtspBackChannelClient)
|
||||
@@ -326,6 +319,9 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
// If we reach this point, we have a working RTSP connection.
|
||||
communication.CameraConnected = true
|
||||
|
||||
// Otel end span
|
||||
span.End()
|
||||
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
// This will go into a blocking state, once this channel is triggered
|
||||
// the agent will cleanup and restart.
|
||||
@@ -348,9 +344,20 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
// Here we are cleaning up everything!
|
||||
if configuration.Config.Offline != "true" {
|
||||
communication.HandleUpload <- "stop"
|
||||
select {
|
||||
case communication.HandleUpload <- "stop":
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping upload")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping upload timed out")
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case communication.HandleStream <- "stop":
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping stream")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping stream timed out")
|
||||
}
|
||||
communication.HandleStream <- "stop"
|
||||
// We use the steam channel to stop both main and sub stream.
|
||||
//if subStreamEnabled {
|
||||
// communication.HandleSubStream <- "stop"
|
||||
@@ -358,7 +365,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
err = rtspClient.Close()
|
||||
err = rtspClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -370,7 +377,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
communication.Queue = nil
|
||||
|
||||
if subStreamEnabled {
|
||||
err = rtspSubClient.Close()
|
||||
err = rtspSubClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP sub stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -381,7 +388,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
communication.SubQueue = nil
|
||||
}
|
||||
|
||||
err = rtspBackChannelClient.Close()
|
||||
err = rtspBackChannelClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP backchannel stream: " + err.Error())
|
||||
}
|
||||
@@ -442,8 +449,12 @@ func ControlAgent(communication *models.Communication) {
|
||||
// After 15 seconds without activity this is thrown..
|
||||
if occurence == 3 {
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking mainstream.")
|
||||
communication.HandleBootstrap <- "restart"
|
||||
time.Sleep(2 * time.Second)
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream timed out")
|
||||
}
|
||||
occurence = 0
|
||||
}
|
||||
|
||||
@@ -464,9 +475,12 @@ func ControlAgent(communication *models.Communication) {
|
||||
|
||||
// After 15 seconds without activity this is thrown..
|
||||
if occurenceSub == 3 {
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream.")
|
||||
communication.HandleBootstrap <- "restart"
|
||||
time.Sleep(2 * time.Second)
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream timed out")
|
||||
}
|
||||
occurenceSub = 0
|
||||
}
|
||||
}
|
||||
@@ -603,7 +617,12 @@ func GetDays(c *gin.Context, configDirectory string, configuration *models.Confi
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func StopAgent(c *gin.Context, communication *models.Communication) {
|
||||
log.Log.Info("components.Kerberos.StopAgent(): sending signal to stop agent, this will os.Exit(0).")
|
||||
communication.HandleBootstrap <- "stop"
|
||||
select {
|
||||
case communication.HandleBootstrap <- "stop":
|
||||
log.Log.Info("components.Kerberos.StopAgent(): Stopping machinery.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.StopAgent(): Stopping machinery timed out")
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"stopped": true,
|
||||
})
|
||||
@@ -618,7 +637,12 @@ func StopAgent(c *gin.Context, communication *models.Communication) {
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func RestartAgent(c *gin.Context, communication *models.Communication) {
|
||||
log.Log.Info("components.Kerberos.RestartAgent(): sending signal to restart agent.")
|
||||
communication.HandleBootstrap <- "restart"
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
log.Log.Info("components.Kerberos.RestartAgent(): Restarting machinery.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.RestartAgent(): Restarting machinery timed out")
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"restarted": true,
|
||||
})
|
||||
|
||||
@@ -87,7 +87,6 @@ func WriteFileToBackChannel(infile av.DemuxCloser) {
|
||||
break
|
||||
}
|
||||
// Send to backchannel
|
||||
fmt.Println(buffer)
|
||||
infile.Write(buffer, 2, uint32(count))
|
||||
|
||||
count = count + 1024
|
||||
|
||||
@@ -21,6 +21,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
|
||||
var isPixelChangeThresholdReached = false
|
||||
var changesToReturn = 0
|
||||
var motionRectangle models.MotionRectangle
|
||||
|
||||
pixelThreshold := config.Capture.PixelChangeThreshold
|
||||
// Might not be set in the config file, so set it to 150
|
||||
@@ -132,7 +133,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
if detectMotion {
|
||||
|
||||
// Remember additional information about the result of findmotion
|
||||
isPixelChangeThresholdReached, changesToReturn = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
|
||||
isPixelChangeThresholdReached, changesToReturn, motionRectangle = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
|
||||
if isPixelChangeThresholdReached {
|
||||
|
||||
// If offline mode is disabled, send a message to the hub
|
||||
@@ -150,7 +151,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("computervision.main.ProcessMotion(): failed to package MQTT message: " + err.Error())
|
||||
}
|
||||
@@ -164,6 +165,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
dataToPass := models.MotionDataPartial{
|
||||
Timestamp: time.Now().Unix(),
|
||||
NumberOfChanges: changesToReturn,
|
||||
Rectangle: motionRectangle,
|
||||
}
|
||||
communication.HandleMotion <- dataToPass //Save data to the channel
|
||||
}
|
||||
@@ -185,24 +187,58 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
log.Log.Debug("computervision.main.ProcessMotion(): stop the motion detection.")
|
||||
}
|
||||
|
||||
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int) {
|
||||
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int, motionRectangle models.MotionRectangle) {
|
||||
image1 := imageArray[0]
|
||||
image2 := imageArray[1]
|
||||
image3 := imageArray[2]
|
||||
threshold := 60
|
||||
changes := AbsDiffBitwiseAndThreshold(image1, image2, image3, threshold, coordinatesToCheck)
|
||||
return changes > pixelChangeThreshold, changes
|
||||
changes, motionRectangle := AbsDiffBitwiseAndThreshold(image1, image2, image3, threshold, coordinatesToCheck)
|
||||
return changes > pixelChangeThreshold, changes, motionRectangle
|
||||
}
|
||||
|
||||
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) int {
|
||||
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) (int, models.MotionRectangle) {
|
||||
changes := 0
|
||||
var pixelList [][]int
|
||||
for i := 0; i < len(coordinatesToCheck); i++ {
|
||||
pixel := coordinatesToCheck[i]
|
||||
diff := int(img3.Pix[pixel]) - int(img1.Pix[pixel])
|
||||
diff2 := int(img3.Pix[pixel]) - int(img2.Pix[pixel])
|
||||
if (diff > threshold || diff < -threshold) && (diff2 > threshold || diff2 < -threshold) {
|
||||
changes++
|
||||
// Store the pixel coordinates where the change is detected
|
||||
pixelList = append(pixelList, []int{pixel % img1.Bounds().Dx(), pixel / img1.Bounds().Dx()})
|
||||
}
|
||||
}
|
||||
return changes
|
||||
|
||||
// Calculate rectangle of pixelList (startX, startY, endX, endY)
|
||||
var motionRectangle models.MotionRectangle
|
||||
if len(pixelList) > 0 {
|
||||
startX := pixelList[0][0]
|
||||
startY := pixelList[0][1]
|
||||
endX := startX
|
||||
endY := startY
|
||||
for _, pixel := range pixelList {
|
||||
if pixel[0] < startX {
|
||||
startX = pixel[0]
|
||||
}
|
||||
if pixel[1] < startY {
|
||||
startY = pixel[1]
|
||||
}
|
||||
if pixel[0] > endX {
|
||||
endX = pixel[0]
|
||||
}
|
||||
if pixel[1] > endY {
|
||||
endY = pixel[1]
|
||||
}
|
||||
}
|
||||
log.Log.Debugf("Rectangle of changes detected: startX: %d, startY: %d, endX: %d, endY: %d", startX, startY, endX, endY)
|
||||
motionRectangle = models.MotionRectangle{
|
||||
X: startX,
|
||||
Y: startY,
|
||||
Width: endX - startX,
|
||||
Height: endY - startY,
|
||||
}
|
||||
log.Log.Debugf("Motion rectangle: %+v", motionRectangle)
|
||||
}
|
||||
return changes, motionRectangle
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ func OpenConfig(configDirectory string, configuration *models.Configuration) {
|
||||
// Write to mongodb
|
||||
client := database.New()
|
||||
|
||||
db := client.Database(database.DatabaseName)
|
||||
db := client.Client.Database(database.DatabaseName)
|
||||
collection := db.Collection("configuration")
|
||||
|
||||
var globalConfig models.Config
|
||||
@@ -135,6 +135,12 @@ func OpenConfig(configDirectory string, configuration *models.Configuration) {
|
||||
conjungo.Merge(&kerberosvault, configuration.CustomConfig.KStorage, opts)
|
||||
configuration.Config.KStorage = &kerberosvault
|
||||
|
||||
// Merge Secondary Kerberos Vault settings
|
||||
var kerberosvaultSecondary models.KStorage
|
||||
conjungo.Merge(&kerberosvaultSecondary, configuration.GlobalConfig.KStorageSecondary, opts)
|
||||
conjungo.Merge(&kerberosvaultSecondary, configuration.CustomConfig.KStorageSecondary, opts)
|
||||
configuration.Config.KStorageSecondary = &kerberosvaultSecondary
|
||||
|
||||
// Merge Kerberos S3 settings
|
||||
var s3 models.S3
|
||||
conjungo.Merge(&s3, configuration.GlobalConfig.S3, opts)
|
||||
@@ -183,15 +189,19 @@ func OpenConfig(configDirectory string, configuration *models.Configuration) {
|
||||
}
|
||||
jsonFile.Close()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// This function will override the configuration with environment variables.
|
||||
func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
environmentVariables := os.Environ()
|
||||
|
||||
// Initialize the configuration for some new fields.
|
||||
if configuration.Config.KStorageSecondary == nil {
|
||||
configuration.Config.KStorageSecondary = &models.KStorage{}
|
||||
}
|
||||
|
||||
for _, env := range environmentVariables {
|
||||
if strings.Contains(env, "AGENT_") {
|
||||
key := strings.Split(env, "=")[0]
|
||||
@@ -382,10 +392,26 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.MQTTPassword = value
|
||||
break
|
||||
|
||||
/* MQTT chunking of low-resolution images into multiple messages */
|
||||
case "AGENT_CAPTURE_LIVEVIEW_CHUNKING":
|
||||
configuration.Config.Capture.LiveviewChunking = value
|
||||
break
|
||||
|
||||
/* Real-time streaming of keyframes to a MQTT topic */
|
||||
case "AGENT_REALTIME_PROCESSING":
|
||||
configuration.Config.RealtimeProcessing = value
|
||||
break
|
||||
case "AGENT_REALTIME_PROCESSING_TOPIC":
|
||||
configuration.Config.RealtimeProcessingTopic = value
|
||||
break
|
||||
|
||||
/* WebRTC settings for live-streaming (remote) */
|
||||
case "AGENT_STUN_URI":
|
||||
configuration.Config.STUNURI = value
|
||||
break
|
||||
case "AGENT_FORCE_TURN":
|
||||
configuration.Config.ForceTurn = value
|
||||
break
|
||||
case "AGENT_TURN_URI":
|
||||
configuration.Config.TURNURI = value
|
||||
break
|
||||
@@ -425,7 +451,7 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.S3.Region = value
|
||||
break
|
||||
|
||||
/* When storing in a Kerberos Vault */
|
||||
/* When storing in a Vault */
|
||||
case "AGENT_KERBEROSVAULT_URI":
|
||||
configuration.Config.KStorage.URI = value
|
||||
break
|
||||
@@ -442,6 +468,37 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.KStorage.Directory = value
|
||||
break
|
||||
|
||||
/* Retry policy and timeout */
|
||||
case "AGENT_KERBEROSVAULT_MAX_RETRIES":
|
||||
maxRetries, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
configuration.Config.KStorage.MaxRetries = maxRetries
|
||||
}
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_TIMEOUT":
|
||||
timeout, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
configuration.Config.KStorage.Timeout = timeout
|
||||
}
|
||||
break
|
||||
|
||||
/* When storing in a secondary Vault */
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_URI":
|
||||
configuration.Config.KStorageSecondary.URI = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY":
|
||||
configuration.Config.KStorageSecondary.AccessKey = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY":
|
||||
configuration.Config.KStorageSecondary.SecretAccessKey = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_PROVIDER":
|
||||
configuration.Config.KStorageSecondary.Provider = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY":
|
||||
configuration.Config.KStorageSecondary.Directory = value
|
||||
break
|
||||
|
||||
/* When storing in dropbox */
|
||||
case "AGENT_DROPBOX_ACCESS_TOKEN":
|
||||
configuration.Config.Dropbox.AccessToken = value
|
||||
@@ -467,9 +524,26 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
case "AGENT_ENCRYPTION_SYMMETRIC_KEY":
|
||||
configuration.Config.Encryption.SymmetricKey = value
|
||||
break
|
||||
|
||||
/* When signing is enabled */
|
||||
case "AGENT_SIGNING":
|
||||
configuration.Config.Signing.Enabled = value
|
||||
break
|
||||
case "AGENT_SIGNING_PRIVATE_KEY":
|
||||
signingPrivateKey := strings.ReplaceAll(value, "\\n", "\n")
|
||||
configuration.Config.Signing.PrivateKey = signingPrivateKey
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Signing is a new feature, so if empty we set default values.
|
||||
if configuration.Config.Signing == nil || configuration.Config.Signing.PrivateKey == "" {
|
||||
configuration.Config.Signing = &models.Signing{
|
||||
Enabled: "true",
|
||||
PrivateKey: "-----BEGIN PRIVATE KEY-----\nMIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDoSxjyw08lRxF4Yoqmcaewjq3XjB55dMy4tlN5MGLdr8aAPuNR9Mwh3jlh1bDpwQXNgZkHDV/q9bpdPGGi7SQo2xw+rDuo5Y1f3wdzz+iuCTPbzoGFalE+1PZlU5TEtUtlbt7MRc4pxTaLP3u0P3EtW3KnzcUarcJWZJYxzv7gqVNCA/47BN+1ptqjwz3LAlah5yaftEvVjkaANOsafUswbS4VT44XfSlbKgebORCKDuNgQiyhuV5gU+J0TOaqRWwwMAWV0UoScyJLfhHRBCrUwrCUTwqH9jfkB7pgRFsYoZJd4MKMeHJjFSum+QXCBqInSnwu8c2kJChiLMWqJ+mhpTdfUAmSkeUSStfbbcavIPbDABvMgzOcmYMIVXXe57twU0xdu3AqWLtc9kw1BkUgZblM9pSSpYrIDheEyMs2/hiLgXsIaM0nVQtqwrA7rbeEGuPblzA6hvHgwN9K6HaBqdlGSlpYZ0v3SWIMwmxRB+kIojlyuggm8Qa4mqL97GFDGl6gOBGlNUFTBUVEa3EaJ7NJpGobRGsh/9dXzcW4aYmT9WxlzTlIKksI1ro6KdRfuVWfEs4AnG8bVEJmofK8EUrueB9IdXlcJZB49xolnOZPFohtMe/0U7evQOQP3sZnX+KotCsE7OXJvL09oF58JKoqmK9lPp0+pFBU4g6NjQIDAQABAoICAA+RSWph1t+q5R3nxUxFTYMrhv5IjQe2mDxJpF3B409zolC9OHxgGUisobTY3pBqs0DtKbxUeH2A0ehUH/axEosWHcz3cmIbgxHE9kdlJ9B3Lmss6j/uw+PWutu1sgm5phaIFIvuNNRWhPB6yXUwU4sLRat1+Z9vTmIQiKdtLIrtJz/n2VDvrJxn1N+yAsE20fnrksFKyZuxVsJaZPiX/t5Yv1/z0LjFjVoL7GUA5/Si7csN4ftqEhUrkNr2BvcZlTyffrF4lZCXrtl76RNUaxhqIu3H0gFbV2UfBpuckkfAhNRpXJ4iFSxm4nQbk4ojV8+l21RFOBeDN2Z7Ocu6auP5MnzpopR66vmDCmPoid498VGgDzFQEVkOar8WAa4v9h85QgLKrth6FunmaWJUT6OggQD3yY58GSwp5+ARMETMBP2x6Eld+PGgqoJvPT1+l/e9gOw7/SJ+Wz6hRXZAm/eiXMppHtB7sfea5rscNanPjJkK9NvPM0MX9cq/iA6QjXuETkMbubjo+Cxk3ydZiIQmWQDAx/OgxTyHbeRCVhLPcAphX0clykCuHZpI9Mvvj643/LoE0mjTByWJXf/WuGJA8ElHkjSdokVJ7jumz8OZZHfq0+V7+la2opsObeQANHW5MLWrnHlRVzTGV0IRZDXh7h1ptUJ4ubdvw/GJ2NeTAoIBAQD0lXXdjYKWC4uZ4YlgydP8b1CGda9cBV5RcPt7q9Ya1R2E4ieYyohmzltopvdaOXdsTZzhtdzOzKF+2qNcbBKhBTleYZ8GN5RKbo7HwXWpzfCTjseKHOD/QPwvBKXzLVWNtXn1NrLR79Rv0wbkYF6DtoqpEPf5kMs4bx79yW+mz8FUgdEeMjKphx6Jd5RYlTUxS64K6bnK7gjHNCF2cwdxsh4B6EB649GKeNz4JXi+oQBmOcX5ncXnkJrbju+IjtCkQ40HINVNdX7XeEaaw6KGaImVjw61toPUuDaioYUojufayoyXaUJnDbHQ2tNekEpq5iwnenZCbUKWmSeRe7dLAoIBAQDzIscYujsrmPxiTj2prhG0v36NRNP99mShnnJGowiIs+UBS0EMdOmBFa2sC9uFs/VnreQNYPDJdfr7O5VK9kfbH/PSiiKJ+wVebfdAlWkJYH27JN2Kl2l/OsvRVelNvF3BWIYF46qzGxIM0axaz3T2ZAJ9SrUgeAYhak6uyM4fbexEWX
xDgPGu6C0jB6IAzmHJnnh+j5+4ZXqjVyUxBYtUsWXF/TXomVcT9jxj7aUmS2/Us0XTVOVNpALqqYcekrzsX/wX0OEi5HkivYXHcNaDHx3NuUf6KdYof5DwPUM76qe+5/kWlSIHP3M6rIFK3pYFUnkHn2E8jNWcO97Aio+HAoIBAA+bcff/TbPxbKkXIUMR3fsfx02tONFwbkJYKVQM9Q6lRsrx+4Dee7HDvUWCUgpp3FsG4NnuVvbDTBLiNMZzBwVLZgvFwvYMmePeBjJs/+sj/xQLamQ/z4O6S91cOJK589mlGPEy2lpXKYExQCFWnPFetp5vPMOqH62sOZgMQJmubDHOTt/UaDM1Mhenj8nPS6OnpqV/oKF4awr7Ip+CW5k/unZ4sZSl8PsbF06mZXwUngfn6+Av1y8dpSQZjONz6ZBx1w/7YmEc/EkXnbnGfhqBlTX7+P5TdTofvyzFjc+2vsjRYANRbjFRSGWBcTd5kaYcpfim8eDvQ+6EO2gnMt0CggEAH2ln1Y8B5AEQ4lZ/avOdP//ZhsDUrqPtnl/NHckkahzrwj4JumVEYbP+SxMBGoYEd4+kvgG/OhfvBBRPlm65G9tF8fZ8vdzbdba5UfO7rUV1GP+LS8OCErjy6imySaPDbR5Vul8Oh7NAor1YCidxUf/bvnovanF3QUvtvHEfCDp4YuA4yLPZBaLjaforePUw9w5tPNSravRZYs74dBvmQ1vj7S9ojpN5B5AxfyuNwaPPX+iFZec69MvywISEe3Ozysof1Kfc3lgsOkvIA9tVK32SqSh93xkWnQbWH+OaUxxe7bAko0FDMzKEXZk53wVg1nEwR8bUljEPy+6EOdXs8wKCAQEAsEOWYMY5m7HkeG2XTTvX7ECmmdGl/c4ZDVwzB4IPxqUG7XfLmtsON8YoKOEUpJoc4ANafLXzmU+esUGbH4Ph22IWgP9jzws7jxaN/Zoku64qrSjgEZFTRIpKyhFk/ImWbS9laBW4l+m0tqTTRqoE0QEJf/2uv/04q65zrA70X9z2+KTrAtqOiRQPWl/IxRe9U4OEeGL+oD+YlXKCDsnJ3rwUIOZgJx0HWZg7K35DKwqs1nVi56FBdljiTRKAjVLRedjgDCSfGS1yUZ3krHzpaPt1qgnT3rdtYcIdbYDr66V2/gEEaz6XMGHuTk/ewjzUJxq9UTVeXOCbkRPXgVJg1w==\n-----END PRIVATE KEY-----",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SaveConfig(configDirectory string, config models.Config, configuration *models.Configuration, communication *models.Communication) error {
|
||||
@@ -485,7 +559,9 @@ func SaveConfig(configDirectory string, config models.Config, configuration *mod
|
||||
if communication.CameraConnected {
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
default:
|
||||
log.Log.Info("config.main.SaveConfig(): update config, restart agent.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("config.main.SaveConfig(): update config, restart agent.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -512,7 +588,7 @@ func StoreConfig(configDirectory string, config models.Config) error {
|
||||
// Write to mongodb
|
||||
client := database.New()
|
||||
|
||||
db := client.Database(database.DatabaseName)
|
||||
db := client.Client.Database(database.DatabaseName)
|
||||
collection := db.Collection("configuration")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
@@ -15,12 +15,19 @@ type DB struct {
|
||||
Client *mongo.Client
|
||||
}
|
||||
|
||||
var TIMEOUT = 10 * time.Second
|
||||
var _init_ctx sync.Once
|
||||
var _instance *DB
|
||||
var DatabaseName = "KerberosFactory"
|
||||
|
||||
func New() *mongo.Client {
|
||||
var DatabaseName = os.Getenv("MONGODB_DATABASE_FACTORY")
|
||||
|
||||
func New() *DB {
|
||||
|
||||
if DatabaseName == "" {
|
||||
DatabaseName = "KerberosFactory"
|
||||
}
|
||||
|
||||
mongodbURI := os.Getenv("MONGODB_URI")
|
||||
host := os.Getenv("MONGODB_HOST")
|
||||
databaseCredentials := os.Getenv("MONGODB_DATABASE_CREDENTIALS")
|
||||
replicaset := os.Getenv("MONGODB_REPLICASET")
|
||||
@@ -28,28 +35,46 @@ func New() *mongo.Client {
|
||||
password := os.Getenv("MONGODB_PASSWORD")
|
||||
authentication := "SCRAM-SHA-256"
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), TIMEOUT)
|
||||
defer cancel()
|
||||
|
||||
_init_ctx.Do(func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
_instance = new(DB)
|
||||
mongodbURI := fmt.Sprintf("mongodb://%s:%s@%s", username, password, host)
|
||||
if replicaset != "" {
|
||||
mongodbURI = fmt.Sprintf("%s/?replicaSet=%s", mongodbURI, replicaset)
|
||||
}
|
||||
|
||||
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongodbURI).SetAuth(options.Credential{
|
||||
AuthMechanism: authentication,
|
||||
AuthSource: databaseCredentials,
|
||||
Username: username,
|
||||
Password: password,
|
||||
}))
|
||||
if err != nil {
|
||||
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
|
||||
os.Exit(1)
|
||||
// We can also apply the complete URI
|
||||
// e.g. "mongodb+srv://<username>:<password>@kerberos-hub.shhng.mongodb.net/?retryWrites=true&w=majority&appName=kerberos-hub"
|
||||
if mongodbURI != "" {
|
||||
serverAPI := options.ServerAPI(options.ServerAPIVersion1)
|
||||
opts := options.Client().ApplyURI(mongodbURI).SetServerAPIOptions(serverAPI)
|
||||
|
||||
// Create a new client and connect to the server
|
||||
client, err := mongo.Connect(ctx, opts)
|
||||
if err != nil {
|
||||
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
_instance.Client = client
|
||||
|
||||
} else {
|
||||
|
||||
// New MongoDB driver
|
||||
mongodbURI := fmt.Sprintf("mongodb://%s:%s@%s", username, password, host)
|
||||
if replicaset != "" {
|
||||
mongodbURI = fmt.Sprintf("%s/?replicaSet=%s", mongodbURI, replicaset)
|
||||
}
|
||||
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongodbURI).SetAuth(options.Credential{
|
||||
AuthMechanism: authentication,
|
||||
AuthSource: databaseCredentials,
|
||||
Username: username,
|
||||
Password: password,
|
||||
}))
|
||||
if err != nil {
|
||||
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
_instance.Client = client
|
||||
}
|
||||
_instance.Client = client
|
||||
})
|
||||
|
||||
return _instance.Client
|
||||
return _instance
|
||||
}
|
||||
|
||||
@@ -76,7 +76,6 @@ func ConfigureLogrus(level string, output string, timezone *time.Location) {
|
||||
logLevel = logrus.ErrorLevel
|
||||
} else if level == "debug" {
|
||||
logLevel = logrus.DebugLevel
|
||||
logrus.SetReportCaller(true)
|
||||
} else if level == "fatal" {
|
||||
logLevel = logrus.FatalLevel
|
||||
} else if level == "warning" {
|
||||
@@ -119,6 +118,16 @@ func (self *Logging) Info(sentence string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Infof(format string, args ...interface{}) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
gologging.Infof(format, args...)
|
||||
case "logrus":
|
||||
logrus.Infof(format, args...)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Warning(sentence string) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
@@ -139,6 +148,16 @@ func (self *Logging) Debug(sentence string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Debugf(format string, args ...interface{}) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
gologging.Debugf(format, args...)
|
||||
case "logrus":
|
||||
logrus.Debugf(format, args...)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Error(sentence string) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
|
||||
@@ -12,38 +12,43 @@ type Configuration struct {
|
||||
// Config is the highlevel struct which contains all the configuration of
|
||||
// your Kerberos Open Source instance.
|
||||
type Config struct {
|
||||
Type string `json:"type"`
|
||||
Key string `json:"key"`
|
||||
Name string `json:"name"`
|
||||
FriendlyName string `json:"friendly_name"`
|
||||
Time string `json:"time" bson:"time"`
|
||||
Offline string `json:"offline"`
|
||||
AutoClean string `json:"auto_clean"`
|
||||
RemoveAfterUpload string `json:"remove_after_upload"`
|
||||
MaxDirectorySize int64 `json:"max_directory_size"`
|
||||
Timezone string `json:"timezone"`
|
||||
Capture Capture `json:"capture"`
|
||||
Timetable []*Timetable `json:"timetable"`
|
||||
Region *Region `json:"region"`
|
||||
Cloud string `json:"cloud" bson:"cloud"`
|
||||
S3 *S3 `json:"s3,omitempty" bson:"s3,omitempty"`
|
||||
KStorage *KStorage `json:"kstorage,omitempty" bson:"kstorage,omitempty"`
|
||||
Dropbox *Dropbox `json:"dropbox,omitempty" bson:"dropbox,omitempty"`
|
||||
MQTTURI string `json:"mqtturi" bson:"mqtturi,omitempty"`
|
||||
MQTTUsername string `json:"mqtt_username" bson:"mqtt_username"`
|
||||
MQTTPassword string `json:"mqtt_password" bson:"mqtt_password"`
|
||||
STUNURI string `json:"stunuri" bson:"stunuri"`
|
||||
TURNURI string `json:"turnuri" bson:"turnuri"`
|
||||
TURNUsername string `json:"turn_username" bson:"turn_username"`
|
||||
TURNPassword string `json:"turn_password" bson:"turn_password"`
|
||||
HeartbeatURI string `json:"heartbeaturi" bson:"heartbeaturi"` /*obsolete*/
|
||||
HubEncryption string `json:"hub_encryption" bson:"hub_encryption"`
|
||||
HubURI string `json:"hub_uri" bson:"hub_uri"`
|
||||
HubKey string `json:"hub_key" bson:"hub_key"`
|
||||
HubPrivateKey string `json:"hub_private_key" bson:"hub_private_key"`
|
||||
HubSite string `json:"hub_site" bson:"hub_site"`
|
||||
ConditionURI string `json:"condition_uri" bson:"condition_uri"`
|
||||
Encryption *Encryption `json:"encryption,omitempty" bson:"encryption,omitempty"`
|
||||
Type string `json:"type"`
|
||||
Key string `json:"key"`
|
||||
Name string `json:"name"`
|
||||
FriendlyName string `json:"friendly_name"`
|
||||
Time string `json:"time" bson:"time"`
|
||||
Offline string `json:"offline"`
|
||||
AutoClean string `json:"auto_clean"`
|
||||
RemoveAfterUpload string `json:"remove_after_upload"`
|
||||
MaxDirectorySize int64 `json:"max_directory_size"`
|
||||
Timezone string `json:"timezone"`
|
||||
Capture Capture `json:"capture"`
|
||||
Timetable []*Timetable `json:"timetable"`
|
||||
Region *Region `json:"region"`
|
||||
Cloud string `json:"cloud" bson:"cloud"`
|
||||
S3 *S3 `json:"s3,omitempty" bson:"s3,omitempty"`
|
||||
KStorage *KStorage `json:"kstorage,omitempty" bson:"kstorage,omitempty"`
|
||||
KStorageSecondary *KStorage `json:"kstorage_secondary,omitempty" bson:"kstorage_secondary,omitempty"`
|
||||
Dropbox *Dropbox `json:"dropbox,omitempty" bson:"dropbox,omitempty"`
|
||||
MQTTURI string `json:"mqtturi" bson:"mqtturi,omitempty"`
|
||||
MQTTUsername string `json:"mqtt_username" bson:"mqtt_username"`
|
||||
MQTTPassword string `json:"mqtt_password" bson:"mqtt_password"`
|
||||
STUNURI string `json:"stunuri" bson:"stunuri"`
|
||||
ForceTurn string `json:"turn_force" bson:"turn_force"`
|
||||
TURNURI string `json:"turnuri" bson:"turnuri"`
|
||||
TURNUsername string `json:"turn_username" bson:"turn_username"`
|
||||
TURNPassword string `json:"turn_password" bson:"turn_password"`
|
||||
HeartbeatURI string `json:"heartbeaturi" bson:"heartbeaturi"` /*obsolete*/
|
||||
HubEncryption string `json:"hub_encryption" bson:"hub_encryption"`
|
||||
HubURI string `json:"hub_uri" bson:"hub_uri"`
|
||||
HubKey string `json:"hub_key" bson:"hub_key"`
|
||||
HubPrivateKey string `json:"hub_private_key" bson:"hub_private_key"`
|
||||
HubSite string `json:"hub_site" bson:"hub_site"`
|
||||
ConditionURI string `json:"condition_uri" bson:"condition_uri"`
|
||||
Encryption *Encryption `json:"encryption,omitempty" bson:"encryption,omitempty"`
|
||||
Signing *Signing `json:"signing,omitempty" bson:"signing,omitempty"`
|
||||
RealtimeProcessing string `json:"realtimeprocessing,omitempty" bson:"realtimeprocessing,omitempty"`
|
||||
RealtimeProcessingTopic string `json:"realtimeprocessing_topic" bson:"realtimeprocessing_topic"`
|
||||
}
|
||||
|
||||
// Capture defines which camera type (Id) you are using (IP, USB or Raspberry Pi camera),
|
||||
@@ -57,9 +62,11 @@ type Capture struct {
|
||||
Snapshots string `json:"snapshots,omitempty"`
|
||||
Motion string `json:"motion,omitempty"`
|
||||
Liveview string `json:"liveview,omitempty"`
|
||||
LiveviewChunking string `json:"liveview_chunking,omitempty" bson:"liveview_chunking,omitempty"`
|
||||
Continuous string `json:"continuous,omitempty"`
|
||||
PostRecording int64 `json:"postrecording"`
|
||||
PreRecording int64 `json:"prerecording"`
|
||||
GopSize int `json:"gopsize,omitempty" bson:"gopsize,omitempty"` // GOP size in seconds, used for pre-recording
|
||||
MaxLengthRecording int64 `json:"maxlengthrecording"`
|
||||
TranscodingWebRTC string `json:"transcodingwebrtc"`
|
||||
TranscodingResolution int64 `json:"transcodingresolution"`
|
||||
@@ -72,15 +79,23 @@ type Capture struct {
|
||||
// IPCamera configuration, such as the RTSP url of the IPCamera and the FPS.
|
||||
// Also includes ONVIF integration
|
||||
type IPCamera struct {
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
FPS string `json:"fps"`
|
||||
RTSP string `json:"rtsp"`
|
||||
SubRTSP string `json:"sub_rtsp"`
|
||||
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
|
||||
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
|
||||
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
|
||||
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
|
||||
RTSP string `json:"rtsp"`
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
FPS string `json:"fps"`
|
||||
SubRTSP string `json:"sub_rtsp"`
|
||||
SubWidth int `json:"sub_width"`
|
||||
SubHeight int `json:"sub_height"`
|
||||
SubFPS string `json:"sub_fps"`
|
||||
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
|
||||
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
|
||||
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
|
||||
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
|
||||
SPSNALUs [][]byte `json:"sps_nalus,omitempty" bson:"sps_nalus,omitempty"`
|
||||
PPSNALUs [][]byte `json:"pps_nalus,omitempty" bson:"pps_nalus,omitempty"`
|
||||
VPSNALUs [][]byte `json:"vps_nalus,omitempty" bson:"vps_nalus,omitempty"`
|
||||
SampleRate int `json:"sample_rate,omitempty" bson:"sample_rate,omitempty"`
|
||||
Channels int `json:"channels,omitempty" bson:"channels,omitempty"`
|
||||
}
|
||||
|
||||
// USBCamera configuration, such as the device path (/dev/video*)
|
||||
@@ -152,6 +167,8 @@ type KStorage struct {
|
||||
SecretAccessKey string `json:"secret_access_key,omitempty" bson:"secret_access_key,omitempty"`
|
||||
Provider string `json:"provider,omitempty" bson:"provider,omitempty"`
|
||||
Directory string `json:"directory,omitempty" bson:"directory,omitempty"`
|
||||
MaxRetries int `json:"max_retries,omitempty" bson:"max_retries,omitempty"`
|
||||
Timeout int `json:"timeout,omitempty" bson:"timeout,omitempty"`
|
||||
}
|
||||
|
||||
// Dropbox integration
|
||||
@@ -168,3 +185,9 @@ type Encryption struct {
|
||||
PrivateKey string `json:"private_key" bson:"private_key"`
|
||||
SymmetricKey string `json:"symmetric_key" bson:"symmetric_key"`
|
||||
}
|
||||
|
||||
// Signing
|
||||
type Signing struct {
|
||||
Enabled string `json:"enabled" bson:"enabled"`
|
||||
PrivateKey string `json:"private_key" bson:"private_key"`
|
||||
}
|
||||
|
||||
@@ -27,31 +27,13 @@ func PackageMQTTMessage(configuration *Configuration, msg Message) ([]byte, erro
|
||||
msg.DeviceId = msg.Payload.DeviceId
|
||||
msg.Timestamp = time.Now().Unix()
|
||||
|
||||
// We'll hide the message (by default in latest version)
|
||||
// We will encrypt using the Kerberos Hub private key if set.
|
||||
/*msg.Hidden = false
|
||||
if configuration.Config.HubPrivateKey != "" {
|
||||
msg.Hidden = true
|
||||
pload := msg.Payload
|
||||
// Pload to base64
|
||||
data, err := json.Marshal(pload)
|
||||
if err != nil {
|
||||
msg.Hidden = false
|
||||
} else {
|
||||
k := configuration.Config.Encryption.SymmetricKey
|
||||
encryptedValue, err := encryption.AesEncrypt(data, k)
|
||||
if err == nil {
|
||||
data := base64.StdEncoding.EncodeToString(encryptedValue)
|
||||
msg.Payload.HiddenValue = data
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
}*/
|
||||
// Configuration
|
||||
config := configuration.Config
|
||||
|
||||
// Next to hiding the message, we can also encrypt it using your own private key.
|
||||
// Which is not stored in a remote environment (hence you are the only one owning it).
|
||||
msg.Encrypted = false
|
||||
if configuration.Config.Encryption != nil && configuration.Config.Encryption.Enabled == "true" {
|
||||
if config.Encryption != nil && config.Encryption.Enabled == "true" {
|
||||
msg.Encrypted = true
|
||||
}
|
||||
msg.PublicKey = ""
|
||||
@@ -85,19 +67,47 @@ func PackageMQTTMessage(configuration *Configuration, msg Message) ([]byte, erro
|
||||
rsaKey, _ := key.(*rsa.PrivateKey)
|
||||
|
||||
// Create a 16bit key random
|
||||
k := configuration.Config.Encryption.SymmetricKey
|
||||
if config.Encryption != nil && config.Encryption.SymmetricKey != "" {
|
||||
k := config.Encryption.SymmetricKey
|
||||
encryptedValue, err := encryption.AesEncrypt(data, k)
|
||||
if err == nil {
|
||||
|
||||
data := base64.StdEncoding.EncodeToString(encryptedValue)
|
||||
// Sign the encrypted value
|
||||
signature, err := encryption.SignWithPrivateKey([]byte(data), rsaKey)
|
||||
if err == nil {
|
||||
base64Signature := base64.StdEncoding.EncodeToString(signature)
|
||||
msg.Payload.EncryptedValue = data
|
||||
msg.Payload.Signature = base64Signature
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We'll hide the message (by default in latest version)
|
||||
// We will encrypt using the Kerberos Hub private key if set.
|
||||
msg.Hidden = false
|
||||
if config.HubEncryption == "true" && config.HubPrivateKey != "" {
|
||||
msg.Hidden = true
|
||||
}
|
||||
|
||||
if msg.Hidden {
|
||||
pload := msg.Payload
|
||||
// Pload to base64
|
||||
data, err := json.Marshal(pload)
|
||||
if err != nil {
|
||||
msg.Hidden = false
|
||||
} else {
|
||||
k := config.HubPrivateKey
|
||||
encryptedValue, err := encryption.AesEncrypt(data, k)
|
||||
if err == nil {
|
||||
|
||||
data := base64.StdEncoding.EncodeToString(encryptedValue)
|
||||
// Sign the encrypted value
|
||||
signature, err := encryption.SignWithPrivateKey([]byte(data), rsaKey)
|
||||
if err == nil {
|
||||
base64Signature := base64.StdEncoding.EncodeToString(signature)
|
||||
msg.Payload.EncryptedValue = data
|
||||
msg.Payload.Signature = base64Signature
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
msg.Payload.HiddenValue = data
|
||||
msg.Payload.EncryptedValue = ""
|
||||
msg.Payload.Signature = ""
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -122,6 +132,7 @@ type Message struct {
|
||||
// The payload structure which is used to send over
|
||||
// and receive messages from the MQTT broker
|
||||
type Payload struct {
|
||||
Version string `json:"version"` // Version of the message, e.g. "1.0"
|
||||
Action string `json:"action"`
|
||||
DeviceId string `json:"device_id"`
|
||||
Signature string `json:"signature"`
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package models
|
||||
|
||||
type MotionDataPartial struct {
|
||||
Timestamp int64 `json:"timestamp" bson:"timestamp"`
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Timestamp int64 `json:"timestamp" bson:"timestamp"`
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Rectangle MotionRectangle `json:"rectangle" bson:"rectangle"`
|
||||
}
|
||||
|
||||
type MotionDataFull struct {
|
||||
@@ -14,3 +15,10 @@ type MotionDataFull struct {
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Token int `json:"token" bson:"token"`
|
||||
}
|
||||
|
||||
type MotionRectangle struct {
|
||||
X int `json:"x" bson:"x"`
|
||||
Y int `json:"y" bson:"y"`
|
||||
Width int `json:"width" bson:"width"`
|
||||
Height int `json:"height" bson:"height"`
|
||||
}
|
||||
|
||||
@@ -200,9 +200,19 @@ func ConnectToOnvifDevice(cameraConfiguration *models.IPCamera) (*onvif.Device,
|
||||
|
||||
var capabilities device.GetCapabilitiesResponse
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.ConnectToOnvifDevice(): " + err.Error())
|
||||
} else {
|
||||
// Try again with other authentication mode
|
||||
dev, err = onvif.NewDevice(onvif.DeviceParams{
|
||||
Xaddr: cameraConfiguration.ONVIFXAddr,
|
||||
Username: cameraConfiguration.ONVIFUsername,
|
||||
Password: cameraConfiguration.ONVIFPassword,
|
||||
AuthMode: "digest",
|
||||
})
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.ConnectToOnvifDevice(): " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
getCapabilities := device.GetCapabilities{Category: []xsdonvif.CapabilityCategory{"All"}}
|
||||
resp, err := dev.CallMethod(getCapabilities)
|
||||
if err != nil {
|
||||
@@ -212,10 +222,10 @@ func ConnectToOnvifDevice(cameraConfiguration *models.IPCamera) (*onvif.Device,
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.ConnectToOnvifDevice(): " + err.Error())
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetCapabilitiesResponse")
|
||||
@@ -242,10 +252,10 @@ func GetTokenFromProfile(device *onvif.Device, profileId int) (xsdonvif.Referenc
|
||||
// Get Profiles
|
||||
resp, err := device.CallMethod(media.GetProfiles{})
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetProfilesResponse")
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.GetTokenFromProfile(): " + err.Error())
|
||||
@@ -278,21 +288,19 @@ func GetPTZConfigurationsFromDevice(device *onvif.Device) (ptz.GetConfigurations
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetConfigurationsResponse")
|
||||
if err != nil {
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetConfigurationsResponse")
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.GetPTZConfigurationsFromDevice(): " + err.Error())
|
||||
return configurations, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&configurations, et); err != nil {
|
||||
log.Log.Debug("onvif.GetPTZConfigurationsFromDevice(): " + err.Error())
|
||||
return configurations, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&configurations, et); err != nil {
|
||||
log.Log.Debug("onvif.GetPTZConfigurationsFromDevice(): " + err.Error())
|
||||
return configurations, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -350,7 +358,7 @@ func GetPosition(device *onvif.Device, token xsdonvif.ReferenceToken) (xsdonvif.
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
@@ -784,7 +792,7 @@ func GetPresetsFromDevice(device *onvif.Device) ([]models.OnvifActionPreset, err
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
@@ -798,14 +806,16 @@ func GetPresetsFromDevice(device *onvif.Device) ([]models.OnvifActionPreset, err
|
||||
return presets, err
|
||||
}
|
||||
|
||||
presetsList := ""
|
||||
for _, preset := range presetsResponse.Preset {
|
||||
log.Log.Debug("onvif.main.GetPresetsFromDevice(): " + string(preset.Name) + " (" + string(preset.Token) + ")")
|
||||
p := models.OnvifActionPreset{
|
||||
Name: string(preset.Name),
|
||||
Token: string(preset.Token),
|
||||
}
|
||||
presetsList += string(preset.Name) + " (" + string(preset.Token) + "), "
|
||||
presets = append(presets, p)
|
||||
}
|
||||
log.Log.Debug("onvif.main.GetPresetsFromDevice(): " + presetsList)
|
||||
|
||||
return presets, err
|
||||
}
|
||||
@@ -833,7 +843,7 @@ func GoToPresetFromDevice(device *onvif.Device, presetName string) error {
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
@@ -974,20 +984,25 @@ func CreatePullPointSubscription(dev *onvif.Device) (string, error) {
|
||||
// For the time being we are just interested in the digital inputs and outputs, therefore
|
||||
// we have set the topic to the followin filter.
|
||||
terminate := xsd.String("PT60S")
|
||||
if dev == nil {
|
||||
return pullPointAdress, errors.New("dev is nil, ONVIF was not able to connect to the device")
|
||||
}
|
||||
|
||||
resp, err := dev.CallMethod(event.CreatePullPointSubscription{
|
||||
InitialTerminationTime: &terminate,
|
||||
|
||||
Filter: &event.FilterType{
|
||||
TopicExpression: &event.TopicExpressionType{
|
||||
Dialect: xsd.String("http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet"),
|
||||
TopicKinds: "tns1:Device/Trigger//.",
|
||||
TopicKinds: "tns1:Device/Trigger//.", // -> This works for Avigilon, Hanwa, Hikvision
|
||||
// TopicKinds: "//.", -> This works for Axis, but throws other errors.
|
||||
},
|
||||
},
|
||||
})
|
||||
var b2 []byte
|
||||
if resp != nil {
|
||||
b2, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
if err == nil {
|
||||
stringBody := string(b2)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "CreatePullPointSubscriptionResponse")
|
||||
@@ -1006,19 +1021,25 @@ func CreatePullPointSubscription(dev *onvif.Device) (string, error) {
|
||||
}
|
||||
|
||||
func UnsubscribePullPoint(dev *onvif.Device, pullPointAddress string) error {
|
||||
|
||||
// Unsubscribe from the device
|
||||
unsubscribe := event.Unsubscribe{}
|
||||
requestBody, err := xml.Marshal(unsubscribe)
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.UnsubscribePullPoint(): " + err.Error())
|
||||
}
|
||||
|
||||
res, err := dev.SendSoap(pullPointAddress, string(requestBody))
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.UnsubscribePullPoint(): " + err.Error())
|
||||
}
|
||||
if res != nil {
|
||||
_, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
b, err := io.ReadAll(res.Body)
|
||||
res.Body.Close() // Ensure the response body is closed
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
log.Log.Debug("onvif.main.UnsubscribePullPoint(): " + stringBody)
|
||||
}
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.UnsubscribePullPoint(): " + err.Error())
|
||||
}
|
||||
@@ -1037,14 +1058,10 @@ func GetInputOutputs() ([]ONVIFEvents, error) {
|
||||
// We have some odd behaviour for inputs: the logical state is set to false even if circuit is closed. However we do see repeated events (looks like heartbeats).
|
||||
// We are assuming that if we do not receive an event for 15 seconds the input is inactive, otherwise we set to active.
|
||||
for key, value := range inputOutputDeviceMap {
|
||||
if value.Type == "input" {
|
||||
if time.Now().Unix()-value.Timestamp > 15 {
|
||||
value.Value = "false"
|
||||
} else {
|
||||
value.Value = "true"
|
||||
}
|
||||
inputOutputDeviceMap[key] = value
|
||||
if time.Now().Unix()-value.Timestamp < 15 && value.Value == "false" {
|
||||
value.Value = "true"
|
||||
}
|
||||
inputOutputDeviceMap[key] = value
|
||||
eventsArray = append(eventsArray, *value)
|
||||
}
|
||||
for _, value := range eventsArray {
|
||||
@@ -1070,7 +1087,7 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
// Pull message
|
||||
pullMessage := event.PullMessages{
|
||||
Timeout: xsd.Duration("PT5S"),
|
||||
MessageLimit: 100,
|
||||
MessageLimit: 10,
|
||||
}
|
||||
requestBody, err := xml.Marshal(pullMessage)
|
||||
if err != nil {
|
||||
@@ -1086,7 +1103,7 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
var pullMessagesResponse event.PullMessagesResponse
|
||||
if res != nil {
|
||||
bs, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
res.Body.Close() // Ensure the response body is closed
|
||||
if err == nil {
|
||||
stringBody := string(bs)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "PullMessagesResponse")
|
||||
@@ -1104,13 +1121,18 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
|
||||
for _, message := range pullMessagesResponse.NotificationMessage {
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages): " + string(message.Topic.TopicKinds))
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages): " + string(message.Message.Message.Data.SimpleItem[0].Name) + " " + string(message.Message.Message.Data.SimpleItem[0].Value))
|
||||
if message.Topic.TopicKinds == "tns1:Device/Trigger/Relay" {
|
||||
//if len(message.Message.Message.Data.SimpleItem) > 0 {
|
||||
// log.Log.Debug("onvif.main.GetEventMessages(pullMessages): " + string(message.Message.Message.Data.SimpleItem[0].Name) + " " + string(message.Message.Message.Data.SimpleItem[0].Value))
|
||||
//}
|
||||
if message.Topic.TopicKinds == "tns1:Device/Trigger/Relay" ||
|
||||
message.Topic.TopicKinds == "tns1:Device/tns1:Trigger/tns1:Relay" { // This is for avigilon cameras
|
||||
if len(message.Message.Message.Data.SimpleItem) > 0 {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" ||
|
||||
message.Message.Message.Data.SimpleItem[0].Name == "RelayLogicalState" { // On avigilon it's called RelayLogicalState
|
||||
key := string(message.Message.Message.Source.SimpleItem[0].Value)
|
||||
value := string(message.Message.Message.Data.SimpleItem[0].Value)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) output: " + key + " " + value)
|
||||
propertyOperation := string(message.Message.Message.PropertyOperation)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) output: " + key + " " + value + " (" + propertyOperation + ")")
|
||||
|
||||
// Depending on the onvif library they might use different values for active and inactive.
|
||||
if value == "active" || value == "1" {
|
||||
@@ -1121,26 +1143,30 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
|
||||
// Check if key exists in map
|
||||
// If it does not exist we'll add it to the map otherwise we'll update the value.
|
||||
if _, ok := inputOutputDeviceMap[key]; !ok {
|
||||
inputOutputDeviceMap[key] = &ONVIFEvents{
|
||||
Key: key,
|
||||
if _, ok := inputOutputDeviceMap[key+"-output"]; !ok {
|
||||
inputOutputDeviceMap[key+"-output"] = &ONVIFEvents{
|
||||
Key: key + "-output",
|
||||
Type: "output",
|
||||
Value: value,
|
||||
Timestamp: 0,
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) output: " + key + " " + value)
|
||||
inputOutputDeviceMap[key].Value = value
|
||||
inputOutputDeviceMap[key].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Changed" {
|
||||
inputOutputDeviceMap[key+"-output"].Value = value
|
||||
inputOutputDeviceMap[key+"-output"].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Initialized" {
|
||||
inputOutputDeviceMap[key+"-output"].Value = value
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if message.Topic.TopicKinds == "tns1:Device/Trigger/DigitalInput" {
|
||||
} else if message.Topic.TopicKinds == "tns1:Device/Trigger/DigitalInput" ||
|
||||
message.Topic.TopicKinds == "tns1:Device/tns1:Trigger/tnssamsung:DigitalInput" { // This is for avigilon's camera
|
||||
if len(message.Message.Message.Data.SimpleItem) > 0 {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" ||
|
||||
message.Message.Message.Data.SimpleItem[0].Name == "Level" { // On avigilon it's called level
|
||||
key := string(message.Message.Message.Source.SimpleItem[0].Value)
|
||||
value := string(message.Message.Message.Data.SimpleItem[0].Value)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) input: " + key + " " + value)
|
||||
propertyOperation := string(message.Message.Message.PropertyOperation)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) input: " + key + " " + value + " (" + propertyOperation + ")")
|
||||
|
||||
// Depending on the onvif library they might use different values for active and inactive.
|
||||
if value == "active" || value == "1" {
|
||||
@@ -1151,17 +1177,18 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
|
||||
// Check if key exists in map
|
||||
// If it does not exist we'll add it to the map otherwise we'll update the value.
|
||||
if _, ok := inputOutputDeviceMap[key]; !ok {
|
||||
inputOutputDeviceMap[key] = &ONVIFEvents{
|
||||
Key: key,
|
||||
if _, ok := inputOutputDeviceMap[key+"-input"]; !ok {
|
||||
inputOutputDeviceMap[key+"-input"] = &ONVIFEvents{
|
||||
Key: key + "-input",
|
||||
Type: "input",
|
||||
Value: value,
|
||||
Timestamp: 0,
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) input: " + key + " " + value)
|
||||
inputOutputDeviceMap[key].Value = value
|
||||
inputOutputDeviceMap[key].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Changed" {
|
||||
inputOutputDeviceMap[key+"-input"].Value = value
|
||||
inputOutputDeviceMap[key+"-input"].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Initialized" {
|
||||
inputOutputDeviceMap[key+"-input"].Value = value
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1185,7 +1212,7 @@ func GetDigitalInputs(dev *onvif.Device) (device.GetDigitalInputsResponse, error
|
||||
resp, err := dev.CallMethod(deviceio.GetDigitalInputs{})
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
@@ -1217,21 +1244,19 @@ func GetRelayOutputs(dev *onvif.Device) (device.GetRelayOutputsResponse, error)
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetRelayOutputsResponse")
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetRelayOutputsResponse")
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
return relayoutputs, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&relayoutputs, et); err != nil {
|
||||
log.Log.Debug("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
return relayoutputs, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&relayoutputs, et); err != nil {
|
||||
log.Log.Debug("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
return relayoutputs, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1249,8 +1274,8 @@ func TriggerRelayOutput(dev *onvif.Device, output string) (err error) {
|
||||
// However in theory there might be multiple outputs. We might need to change
|
||||
// this in the future "kerberos-io/onvif" library.
|
||||
if err == nil {
|
||||
token := relayoutputs.RelayOutputs.Token
|
||||
if output == string(token) {
|
||||
token := relayoutputs.RelayOutputs[0].Token
|
||||
if output == string(token+"-output") {
|
||||
outputState := device.SetRelayOutputState{
|
||||
RelayOutputToken: token,
|
||||
LogicalState: "active",
|
||||
@@ -1260,7 +1285,7 @@ func TriggerRelayOutput(dev *onvif.Device, output string) (err error) {
|
||||
var b []byte
|
||||
if errResp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
stringBody := string(b)
|
||||
if err == nil && resp.StatusCode == 200 {
|
||||
|
||||
@@ -9,12 +9,15 @@ import (
|
||||
// Packet represents an RTP Packet
|
||||
type Packet struct {
|
||||
Packet *rtp.Packet
|
||||
IsAudio bool // packet is audio
|
||||
IsVideo bool // packet is video
|
||||
IsKeyFrame bool // video packet is key frame
|
||||
Idx int8 // stream index in container format
|
||||
Codec string // codec name
|
||||
CompositionTime time.Duration // packet presentation time minus decode time for H264 B-Frame
|
||||
Time time.Duration // packet decode time
|
||||
Data []byte // packet data
|
||||
IsAudio bool // packet is audio
|
||||
IsVideo bool // packet is video
|
||||
IsKeyFrame bool // video packet is key frame
|
||||
Idx int8 // stream index in container format
|
||||
Codec string // codec name
|
||||
CompositionTime int64 // packet presentation time minus decode time for H264 B-Frame
|
||||
Time int64 // packet decode time
|
||||
TimeLegacy time.Duration
|
||||
CurrentTime int64 // current time in milliseconds (UNIX timestamp)
|
||||
Data []byte // packet data
|
||||
Gopsize int // size of the GOP
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ package packets
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// time
|
||||
@@ -46,6 +45,11 @@ func (self *Queue) SetMaxGopCount(n int) {
|
||||
return
|
||||
}
|
||||
|
||||
func (self *Queue) GetMaxGopCount() int {
|
||||
n := self.maxgopcount
|
||||
return n
|
||||
}
|
||||
|
||||
func (self *Queue) WriteHeader(streams []Stream) error {
|
||||
self.lock.Lock()
|
||||
|
||||
@@ -145,7 +149,7 @@ func (self *Queue) Oldest() *QueueCursor {
|
||||
}
|
||||
|
||||
// Create cursor position at specific time in buffered packets.
|
||||
func (self *Queue) DelayedTime(dur time.Duration) *QueueCursor {
|
||||
func (self *Queue) DelayedTime(dur int64) *QueueCursor {
|
||||
cursor := self.newCursor()
|
||||
cursor.init = func(buf *Buf, videoidx int) BufPos {
|
||||
i := buf.Tail - 1
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package packets
|
||||
|
||||
type Stream struct {
|
||||
// The ID of the stream.
|
||||
Index int `json:"index" bson:"index"`
|
||||
|
||||
// The name of the stream.
|
||||
Name string
|
||||
|
||||
@@ -39,4 +42,13 @@ type Stream struct {
|
||||
|
||||
// IsBackChannel is true if this stream is a back channel.
|
||||
IsBackChannel bool
|
||||
|
||||
// SampleRate is the sample rate of the audio stream.
|
||||
SampleRate int
|
||||
|
||||
// Channels is the number of audio channels.
|
||||
Channels int
|
||||
|
||||
// GopSize is the size of the GOP (Group of Pictures).
|
||||
GopSize int
|
||||
}
|
||||
|
||||
@@ -47,7 +47,7 @@ func StartServer(configDirectory string, configuration *models.Configuration, co
|
||||
// Initialize REST API
|
||||
r := gin.Default()
|
||||
|
||||
// Profileerggerg
|
||||
// Profiler
|
||||
pprof.Register(r)
|
||||
|
||||
// Setup CORS
|
||||
|
||||
@@ -395,7 +395,9 @@ func DoGetDigitalInputs(c *gin.Context) {
|
||||
}
|
||||
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
_, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
|
||||
onvifInputs, _ := onvif.GetDigitalInputs(device)
|
||||
if err == nil {
|
||||
// Get the digital inputs and outputs from the device
|
||||
inputOutputs, err := onvif.GetInputOutputs()
|
||||
@@ -408,6 +410,24 @@ func DoGetDigitalInputs(c *gin.Context) {
|
||||
inputs = append(inputs, event)
|
||||
}
|
||||
}
|
||||
// Iterate over inputs from onvif and compare
|
||||
|
||||
for _, input := range onvifInputs.DigitalInputs {
|
||||
find := false
|
||||
for _, event := range inputs {
|
||||
key := string(input.Token)
|
||||
if key == event.Key {
|
||||
find = true
|
||||
}
|
||||
}
|
||||
if !find {
|
||||
key := string(input.Token)
|
||||
inputs = append(inputs, onvif.ONVIFEvents{
|
||||
Key: key,
|
||||
Type: "input",
|
||||
})
|
||||
}
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"data": inputs,
|
||||
})
|
||||
|
||||
@@ -54,16 +54,21 @@ func AddRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware, configDirect
|
||||
components.UpdateConfig(c, configDirectory, configuration, communication)
|
||||
})
|
||||
|
||||
// Will verify the current hub settings.
|
||||
// Will verify the hub settings.
|
||||
api.POST("/hub/verify", func(c *gin.Context) {
|
||||
cloud.VerifyHub(c)
|
||||
})
|
||||
|
||||
// Will verify the current persistence settings.
|
||||
// Will verify the persistence settings.
|
||||
api.POST("/persistence/verify", func(c *gin.Context) {
|
||||
cloud.VerifyPersistence(c, configDirectory)
|
||||
})
|
||||
|
||||
// Will verify the secondary persistence settings.
|
||||
api.POST("/persistence/secondary/verify", func(c *gin.Context) {
|
||||
cloud.VerifySecondaryPersistence(c, configDirectory)
|
||||
})
|
||||
|
||||
// Camera specific methods. Doesn't require any authorization.
|
||||
// These are available for anyone, but require the agent, to reach
|
||||
// the camera.
|
||||
|
||||
@@ -166,9 +166,33 @@ func MQTTListenerHandler(mqttClient mqtt.Client, hubKey string, configDirectory
|
||||
|
||||
// We will receive all messages from our hub, so we'll need to filter to the relevant device.
|
||||
if message.Mid != "" && message.Timestamp != 0 && message.DeviceId == configuration.Config.Key {
|
||||
// Messages might be encrypted, if so we'll
|
||||
// need to decrypt them.
|
||||
var payload models.Payload
|
||||
|
||||
// Messages might be hidden, if so we'll need to decrypt them using the Kerberos Hub private key.
|
||||
if message.Hidden && configuration.Config.HubEncryption == "true" {
|
||||
hiddenValue := message.Payload.HiddenValue
|
||||
if len(hiddenValue) > 0 {
|
||||
privateKey := configuration.Config.HubPrivateKey
|
||||
if privateKey != "" {
|
||||
data, err := base64.StdEncoding.DecodeString(hiddenValue)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
visibleValue, err := encryption.AesDecrypt(data, privateKey)
|
||||
if err != nil {
|
||||
log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message: " + err.Error())
|
||||
return
|
||||
}
|
||||
json.Unmarshal(visibleValue, &payload)
|
||||
message.Payload = payload
|
||||
} else {
|
||||
log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message, no private key provided.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Messages might be end-to-end encrypted, if so we'll need to decrypt them,
|
||||
// using our own keys.
|
||||
if message.Encrypted && configuration.Config.Encryption != nil && configuration.Config.Encryption.Enabled == "true" {
|
||||
encryptedValue := message.Payload.EncryptedValue
|
||||
if len(encryptedValue) > 0 {
|
||||
@@ -317,7 +341,7 @@ func HandleGetPTZPosition(mqttClient mqtt.Client, hubKey string, payload models.
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("routers.mqtt.main.HandlePTZPosition(): something went wrong while sending position to hub: " + string(payload))
|
||||
}
|
||||
@@ -352,16 +376,27 @@ func HandleRequestConfig(mqttClient mqtt.Client, hubKey string, payload models.P
|
||||
json.Unmarshal(jsonData, &configPayload)
|
||||
|
||||
if configPayload.Timestamp != 0 {
|
||||
// Get Config from the device
|
||||
|
||||
// Get Config from the device
|
||||
key := configuration.Config.Key
|
||||
name := configuration.Config.Name
|
||||
if configuration.Config.FriendlyName != "" {
|
||||
name = configuration.Config.FriendlyName
|
||||
}
|
||||
|
||||
if key != "" && name != "" {
|
||||
|
||||
// Copy the config, as we don't want to share the encryption part.
|
||||
deepCopy := configuration.Config
|
||||
|
||||
// We need a fix for the width and height if a substream.
|
||||
// The ROI requires the width and height of the sub stream.
|
||||
if configuration.Config.Capture.IPCamera.SubRTSP != "" &&
|
||||
configuration.Config.Capture.IPCamera.SubRTSP != configuration.Config.Capture.IPCamera.RTSP {
|
||||
deepCopy.Capture.IPCamera.Width = configuration.Config.Capture.IPCamera.SubWidth
|
||||
deepCopy.Capture.IPCamera.Height = configuration.Config.Capture.IPCamera.SubHeight
|
||||
}
|
||||
|
||||
var configMap map[string]interface{}
|
||||
inrec, _ := json.Marshal(deepCopy)
|
||||
json.Unmarshal(inrec, &configMap)
|
||||
@@ -378,7 +413,7 @@ func HandleRequestConfig(mqttClient mqtt.Client, hubKey string, payload models.P
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("routers.mqtt.main.HandleRequestConfig(): something went wrong while sending config to hub: " + string(payload))
|
||||
}
|
||||
@@ -417,7 +452,7 @@ func HandleUpdateConfig(mqttClient mqtt.Client, hubKey string, payload models.Pa
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("routers.mqtt.main.HandleUpdateConfig(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
|
||||
@@ -161,6 +161,8 @@ logreader:
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
encodedImage = base64.StdEncoding.EncodeToString(bytes)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
log.Log.Error("routers.websocket.main.ForwardSDStream():" + err.Error())
|
||||
|
||||
@@ -23,6 +23,8 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
)
|
||||
|
||||
const VERSION = "3.5.0"
|
||||
|
||||
const letterBytes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
// MaxUint8 - maximum value which can be held in an uint8
|
||||
|
||||
992
machinery/src/video/mp4.go
Normal file
992
machinery/src/video/mp4.go
Normal file
@@ -0,0 +1,992 @@
|
||||
package video
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
mp4ff "github.com/Eyevinn/mp4ff/mp4"
|
||||
"github.com/kerberos-io/agent/machinery/src/encryption"
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
)
|
||||
|
||||
var LastPTS uint64 = 0 // Last PTS for the current segment
|
||||
|
||||
type MP4 struct {
|
||||
// FileName is the name of the file
|
||||
FileName string
|
||||
width int
|
||||
height int
|
||||
Segments []*mp4ff.MediaSegment // List of media segments
|
||||
Segment *mp4ff.MediaSegment
|
||||
MultiTrackFragment *mp4ff.Fragment
|
||||
TrackIDs []uint32
|
||||
FileWriter *os.File
|
||||
Writer *bufio.Writer
|
||||
SegmentCount int
|
||||
SampleCount int
|
||||
StartPTS uint64
|
||||
VideoTotalDuration uint64
|
||||
AudioTotalDuration uint64
|
||||
AudioPTS uint64
|
||||
Start bool
|
||||
SPSNALUs [][]byte // SPS NALUs for H264
|
||||
PPSNALUs [][]byte // PPS NALUs for H264
|
||||
VPSNALUs [][]byte // VPS NALUs for H264
|
||||
FreeBoxSize int64
|
||||
MoofBoxes int64 // Number of moof boxes in the file
|
||||
MoofBoxSizes []int64 // Sizes of each moof box
|
||||
StartTime uint64 // Start time of the MP4 file
|
||||
VideoTrackName string // Name of the video track
|
||||
VideoTrack int // Track ID for the video track
|
||||
AudioTrackName string // Name of the audio track
|
||||
AudioTrack int // Track ID for the audio track
|
||||
VideoFullSample *mp4ff.FullSample // Full sample for video track
|
||||
AudioFullSample *mp4ff.FullSample // Full sample for audio track
|
||||
LastAudioSampleDTS uint64 // Last PTS for audio sample
|
||||
LastVideoSampleDTS uint64 // Last PTS for video sample
|
||||
SampleType string // Type of the sample (e.g., "video", "audio", "subtitle")
|
||||
}
|
||||
|
||||
// NewMP4 creates a new MP4 object
|
||||
func NewMP4(fileName string, spsNALUs [][]byte, ppsNALUs [][]byte, vpsNALUs [][]byte) *MP4 {
|
||||
|
||||
init := mp4ff.NewMP4Init()
|
||||
|
||||
// Add a free box to the init segment
|
||||
// Prepend a free box to the init segment with a size of 1000
|
||||
freeBoxSize := 2048
|
||||
free := mp4ff.NewFreeBox(make([]byte, freeBoxSize))
|
||||
init.AddChild(free)
|
||||
|
||||
// Create a writer
|
||||
ofd, err := os.Create(fileName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create a buffered writer
|
||||
bufferedWriter := bufio.NewWriterSize(ofd, 64*1024) // 64KB buffer
|
||||
|
||||
// We will write the empty init segment to the file
|
||||
// so we can overwrite it later with the actual init segment.
|
||||
err = init.Encode(bufferedWriter)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &MP4{
|
||||
FileName: fileName,
|
||||
StartTime: uint64(time.Now().Unix()),
|
||||
FreeBoxSize: int64(freeBoxSize),
|
||||
FileWriter: ofd,
|
||||
Writer: bufferedWriter,
|
||||
SPSNALUs: spsNALUs,
|
||||
PPSNALUs: ppsNALUs,
|
||||
VPSNALUs: vpsNALUs,
|
||||
}
|
||||
}
|
||||
|
||||
// SetWidth sets the width of the video
|
||||
func (mp4 *MP4) SetWidth(width int) {
|
||||
// Set the width of the video
|
||||
mp4.width = width
|
||||
}
|
||||
|
||||
// SetHeight sets the height of the video
|
||||
func (mp4 *MP4) SetHeight(height int) {
|
||||
// Set the height of the video
|
||||
mp4.height = height
|
||||
}
|
||||
|
||||
// AddVideoTrack
|
||||
// Add a video track to the MP4 file
|
||||
func (mp4 *MP4) AddVideoTrack(codec string) uint32 {
|
||||
nextTrack := uint32(len(mp4.TrackIDs) + 1)
|
||||
mp4.VideoTrack = int(nextTrack)
|
||||
mp4.TrackIDs = append(mp4.TrackIDs, nextTrack)
|
||||
mp4.VideoTrackName = codec
|
||||
return nextTrack
|
||||
}
|
||||
|
||||
// AddAudioTrack
|
||||
// Add an audio track to the MP4 file
|
||||
func (mp4 *MP4) AddAudioTrack(codec string) uint32 {
|
||||
nextTrack := uint32(len(mp4.TrackIDs) + 1)
|
||||
mp4.AudioTrack = int(nextTrack)
|
||||
mp4.TrackIDs = append(mp4.TrackIDs, nextTrack)
|
||||
mp4.AudioTrackName = codec
|
||||
return nextTrack
|
||||
}
|
||||
|
||||
// AddMediaSegment is currently a no-op placeholder; media segments are
// created implicitly by AddSampleToTrack on every keyframe.
func (mp4 *MP4) AddMediaSegment(segNr int) {
}
|
||||
|
||||
func (mp4 *MP4) AddSampleToTrack(trackID uint32, isKeyframe bool, data []byte, pts uint64) error {
|
||||
|
||||
if isKeyframe {
|
||||
|
||||
// Write the segment to the file
|
||||
if mp4.Start {
|
||||
mp4.MoofBoxes = mp4.MoofBoxes + 1
|
||||
mp4.MoofBoxSizes = append(mp4.MoofBoxSizes, int64(mp4.Segment.Size()))
|
||||
err := mp4.Segment.Encode(mp4.Writer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mp4.Segments = append(mp4.Segments, mp4.Segment)
|
||||
}
|
||||
|
||||
mp4.Start = true
|
||||
|
||||
// Increment the segment count
|
||||
mp4.SegmentCount = mp4.SegmentCount + 1
|
||||
|
||||
// Create a new media segment
|
||||
seg := mp4ff.NewMediaSegment()
|
||||
|
||||
// Create a video fragment
|
||||
multiTrackFragment, err := mp4ff.CreateMultiTrackFragment(uint32(mp4.SegmentCount), mp4.TrackIDs) // Assuming 1 for video track and 2 for audio track
|
||||
if err != nil {
|
||||
}
|
||||
mp4.MultiTrackFragment = multiTrackFragment
|
||||
seg.AddFragment(multiTrackFragment)
|
||||
|
||||
// Set to MP4 struct
|
||||
mp4.Segment = seg
|
||||
|
||||
// Set the start PTS for the next segment
|
||||
mp4.StartPTS = pts
|
||||
}
|
||||
|
||||
if mp4.Start {
|
||||
|
||||
if trackID == uint32(mp4.VideoTrack) {
|
||||
|
||||
var lengthPrefixed []byte
|
||||
var err error
|
||||
if mp4.VideoTrackName == "H264" || mp4.VideoTrackName == "AVC1" { // Convert Annex B to length-prefixed NAL units if H264
|
||||
lengthPrefixed, err = annexBToLengthPrefixed(data)
|
||||
} else if mp4.VideoTrackName == "H265" || mp4.VideoTrackName == "HVC1" { // Convert H265 Annex B to length-prefixed NAL units
|
||||
lengthPrefixed, err = annexBToLengthPrefixed(data)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if mp4.VideoFullSample != nil {
|
||||
duration := pts - mp4.VideoFullSample.DecodeTime
|
||||
log.Log.Debug("Adding sample to track " + fmt.Sprintf("%d, PTS: %d, Duration: %d, size: %d, Keyframe: %t", trackID, pts, duration, len(lengthPrefixed), isKeyframe))
|
||||
|
||||
mp4.LastVideoSampleDTS = duration
|
||||
//fmt.Printf("Adding sample to track %d, PTS: %d, Duration: %d, size: %d, Keyframe: %t\n", trackID, pts, duration, len(mp4.VideoFullSample.Data), isKeyframe)
|
||||
mp4.VideoTotalDuration += duration
|
||||
mp4.VideoFullSample.DecodeTime = mp4.VideoTotalDuration - duration
|
||||
mp4.VideoFullSample.Sample.Dur = uint32(duration)
|
||||
err := mp4.MultiTrackFragment.AddFullSampleToTrack(*mp4.VideoFullSample, trackID)
|
||||
if err != nil {
|
||||
//log.Printf("Error adding sample to track %d: %v", trackID, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Set the sample data
|
||||
var fullSample mp4ff.FullSample
|
||||
flags := uint32(33554432)
|
||||
if !isKeyframe {
|
||||
flags = uint32(16842752)
|
||||
}
|
||||
fullSample.DecodeTime = pts
|
||||
fullSample.Data = lengthPrefixed
|
||||
fullSample.Sample = mp4ff.Sample{
|
||||
Size: uint32(len(fullSample.Data)),
|
||||
Flags: flags,
|
||||
CompositionTimeOffset: 0, // No composition time offset for video
|
||||
}
|
||||
mp4.VideoFullSample = &fullSample
|
||||
mp4.SampleType = "video"
|
||||
}
|
||||
} else if trackID == uint32(mp4.AudioTrack) {
|
||||
if mp4.AudioFullSample != nil {
|
||||
SplitAACFrame(mp4.AudioFullSample.Data, func(started bool, aac []byte) {
|
||||
sampleToAdd := *mp4.AudioFullSample
|
||||
dts := pts - mp4.AudioFullSample.DecodeTime
|
||||
if pts < mp4.AudioFullSample.DecodeTime {
|
||||
//log.Printf("Warning: PTS %d is less than previous sample's DecodeTime %d, resetting AudioFullSample", pts, mp4.AudioFullSample.DecodeTime)
|
||||
dts = 1
|
||||
}
|
||||
if started {
|
||||
dts = 1
|
||||
}
|
||||
mp4.LastAudioSampleDTS = dts
|
||||
//fmt.Printf("Adding sample to track %d, PTS: %d, Duration: %d, size: %d\n", trackID, pts, dts, len(aac[7:]))
|
||||
mp4.AudioTotalDuration += dts
|
||||
mp4.AudioPTS += dts
|
||||
sampleToAdd.Data = aac[7:] // Remove the ADTS header (first 7 bytes)
|
||||
sampleToAdd.DecodeTime = mp4.AudioPTS - dts
|
||||
sampleToAdd.Sample.Dur = uint32(dts)
|
||||
sampleToAdd.Sample.Size = uint32(len(aac[7:]))
|
||||
err := mp4.MultiTrackFragment.AddFullSampleToTrack(sampleToAdd, trackID)
|
||||
if err != nil {
|
||||
log.Log.Error("mp4.AddSampleToTrack(): error adding sample to track " + fmt.Sprintf("%d: %v", trackID, err))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Set the sample data
|
||||
//flags := uint32(33554432)
|
||||
var fullSample mp4ff.FullSample
|
||||
fullSample.DecodeTime = pts
|
||||
fullSample.Data = data
|
||||
fullSample.Sample = mp4ff.Sample{
|
||||
Size: uint32(len(fullSample.Data)),
|
||||
Flags: 0,
|
||||
CompositionTimeOffset: 0, // No composition time offset for audio
|
||||
}
|
||||
mp4.AudioFullSample = &fullSample
|
||||
mp4.SampleType = "audio"
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close finalizes the MP4 file. It flushes the last pending media segment,
// builds the real init segment (ftyp + moov with track descriptors and a
// signed fingerprint UUID box), and writes it over the placeholder free box
// reserved at the start of the file by NewMP4. Remaining reserved space is
// refilled with a smaller free box so byte offsets of the media data stay
// untouched.
func (mp4 *MP4) Close(config *models.Config) {

	// Add the last sample to the track, we will predict the duration based on the last sample
	// We are not insert the last sample as we might corrupt playback (as we do not know accurately the next PTS).
	// In theory it means we will lose the last sample, so there is millisecond dataloss, but it is better than corrupting playback.
	// We could this by using a delayed packet reader, and look for the next PTS (closest one), but that would require a lot of memory and CPU.

	/*duration := uint64(0)
	trackID := uint32(1)
	if mp4.SampleType == "video" {
		duration = mp4.LastVideoSampleDTS
		trackID = uint32(mp4.VideoTrack)
	} else if mp4.SampleType == "audio" {
		duration = 21 //mp4.LastAudioSampleDTS

	} else {
		log.Println("mp4.Close(): unknown sample type, cannot calculate duration")
	}

	if duration > 0 {
		mp4.VideoTotalDuration += duration
		mp4.VideoFullSample.DecodeTime = mp4.VideoTotalDuration - duration
		mp4.VideoFullSample.Sample.Dur = uint32(duration)
		err := mp4.MultiTrackFragment.AddFullSampleToTrack(*mp4.VideoFullSample, trackID)
		if err != nil {
		}
		mp4.Segments = append(mp4.Segments, mp4.Segment)
	}*/

	// NOTE(review): this only logs — the function still writes out a file
	// with zero duration below; confirm that is intended.
	if mp4.VideoTotalDuration == 0 && mp4.AudioTotalDuration == 0 {
		log.Log.Error("mp4.Close(): no video or audio samples added, cannot create MP4 file")
	}

	// Encode the last segment
	if mp4.Segment != nil {
		err := mp4.Segment.Encode(mp4.Writer)
		if err != nil {
			panic(err)
		}
	}

	// NOTE(review): Flush error is ignored here; a short write would go unnoticed.
	mp4.Writer.Flush()
	defer mp4.FileWriter.Close()

	// Now we have all the moof and mdat boxes written to the file.
	// We can now generate the ftyp and moov boxes, and replace the free box
	// we reserved earlier (payload of mp4.FreeBoxSize bytes) with them.
	init := mp4ff.NewMP4Init()

	// Create a new ftyp box
	majorBrand := "isom"
	minorVersion := uint32(512)
	compatibleBrands := []string{"iso2", "avc1", "hvc1", "mp41"}
	ftyp := mp4ff.NewFtyp(majorBrand, minorVersion, compatibleBrands)
	init.AddChild(ftyp)

	// Create a new moov box
	moov := mp4ff.NewMoovBox()
	init.AddChild(moov)

	// Movie header: creation/modification time and total duration.
	// Both timescales are 1000 (milliseconds).
	videoTimescale := uint32(1000)
	audioTimescale := uint32(1000)
	mvhd := &mp4ff.MvhdBox{
		Version:          0,
		Flags:            0,
		CreationTime:     mp4.StartTime,
		ModificationTime: mp4.StartTime,
		Timescale:        videoTimescale,
		Duration:         mp4.VideoTotalDuration,
	}
	init.Moov.AddChild(mvhd)

	// Set the total duration in the moov box (mvex/mehd for fragmented files)
	mvex := mp4ff.NewMvexBox()
	mvex.AddChild(&mp4ff.MehdBox{FragmentDuration: int64(mp4.VideoTotalDuration)})
	init.Moov.AddChild(mvex)

	// Add a track for the video
	if mp4.VideoTrackName == "H264" || mp4.VideoTrackName == "AVC1" {
		init.AddEmptyTrack(videoTimescale, "video", "und")
		includePS := true
		err := init.Moov.Traks[0].SetAVCDescriptor("avc1", mp4.SPSNALUs, mp4.PPSNALUs, includePS)
		if err != nil {
			// NOTE(review): descriptor error deliberately ignored (best effort).
			//panic(err)
		}
		init.Moov.Traks[0].Tkhd.Duration = mp4.VideoTotalDuration
		init.Moov.Traks[0].Mdia.Hdlr.Name = "agent " + utils.VERSION
		//init.Moov.Traks[0].Mdia.Mdhd.Duration = mp4.VideoTotalDuration
	} else if mp4.VideoTrackName == "H265" || mp4.VideoTrackName == "HVC1" {
		init.AddEmptyTrack(videoTimescale, "video", "und")
		includePS := true
		err := init.Moov.Traks[0].SetHEVCDescriptor("hvc1", mp4.VPSNALUs, mp4.SPSNALUs, mp4.PPSNALUs, [][]byte{}, includePS)
		if err != nil {
			// NOTE(review): descriptor error deliberately ignored (best effort).
			//panic(err)
		}
		init.Moov.Traks[0].Tkhd.Duration = mp4.VideoTotalDuration
		init.Moov.Traks[0].Mdia.Hdlr.Name = "agent " + utils.VERSION
		//init.Moov.Traks[0].Mdia.Mdhd.Duration = mp4.VideoTotalDuration
	}

	// Try adding audio track if available
	// NOTE(review): Traks[1] assumes a video track was added above — confirm
	// audio never occurs without video.
	if mp4.AudioTrackName == "AAC" || mp4.AudioTrackName == "MP4A" {
		// Add an audio track to the moov box
		init.AddEmptyTrack(audioTimescale, "audio", "und")

		// Check if the same sample rate is set, otherwise we default to 48000
		audioSampleRate := 48000
		if config.Capture.IPCamera.SampleRate > 0 {
			audioSampleRate = config.Capture.IPCamera.SampleRate
		}
		// Set the audio descriptor
		err := init.Moov.Traks[1].SetAACDescriptor(29, audioSampleRate)
		if err != nil {
			// NOTE(review): descriptor error deliberately ignored (best effort).
			//panic(err)
		}
		init.Moov.Traks[1].Tkhd.Duration = mp4.AudioTotalDuration
		init.Moov.Traks[1].Mdia.Hdlr.Name = "agent " + utils.VERSION
		//init.Moov.Traks[1].Mdia.Mdhd.Duration = mp4.AudioTotalDuration
	}

	// Try adding subtitle track if available
	// NOTE(review): Traks[2] assumes both a video and an audio track exist;
	// also this checks VideoTrackName — verify that is intended for subtitles.
	if mp4.VideoTrackName == "VTT" || mp4.VideoTrackName == "WebVTT" {
		// Add a subtitle track to the moov box
		init.AddEmptyTrack(videoTimescale, "subtitle", "und")
		// Set the subtitle descriptor
		err := init.Moov.Traks[2].SetWvttDescriptor("")
		if err != nil {
			// NOTE(review): descriptor error deliberately ignored (best effort).
			//log.Log.Error("mp4.Close(): error setting VTT descriptor: " + err.Error())
			//return
		}
		init.Moov.Traks[2].Mdia.Hdlr.Name = "agent " + utils.VERSION
	}

	// We will create a fingerprint that's encrypted with the private key, so we can verify the integrity of the file later.
	// The fingerprint will be a UUID box, which is a custom box that we can use to store the fingerprint.
	// Following fields are included in the fingerprint (UUID):
	// - Moov.Mvhd.CreationTime (the time the file was created)
	// - Moov.Mvhd.Duration (the total duration of the video)
	// - Moov.Trak.Hdlr.Name // (the name of the handler, which is the agent and version)
	// - len(Moof) // (the number of moof boxes in the file)
	// - size(Moof1) // (the size of the first moof box)
	// - size(Moof2) // (the size of the second moof box)
	// ..
	//
	// All attributes of the fingerprint are concatenated into a single string, which is then
	// signed with the private key (see encryption.SignWithPrivateKey below).

	fingerprint := fmt.Sprintf("%d", init.Moov.Mvhd.CreationTime) + "_" +
		fmt.Sprintf("%d", init.Moov.Mvhd.Duration) + "_" +
		init.Moov.Trak.Mdia.Hdlr.Name + "_" +
		fmt.Sprintf("%d", mp4.MoofBoxes) + "_" // Number of moof boxes

	for i, size := range mp4.MoofBoxSizes {
		fingerprint += fmt.Sprintf("%d", size)
		if i < len(mp4.MoofBoxSizes)-1 {
			fingerprint += "_"
		}
	}
	// Remove trailing underscore if present (happens when MoofBoxSizes is empty)
	if len(fingerprint) > 0 && fingerprint[len(fingerprint)-1] == '_' {
		fingerprint = fingerprint[:len(fingerprint)-1]
	}

	// Load the private key from the configuration
	privateKey := config.Signing.PrivateKey
	r := strings.NewReader(privateKey)
	pemBytes, _ := ioutil.ReadAll(r)
	block, _ := pem.Decode(pemBytes)

	if block == nil {
		// No (valid) PEM key configured: skip signing, file is written unsigned.
		//log.Log.Error("mp4.Close(): error decoding PEM block containing private key")
		//return
	} else {
		// Parse private key
		b := block.Bytes
		key, err := x509.ParsePKCS8PrivateKey(b)
		if err != nil {
			// Unparseable key: skip signing.
			//log.Log.Error("mp4.Close(): error parsing private key: " + err.Error())
			//return
		} else {
			// Convert key to *rsa.PrivateKey
			// NOTE(review): the type assertion error is ignored; a non-RSA
			// PKCS8 key yields rsaKey == nil — confirm SignWithPrivateKey
			// handles nil.
			rsaKey, _ := key.(*rsa.PrivateKey)
			fingerprintBytes := []byte(fingerprint)
			signature, err := encryption.SignWithPrivateKey(fingerprintBytes, rsaKey)
			if err == nil && len(signature) > 0 {
				// Store the signature in a custom UUID box inside moov.
				uuid := &mp4ff.UUIDBox{}
				uuid.SetUUID("6b0c1f8e-3d2a-4f5b-9c7d-8f1e2b3c4d5e")
				uuid.UnknownPayload = signature
				init.Moov.AddChild(uuid)
			} else {
				// Signing failed: file is written unsigned.
				//log.Log.Error("mp4.Close(): error signing fingerprint: " + err.Error())
			}
		}
	}

	// We will also calculate the SIDX box, which is a segment index box that contains information about the segments in the file.
	// This is useful for seeking in the file, and for streaming the file.
	/*sidx := &mp4ff.SidxBox{
		Version:                  0,
		Flags:                    0,
		ReferenceID:              0,
		Timescale:                videoTimescale,
		EarliestPresentationTime: 0,
		FirstOffset:              0,
		SidxRefs:                 make([]mp4ff.SidxRef, 0),
	}
	referenceTrak := init.Moov.Trak
	trex, ok := init.Moov.Mvex.GetTrex(referenceTrak.Tkhd.TrackID)
	if !ok {
		// We have an issue.
	}

	segDatas, err := findSegmentData(mp4.Segments, referenceTrak, trex)
	if err != nil {
		// We have an issue.
	}
	fillSidx(sidx, referenceTrak, segDatas, true)

	// Add the SIDX box to the moov box
	init.AddChild(sidx)*/

	// Encode the finished init segment into memory.
	buffer := bytes.NewBuffer(make([]byte, 0))
	// NOTE(review): Encode error is ignored; bytes.Buffer writes cannot fail,
	// but mp4ff could still report a structural error.
	init.Encode(buffer)

	// Overwrite the reserved free box at offset 0 with the real init segment.
	// NOTE(review): there is no check that buffer.Len() fits inside the
	// reserved space — an init segment larger than FreeBoxSize+8 would
	// overwrite media data. Confirm 2048 bytes is always sufficient.
	if _, err := mp4.FileWriter.WriteAt(buffer.Bytes(), 0); err != nil {
		panic(err)
	}

	// Refill the leftover reserved space with a smaller free box so the
	// media data that follows keeps its original byte offsets.
	remainingSize := mp4.FreeBoxSize - int64(buffer.Len())
	if remainingSize > 0 {
		newFreeBox := mp4ff.NewFreeBox(make([]byte, remainingSize))
		var freeBuf bytes.Buffer
		if err := newFreeBox.Encode(&freeBuf); err != nil {
			panic(err)
		}
		if _, err := mp4.FileWriter.WriteAt(freeBuf.Bytes(), int64(buffer.Len())); err != nil {
			panic(err)
		}
	}
}
|
||||
|
||||
// segData summarises one media segment for building a sidx (segment index)
// box: its byte position, timing, duration and encoded size.
type segData struct {
	startPos         uint64 // byte offset of the segment in the file
	presentationTime uint64 // baseDecodeTime + first sample's composition offset
	baseDecodeTime   uint64 // tfdt base media decode time of the first fragment
	dur              uint32 // sum of all sample durations in the segment
	size             uint32 // encoded size of the segment in bytes
}
|
||||
|
||||
func fillSidx(sidx *mp4ff.SidxBox, refTrak *mp4ff.TrakBox, segDatas []segData, nonZeroEPT bool) {
|
||||
ept := uint64(0)
|
||||
if nonZeroEPT {
|
||||
ept = segDatas[0].presentationTime
|
||||
}
|
||||
sidx.Version = 1
|
||||
sidx.Timescale = refTrak.Mdia.Mdhd.Timescale
|
||||
sidx.ReferenceID = 1
|
||||
sidx.EarliestPresentationTime = ept
|
||||
sidx.FirstOffset = 0
|
||||
sidx.SidxRefs = make([]mp4ff.SidxRef, 0, len(segDatas))
|
||||
|
||||
for _, segData := range segDatas {
|
||||
size := segData.size
|
||||
sidx.SidxRefs = append(sidx.SidxRefs, mp4ff.SidxRef{
|
||||
ReferencedSize: size,
|
||||
SubSegmentDuration: segData.dur,
|
||||
StartsWithSAP: 1,
|
||||
SAPType: 1,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// findSegmentData returns a slice of segment media data using a reference track.
|
||||
func findSegmentData(segs []*mp4ff.MediaSegment, refTrak *mp4ff.TrakBox, trex *mp4ff.TrexBox) ([]segData, error) {
|
||||
segDatas := make([]segData, 0, len(segs))
|
||||
for _, seg := range segs {
|
||||
var firstCompositionTimeOffest int64
|
||||
dur := uint32(0)
|
||||
var baseTime uint64
|
||||
for fIdx, frag := range seg.Fragments {
|
||||
for _, traf := range frag.Moof.Trafs {
|
||||
tfhd := traf.Tfhd
|
||||
if tfhd.TrackID == refTrak.Tkhd.TrackID { // Find track that gives sidx time values
|
||||
if fIdx == 0 {
|
||||
baseTime = traf.Tfdt.BaseMediaDecodeTime()
|
||||
}
|
||||
for i, trun := range traf.Truns {
|
||||
trun.AddSampleDefaultValues(tfhd, trex)
|
||||
samples := trun.GetSamples()
|
||||
for j, sample := range samples {
|
||||
if fIdx == 0 && i == 0 && j == 0 {
|
||||
firstCompositionTimeOffest = int64(sample.CompositionTimeOffset)
|
||||
}
|
||||
dur += sample.Dur
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sd := segData{
|
||||
startPos: seg.StartPos,
|
||||
presentationTime: uint64(int64(baseTime) + firstCompositionTimeOffest),
|
||||
baseDecodeTime: baseTime,
|
||||
dur: dur,
|
||||
size: uint32(seg.Size()),
|
||||
}
|
||||
segDatas = append(segDatas, sd)
|
||||
}
|
||||
return segDatas, nil
|
||||
}
|
||||
|
||||
// annexBToLengthPrefixed converts Annex B formatted H264 data (with start codes)
|
||||
// into length-prefixed NAL units (4-byte length before each NAL unit).
|
||||
func annexBToLengthPrefixed(data []byte) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
|
||||
// Find start codes and split NAL units
|
||||
nalus := splitNALUs(data)
|
||||
if len(nalus) == 0 {
|
||||
return nil, fmt.Errorf("no NAL units found")
|
||||
}
|
||||
|
||||
for _, nalu := range nalus {
|
||||
// Remove Annex B start codes (0x000001 or 0x00000001) from the beginning of each NALU
|
||||
nalu = removeAnnexBStartCode(nalu)
|
||||
if len(nalu) == 0 {
|
||||
continue
|
||||
}
|
||||
// Write 4-byte big-endian length
|
||||
length := uint32(len(nalu))
|
||||
lenBytes := []byte{
|
||||
byte(length >> 24),
|
||||
byte(length >> 16),
|
||||
byte(length >> 8),
|
||||
byte(length),
|
||||
}
|
||||
out.Write(lenBytes)
|
||||
out.Write(nalu)
|
||||
}
|
||||
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
||||
// removeAnnexBStartCode removes a leading Annex B start code from a NALU if present.
|
||||
func removeAnnexBStartCode(nalu []byte) []byte {
|
||||
if len(nalu) >= 4 && nalu[0] == 0x00 && nalu[1] == 0x00 {
|
||||
if nalu[2] == 0x01 {
|
||||
return nalu[3:]
|
||||
}
|
||||
if nalu[2] == 0x00 && nalu[3] == 0x01 {
|
||||
return nalu[4:]
|
||||
}
|
||||
}
|
||||
return nalu
|
||||
}
|
||||
|
||||
// splitNALUs splits Annex B data into raw NAL units without start codes.
|
||||
func splitNALUs(data []byte) [][]byte {
|
||||
var nalus [][]byte
|
||||
start := 0
|
||||
|
||||
for start < len(data) {
|
||||
// Find next start code (0x000001 or 0x00000001)
|
||||
i := findStartCode(data, start+3)
|
||||
if i < 0 {
|
||||
// Last NALU till end of data
|
||||
nalus = append(nalus, data[start:])
|
||||
break
|
||||
}
|
||||
// NAL unit is between start and i
|
||||
nalus = append(nalus, data[start:i])
|
||||
start = i
|
||||
}
|
||||
|
||||
return nalus
|
||||
}
|
||||
|
||||
// findStartCode returns the index of the next Annex B start code (0x000001
// or 0x00000001) at or after pos, or -1 if none exists.
//
// Fix: the loop bound was i+3 < len(data), which skipped a 3-byte start
// code whose last byte is the final byte of the slice; the bound is now
// i+2 < len(data) with the 4-byte pattern still guarded by i+3 < len(data).
func findStartCode(data []byte, pos int) int {
	for i := pos; i+2 < len(data); i++ {
		if data[i] == 0x00 && data[i+1] == 0x00 {
			if data[i+2] == 0x01 {
				return i // 3-byte start code
			}
			if i+3 < len(data) && data[i+2] == 0x00 && data[i+3] == 0x01 {
				return i // 4-byte start code
			}
		}
	}
	return -1
}
|
||||
|
||||
// FindSyncword returns the index of the next ADTS syncword (12 set bits:
// 0xFFF) in aac at or after offset, or -1 when none is found.
func FindSyncword(aac []byte, offset int) int {
	for i := offset; i+1 < len(aac); i++ {
		if aac[i] == 0xFF && (aac[i+1]&0xF0) == 0xF0 {
			return i
		}
	}
	return -1
}
|
||||
|
||||
// Table 31 – Profiles
|
||||
// index profile
|
||||
// 0 Main profile
|
||||
// 1 Low Complexity profile (LC)
|
||||
// 2 Scalable Sampling Rate profile (SSR)
|
||||
// 3 (reserved)
|
||||
|
||||
// START_CODE_TYPE is the byte length of an Annex B start code.
type START_CODE_TYPE int

const (
	START_CODE_3 START_CODE_TYPE = 3 // 0x000001
	START_CODE_4 START_CODE_TYPE = 4 // 0x00000001
)
|
||||
|
||||
func FindStartCode(nalu []byte, offset int) (int, START_CODE_TYPE) {
|
||||
idx := bytes.Index(nalu[offset:], []byte{0x00, 0x00, 0x01})
|
||||
switch {
|
||||
case idx > 0:
|
||||
if nalu[offset+idx-1] == 0x00 {
|
||||
return offset + idx - 1, START_CODE_4
|
||||
}
|
||||
fallthrough
|
||||
case idx == 0:
|
||||
return offset + idx, START_CODE_3
|
||||
}
|
||||
return -1, START_CODE_3
|
||||
}
|
||||
|
||||
func SplitFrame(frames []byte, onFrame func(nalu []byte) bool) {
|
||||
beg, sc := FindStartCode(frames, 0)
|
||||
for beg >= 0 {
|
||||
end, sc2 := FindStartCode(frames, beg+int(sc))
|
||||
if end == -1 {
|
||||
if onFrame != nil {
|
||||
onFrame(frames[beg+int(sc):])
|
||||
}
|
||||
break
|
||||
}
|
||||
if onFrame != nil && onFrame(frames[beg+int(sc):end]) == false {
|
||||
break
|
||||
}
|
||||
beg = end
|
||||
sc = sc2
|
||||
}
|
||||
}
|
||||
|
||||
func SplitFrameWithStartCode(frames []byte, onFrame func(nalu []byte) bool) {
|
||||
beg, sc := FindStartCode(frames, 0)
|
||||
for beg >= 0 {
|
||||
end, sc2 := FindStartCode(frames, beg+int(sc))
|
||||
if end == -1 {
|
||||
if onFrame != nil && (beg+int(sc)) < len(frames) {
|
||||
onFrame(frames[beg:])
|
||||
}
|
||||
break
|
||||
}
|
||||
if onFrame != nil && (beg+int(sc)) < end && onFrame(frames[beg:end]) == false {
|
||||
break
|
||||
}
|
||||
beg = end
|
||||
sc = sc2
|
||||
}
|
||||
}
|
||||
|
||||
func SplitAACFrame(frames []byte, onFrame func(started bool, aac []byte)) {
|
||||
var adts ADTS_Frame_Header
|
||||
start := FindSyncword(frames, 0)
|
||||
started := false
|
||||
for start >= 0 {
|
||||
adts.Decode(frames[start:])
|
||||
onFrame(started, frames[start:start+int(adts.Variable_Header.Frame_length)])
|
||||
start = FindSyncword(frames, start+int(adts.Variable_Header.Frame_length))
|
||||
started = true
|
||||
}
|
||||
}
|
||||
|
||||
// AAC_PROFILE is the 2-bit ADTS profile field (ISO 13818-7 Table 31).
type AAC_PROFILE int

const (
	MAIN AAC_PROFILE = iota // Main profile
	LC                      // Low Complexity profile
	SSR                     // Scalable Sampling Rate profile
)
|
||||
|
||||
// AAC_SAMPLING_FREQUENCY is the 4-bit ADTS sampling_frequency_index.
// The ordinal values map to the rates in AAC_Sampling_Idx.
type AAC_SAMPLING_FREQUENCY int

const (
	AAC_SAMPLE_96000 AAC_SAMPLING_FREQUENCY = iota
	AAC_SAMPLE_88200
	AAC_SAMPLE_64000
	AAC_SAMPLE_48000
	AAC_SAMPLE_44100
	AAC_SAMPLE_32000
	AAC_SAMPLE_24000
	AAC_SAMPLE_22050
	AAC_SAMPLE_16000
	AAC_SAMPLE_12000
	AAC_SAMPLE_11025
	AAC_SAMPLE_8000
	AAC_SAMPLE_7350
)
|
||||
|
||||
// AAC_Sampling_Idx maps an ADTS sampling_frequency_index (0..12) to its
// sampling rate in Hz.
var AAC_Sampling_Idx [13]int = [13]int{96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350}
|
||||
|
||||
// Table 4 – Syntax of adts_sequence()
|
||||
// adts_sequence() {
|
||||
// while (nextbits() == syncword) {
|
||||
// adts_frame();
|
||||
// }
|
||||
// }
|
||||
// Table 5 – Syntax of adts_frame()
|
||||
// adts_frame() {
|
||||
// adts_fixed_header();
|
||||
// adts_variable_header();
|
||||
// if (number_of_raw_data_blocks_in_frame == 0) {
|
||||
// adts_error_check();
|
||||
// raw_data_block();
|
||||
// }
|
||||
// else {
|
||||
// adts_header_error_check();
|
||||
// for (i = 0; i <= number_of_raw_data_blocks_in_frame;i++ {
|
||||
// raw_data_block();
|
||||
// adts_raw_data_block_error_check();
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// adts_fixed_header()
|
||||
// {
|
||||
// syncword; 12 bslbf
|
||||
// ID; 1 bslbf
|
||||
// layer; 2 uimsbf
|
||||
// protection_absent; 1 bslbf
|
||||
// profile; 2 uimsbf
|
||||
// sampling_frequency_index; 4 uimsbf
|
||||
// private_bit; 1 bslbf
|
||||
// channel_configuration; 3 uimsbf
|
||||
// original/copy; 1 bslbf
|
||||
// home; 1 bslbf
|
||||
// }
|
||||
|
||||
// ADTS_Fix_Header is the fixed part of an ADTS frame header
// (adts_fixed_header(), ISO 13818-7); field widths in bits noted below.
type ADTS_Fix_Header struct {
	ID                       uint8 // 1 bit: 0 = MPEG-4, 1 = MPEG-2
	Layer                    uint8 // 2 bits, always 0
	Protection_absent        uint8 // 1 bit: 1 = no CRC, 0 = 2 CRC bytes follow
	Profile                  uint8 // 2 bits, see AAC_PROFILE
	Sampling_frequency_index uint8 // 4 bits, index into AAC_Sampling_Idx
	Private_bit              uint8 // 1 bit
	Channel_configuration    uint8 // 3 bits
	Originalorcopy           uint8 // 1 bit
	Home                     uint8 // 1 bit
}
|
||||
|
||||
// adts_variable_header() {
|
||||
// copyright_identification_bit; 1 bslbf
|
||||
// copyright_identification_start; 1 bslbf
|
||||
// frame_length; 13 bslbf
|
||||
// adts_buffer_fullness; 11 bslbf
|
||||
// number_of_raw_data_blocks_in_frame; 2 uimsfb
|
||||
// }
|
||||
|
||||
// ADTS_Variable_Header is the variable part of an ADTS frame header
// (adts_variable_header()); field widths in bits noted below.
type ADTS_Variable_Header struct {
	Copyright_identification_bit       uint8  // 1 bit
	copyright_identification_start     uint8  // 1 bit
	Frame_length                       uint16 // 13 bits: full frame size incl. header
	Adts_buffer_fullness               uint16 // 11 bits
	Number_of_raw_data_blocks_in_frame uint8  // 2 bits
}
|
||||
|
||||
// ADTS_Frame_Header combines the fixed and variable parts of an ADTS
// frame header (7 bytes on the wire, 9 with CRC).
type ADTS_Frame_Header struct {
	Fix_Header      ADTS_Fix_Header
	Variable_Header ADTS_Variable_Header
}
|
||||
|
||||
func NewAdtsFrameHeader() *ADTS_Frame_Header {
|
||||
return &ADTS_Frame_Header{
|
||||
Fix_Header: ADTS_Fix_Header{
|
||||
ID: 0,
|
||||
Layer: 0,
|
||||
Protection_absent: 1,
|
||||
Profile: uint8(MAIN),
|
||||
Sampling_frequency_index: uint8(AAC_SAMPLE_44100),
|
||||
Private_bit: 0,
|
||||
Channel_configuration: 0,
|
||||
Originalorcopy: 0,
|
||||
Home: 0,
|
||||
},
|
||||
|
||||
Variable_Header: ADTS_Variable_Header{
|
||||
copyright_identification_start: 0,
|
||||
Copyright_identification_bit: 0,
|
||||
Frame_length: 0,
|
||||
Adts_buffer_fullness: 0,
|
||||
Number_of_raw_data_blocks_in_frame: 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Decode parses the first 7 bytes of aac as an ADTS frame header into
// frame. Panics (index out of range) if len(aac) < 7.
func (frame *ADTS_Frame_Header) Decode(aac []byte) {
	_ = aac[6] // bounds-check hint: require at least 7 bytes up front
	// Byte 1: ID(1) | layer(2) | protection_absent(1) (low nibble; byte 0
	// and the top nibble of byte 1 are the 0xFFF syncword).
	frame.Fix_Header.ID = aac[1] >> 3
	frame.Fix_Header.Layer = aac[1] >> 1 & 0x03
	frame.Fix_Header.Protection_absent = aac[1] & 0x01
	// Byte 2: profile(2) | sampling_frequency_index(4) | private_bit(1) | channel_config high bit
	frame.Fix_Header.Profile = aac[2] >> 6 & 0x03
	frame.Fix_Header.Sampling_frequency_index = aac[2] >> 2 & 0x0F
	frame.Fix_Header.Private_bit = aac[2] >> 1 & 0x01
	// Channel configuration spans the byte 2/3 boundary (1 + 2 bits).
	frame.Fix_Header.Channel_configuration = (aac[2] & 0x01 << 2) | (aac[3] >> 6)
	frame.Fix_Header.Originalorcopy = aac[3] >> 5 & 0x01
	frame.Fix_Header.Home = aac[3] >> 4 & 0x01
	frame.Variable_Header.Copyright_identification_bit = aac[3] >> 3 & 0x01
	frame.Variable_Header.copyright_identification_start = aac[3] >> 2 & 0x01
	// frame_length is 13 bits spanning bytes 3..5: 2 + 8 + 3.
	frame.Variable_Header.Frame_length = (uint16(aac[3]&0x03) << 11) | (uint16(aac[4]) << 3) | (uint16(aac[5]>>5) & 0x07)
	// adts_buffer_fullness is 11 bits spanning bytes 5..6: 5 + 6.
	frame.Variable_Header.Adts_buffer_fullness = (uint16(aac[5]&0x1F) << 6) | uint16(aac[6]>>2)
	frame.Variable_Header.Number_of_raw_data_blocks_in_frame = aac[6] & 0x03
}
|
||||
|
||||
func (frame *ADTS_Frame_Header) Encode() []byte {
|
||||
var hdr []byte
|
||||
if frame.Fix_Header.Protection_absent == 1 {
|
||||
hdr = make([]byte, 7)
|
||||
} else {
|
||||
hdr = make([]byte, 9)
|
||||
}
|
||||
hdr[0] = 0xFF
|
||||
hdr[1] = 0xF0
|
||||
hdr[1] = hdr[1] | (frame.Fix_Header.ID << 3) | (frame.Fix_Header.Layer << 1) | frame.Fix_Header.Protection_absent
|
||||
hdr[2] = frame.Fix_Header.Profile<<6 | frame.Fix_Header.Sampling_frequency_index<<2 | frame.Fix_Header.Private_bit<<1 | frame.Fix_Header.Channel_configuration>>2
|
||||
hdr[3] = frame.Fix_Header.Channel_configuration<<6 | frame.Fix_Header.Originalorcopy<<5 | frame.Fix_Header.Home<<4
|
||||
hdr[3] = hdr[3] | frame.Variable_Header.copyright_identification_start<<3 | frame.Variable_Header.Copyright_identification_bit<<2 | byte(frame.Variable_Header.Frame_length<<11)
|
||||
hdr[4] = byte(frame.Variable_Header.Frame_length >> 3)
|
||||
hdr[5] = byte((frame.Variable_Header.Frame_length&0x07)<<5) | byte(frame.Variable_Header.Adts_buffer_fullness>>3)
|
||||
hdr[6] = byte(frame.Variable_Header.Adts_buffer_fullness&0x3F<<2) | frame.Variable_Header.Number_of_raw_data_blocks_in_frame
|
||||
return hdr
|
||||
}
|
||||
|
||||
func SampleToAACSampleIndex(sampling int) int {
|
||||
for i, v := range AAC_Sampling_Idx {
|
||||
if v == sampling {
|
||||
return i
|
||||
}
|
||||
}
|
||||
panic("not Found AAC Sample Index")
|
||||
}
|
||||
|
||||
// AACSampleIdxToSample maps an ADTS sampling_frequency_index back to its
// sampling rate in Hz. Panics (index out of range) if idx is outside 0..12.
func AACSampleIdxToSample(idx int) int {
	return AAC_Sampling_Idx[idx]
}
|
||||
|
||||
// +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
// | audio object type(5 bits) | sampling frequency index(4 bits) | channel configuration(4 bits) | GA framelength flag(1 bits) | GA Depends on core coder(1 bits) | GA Extension Flag(1 bits) |
|
||||
// +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
|
||||
// AudioSpecificConfiguration is the 2-byte MPEG-4 AudioSpecificConfig:
// aot(5) | freq index(4) | channel config(4) | GA flags(1+1+1).
type AudioSpecificConfiguration struct {
	Audio_object_type        uint8 // 5 bits (ADTS profile + 1)
	Sample_freq_index        uint8 // 4 bits, index into AAC_Sampling_Idx
	Channel_configuration    uint8 // 4 bits
	GA_framelength_flag      uint8 // 1 bit
	GA_depends_on_core_coder uint8 // 1 bit
	GA_extension_flag        uint8 // 1 bit
}
|
||||
|
||||
func NewAudioSpecificConfiguration() *AudioSpecificConfiguration {
|
||||
return &AudioSpecificConfiguration{
|
||||
Audio_object_type: 0,
|
||||
Sample_freq_index: 0,
|
||||
Channel_configuration: 0,
|
||||
GA_framelength_flag: 0,
|
||||
GA_depends_on_core_coder: 0,
|
||||
GA_extension_flag: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (asc *AudioSpecificConfiguration) Encode() []byte {
|
||||
buf := make([]byte, 2)
|
||||
buf[0] = (asc.Audio_object_type & 0x1f << 3) | (asc.Sample_freq_index & 0x0F >> 1)
|
||||
buf[1] = (asc.Sample_freq_index & 0x0F << 7) | (asc.Channel_configuration & 0x0F << 3) | (asc.GA_framelength_flag & 0x01 << 2) | (asc.GA_depends_on_core_coder & 0x01 << 1) | (asc.GA_extension_flag & 0x01)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (asc *AudioSpecificConfiguration) Decode(buf []byte) error {
|
||||
|
||||
if len(buf) < 2 {
|
||||
return errors.New("len of buf < 2 ")
|
||||
}
|
||||
|
||||
asc.Audio_object_type = buf[0] >> 3
|
||||
asc.Sample_freq_index = (buf[0] & 0x07 << 1) | (buf[1] >> 7)
|
||||
asc.Channel_configuration = buf[1] >> 3 & 0x0F
|
||||
asc.GA_framelength_flag = buf[1] >> 2 & 0x01
|
||||
asc.GA_depends_on_core_coder = buf[1] >> 1 & 0x01
|
||||
asc.GA_extension_flag = buf[1] & 0x01
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertADTSToASC(frame []byte) (*AudioSpecificConfiguration, error) {
|
||||
if len(frame) < 7 {
|
||||
return nil, errors.New("len of frame < 7")
|
||||
}
|
||||
adts := NewAdtsFrameHeader()
|
||||
adts.Decode(frame)
|
||||
asc := NewAudioSpecificConfiguration()
|
||||
asc.Audio_object_type = adts.Fix_Header.Profile + 1
|
||||
asc.Channel_configuration = adts.Fix_Header.Channel_configuration
|
||||
asc.Sample_freq_index = adts.Fix_Header.Sampling_frequency_index
|
||||
return asc, nil
|
||||
}
|
||||
|
||||
func ConvertASCToADTS(asc []byte, aacbytes int) (*ADTS_Frame_Header, error) {
|
||||
aac_asc := NewAudioSpecificConfiguration()
|
||||
err := aac_asc.Decode(asc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
aac_adts := NewAdtsFrameHeader()
|
||||
aac_adts.Fix_Header.Profile = aac_asc.Audio_object_type - 1
|
||||
aac_adts.Fix_Header.Channel_configuration = aac_asc.Channel_configuration
|
||||
aac_adts.Fix_Header.Sampling_frequency_index = aac_asc.Sample_freq_index
|
||||
aac_adts.Fix_Header.Protection_absent = 1
|
||||
aac_adts.Variable_Header.Adts_buffer_fullness = 0x3F
|
||||
aac_adts.Variable_Header.Frame_length = uint16(aacbytes)
|
||||
return aac_adts, nil
|
||||
}
|
||||
@@ -9,14 +9,17 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
//"github.com/izern/go-fdkaac/fdkaac"
|
||||
"github.com/kerberos-io/agent/machinery/src/capture"
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
pionWebRTC "github.com/pion/webrtc/v3"
|
||||
pionMedia "github.com/pion/webrtc/v3/pkg/media"
|
||||
"github.com/pion/interceptor"
|
||||
"github.com/pion/interceptor/pkg/intervalpli"
|
||||
pionWebRTC "github.com/pion/webrtc/v4"
|
||||
pionMedia "github.com/pion/webrtc/v4/pkg/media"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -24,7 +27,6 @@ var (
|
||||
CandidateArrays map[string](chan string)
|
||||
peerConnectionCount int64
|
||||
peerConnections map[string]*pionWebRTC.PeerConnection
|
||||
//encoder *ffmpeg.VideoEncoder
|
||||
)
|
||||
|
||||
type WebRTC struct {
|
||||
@@ -37,24 +39,6 @@ type WebRTC struct {
|
||||
PacketsCount chan int
|
||||
}
|
||||
|
||||
// No longer used, is for transcoding, might comeback on this!
|
||||
/*func init() {
|
||||
// Encoder is created for once and for all.
|
||||
var err error
|
||||
encoder, err = ffmpeg.NewVideoEncoderByCodecType(av.H264)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if encoder == nil {
|
||||
err = fmt.Errorf("Video encoder not found")
|
||||
return
|
||||
}
|
||||
encoder.SetFramerate(30, 1)
|
||||
encoder.SetPixelFormat(av.I420)
|
||||
encoder.SetBitrate(1000000) // 1MB
|
||||
encoder.SetGopSize(30 / 1) // 1s
|
||||
}*/
|
||||
|
||||
func CreateWebRTC(name string, stunServers []string, turnServers []string, turnServersUsername string, turnServersCredential string) *WebRTC {
|
||||
return &WebRTC{
|
||||
Name: name,
|
||||
@@ -63,7 +47,6 @@ func CreateWebRTC(name string, stunServers []string, turnServers []string, turnS
|
||||
TurnServersUsername: turnServersUsername,
|
||||
TurnServersCredential: turnServersCredential,
|
||||
Timer: time.NewTimer(time.Second * 10),
|
||||
PacketsCount: make(chan int),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,7 +72,7 @@ func RegisterCandidates(key string, candidate models.ReceiveHDCandidatesPayload)
|
||||
CandidatesMutex.Lock()
|
||||
_, ok := CandidateArrays[key]
|
||||
if !ok {
|
||||
CandidateArrays[key] = make(chan string)
|
||||
CandidateArrays[key] = make(chan string, 100)
|
||||
}
|
||||
log.Log.Info("webrtc.main.HandleReceiveHDCandidates(): " + candidate.Candidate)
|
||||
select {
|
||||
@@ -100,6 +83,19 @@ func RegisterCandidates(key string, candidate models.ReceiveHDCandidatesPayload)
|
||||
CandidatesMutex.Unlock()
|
||||
}
|
||||
|
||||
func RegisterDefaultInterceptors(mediaEngine *pionWebRTC.MediaEngine, interceptorRegistry *interceptor.Registry) error {
|
||||
if err := pionWebRTC.ConfigureNack(mediaEngine, interceptorRegistry); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pionWebRTC.ConfigureRTCPReports(interceptorRegistry); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pionWebRTC.ConfigureSimulcastExtensionHeaders(mediaEngine); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func InitializeWebRTCConnection(configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, videoTrack *pionWebRTC.TrackLocalStaticSample, audioTrack *pionWebRTC.TrackLocalStaticSample, handshake models.RequestHDStreamPayload) {
|
||||
|
||||
config := configuration.Config
|
||||
@@ -114,7 +110,7 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
CandidatesMutex.Lock()
|
||||
_, ok := CandidateArrays[sessionKey]
|
||||
if !ok {
|
||||
CandidateArrays[sessionKey] = make(chan string)
|
||||
CandidateArrays[sessionKey] = make(chan string, 100)
|
||||
}
|
||||
CandidatesMutex.Unlock()
|
||||
|
||||
@@ -133,7 +129,36 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong registering codecs for media engine: " + err.Error())
|
||||
}
|
||||
|
||||
api := pionWebRTC.NewAPI(pionWebRTC.WithMediaEngine(mediaEngine))
|
||||
// Create a InterceptorRegistry. This is the user configurable RTP/RTCP Pipeline.
|
||||
// This provides NACKs, RTCP Reports and other features. If you use `webrtc.NewPeerConnection`
|
||||
// this is enabled by default. If you are manually managing You MUST create a InterceptorRegistry
|
||||
// for each PeerConnection.
|
||||
interceptorRegistry := &interceptor.Registry{}
|
||||
|
||||
// Use the default set of Interceptors
|
||||
if err := pionWebRTC.RegisterDefaultInterceptors(mediaEngine, interceptorRegistry); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Register a intervalpli factory
|
||||
// This interceptor sends a PLI every 3 seconds. A PLI causes a video keyframe to be generated by the sender.
|
||||
// This makes our video seekable and more error resilent, but at a cost of lower picture quality and higher bitrates
|
||||
// A real world application should process incoming RTCP packets from viewers and forward them to senders
|
||||
intervalPliFactory, err := intervalpli.NewReceiverInterceptor()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
interceptorRegistry.Add(intervalPliFactory)
|
||||
|
||||
api := pionWebRTC.NewAPI(
|
||||
pionWebRTC.WithMediaEngine(mediaEngine),
|
||||
pionWebRTC.WithInterceptorRegistry(interceptorRegistry),
|
||||
)
|
||||
|
||||
policy := pionWebRTC.ICETransportPolicyAll
|
||||
if config.ForceTurn == "true" {
|
||||
policy = pionWebRTC.ICETransportPolicyRelay
|
||||
}
|
||||
|
||||
peerConnection, err := api.NewPeerConnection(
|
||||
pionWebRTC.Configuration{
|
||||
@@ -147,55 +172,90 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
Credential: w.TurnServersCredential,
|
||||
},
|
||||
},
|
||||
//ICETransportPolicy: pionWebRTC.ICETransportPolicyRelay, // This will force a relay server, we might make this configurable.
|
||||
ICETransportPolicy: policy,
|
||||
},
|
||||
)
|
||||
|
||||
if err == nil && peerConnection != nil {
|
||||
|
||||
if _, err = peerConnection.AddTrack(videoTrack); err != nil {
|
||||
var videoSender *pionWebRTC.RTPSender = nil
|
||||
if videoSender, err = peerConnection.AddTrack(videoTrack); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding video track: " + err.Error())
|
||||
}
|
||||
// Read incoming RTCP packets
|
||||
// Before these packets are returned they are processed by interceptors. For things
|
||||
// like NACK this needs to be called.
|
||||
go func() {
|
||||
rtcpBuf := make([]byte, 1500)
|
||||
for {
|
||||
if _, _, rtcpErr := videoSender.Read(rtcpBuf); rtcpErr != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err = peerConnection.AddTrack(audioTrack); err != nil {
|
||||
var audioSender *pionWebRTC.RTPSender = nil
|
||||
if audioSender, err = peerConnection.AddTrack(audioTrack); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding audio track: " + err.Error())
|
||||
}
|
||||
|
||||
peerConnection.OnICEConnectionStateChange(func(connectionState pionWebRTC.ICEConnectionState) {
|
||||
if connectionState == pionWebRTC.ICEConnectionStateDisconnected {
|
||||
atomic.AddInt64(&peerConnectionCount, -1)
|
||||
} // Read incoming RTCP packets
|
||||
// Before these packets are returned they are processed by interceptors. For things
|
||||
// like NACK this needs to be called.
|
||||
go func() {
|
||||
rtcpBuf := make([]byte, 1500)
|
||||
for {
|
||||
if _, _, rtcpErr := audioSender.Read(rtcpBuf); rtcpErr != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
peerConnection.OnConnectionStateChange(func(connectionState pionWebRTC.PeerConnectionState) {
|
||||
if connectionState == pionWebRTC.PeerConnectionStateDisconnected || connectionState == pionWebRTC.PeerConnectionStateClosed {
|
||||
// Set lock
|
||||
CandidatesMutex.Lock()
|
||||
peerConnections[handshake.SessionID] = nil
|
||||
atomic.AddInt64(&peerConnectionCount, -1)
|
||||
_, ok := CandidateArrays[sessionKey]
|
||||
if ok {
|
||||
close(CandidateArrays[sessionKey])
|
||||
delete(CandidateArrays, sessionKey)
|
||||
}
|
||||
CandidatesMutex.Unlock()
|
||||
|
||||
close(w.PacketsCount)
|
||||
// Not really needed.
|
||||
//senders := peerConnection.GetSenders()
|
||||
//for _, sender := range senders {
|
||||
// if err := peerConnection.RemoveTrack(sender); err != nil {
|
||||
// log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while removing track: " + err.Error())
|
||||
// }
|
||||
//}
|
||||
if err := peerConnection.Close(); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while closing peer connection: " + err.Error())
|
||||
}
|
||||
} else if connectionState == pionWebRTC.ICEConnectionStateConnected {
|
||||
peerConnections[handshake.SessionID] = nil
|
||||
delete(peerConnections, handshake.SessionID)
|
||||
CandidatesMutex.Unlock()
|
||||
} else if connectionState == pionWebRTC.PeerConnectionStateConnected {
|
||||
CandidatesMutex.Lock()
|
||||
atomic.AddInt64(&peerConnectionCount, 1)
|
||||
} else if connectionState == pionWebRTC.ICEConnectionStateChecking {
|
||||
// Iterate over the candidates and send them to the remote client
|
||||
// Non blocking channel
|
||||
for candidate := range CandidateArrays[sessionKey] {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Received candidate from channel: " + candidate)
|
||||
if candidateErr := peerConnection.AddICECandidate(pionWebRTC.ICECandidateInit{Candidate: string(candidate)}); candidateErr != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding candidate: " + candidateErr.Error())
|
||||
}
|
||||
}
|
||||
} else if connectionState == pionWebRTC.ICEConnectionStateFailed {
|
||||
CandidatesMutex.Unlock()
|
||||
} else if connectionState == pionWebRTC.PeerConnectionStateFailed {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): ICEConnectionStateFailed")
|
||||
}
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): connection state changed to: " + connectionState.String())
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Number of peers connected (" + strconv.FormatInt(peerConnectionCount, 10) + ")")
|
||||
})
|
||||
|
||||
go func() {
|
||||
// Iterate over the candidates and send them to the remote client
|
||||
// Non blocking channe
|
||||
for candidate := range CandidateArrays[sessionKey] {
|
||||
CandidatesMutex.Lock()
|
||||
log.Log.Info(">>>> webrtc.main.InitializeWebRTCConnection(): Received candidate from channel: " + candidate)
|
||||
if candidateErr := peerConnection.AddICECandidate(pionWebRTC.ICECandidateInit{Candidate: string(candidate)}); candidateErr != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding candidate: " + candidateErr.Error())
|
||||
}
|
||||
CandidatesMutex.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
offer := w.CreateOffer(sd)
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while setting remote description: " + err.Error())
|
||||
@@ -214,14 +274,16 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
if candidate == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a config map
|
||||
valueMap := make(map[string]interface{})
|
||||
candateJSON := candidate.ToJSON()
|
||||
sdpmid := "0"
|
||||
candateJSON.SDPMid = &sdpmid
|
||||
candateBinary, err := json.Marshal(candateJSON)
|
||||
if err == nil {
|
||||
valueMap["candidate"] = string(candateBinary)
|
||||
// SDP is not needed to be send..
|
||||
//valueMap["sdp"] = []byte(base64.StdEncoding.EncodeToString([]byte(answer.SDP)))
|
||||
valueMap["session_id"] = handshake.SessionID
|
||||
} else {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): something went wrong while marshalling candidate: " + err.Error())
|
||||
}
|
||||
@@ -250,6 +312,7 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
// Create a config map
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["sdp"] = []byte(base64.StdEncoding.EncodeToString([]byte(answer.SDP)))
|
||||
valueMap["session_id"] = handshake.SessionID
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Send SDP answer")
|
||||
|
||||
// We'll send the candidate to the hub
|
||||
@@ -306,16 +369,22 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
// Later when we read a packet we need to figure out which track to send it to.
|
||||
hasH264 := false
|
||||
hasPCM_MULAW := false
|
||||
hasAAC := false
|
||||
hasOpus := false
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
for _, stream := range streams {
|
||||
if stream.Name == "H264" {
|
||||
hasH264 = true
|
||||
} else if stream.Name == "PCM_MULAW" {
|
||||
hasPCM_MULAW = true
|
||||
} else if stream.Name == "AAC" {
|
||||
hasAAC = true
|
||||
} else if stream.Name == "OPUS" {
|
||||
hasOpus = true
|
||||
}
|
||||
}
|
||||
|
||||
if !hasH264 && !hasPCM_MULAW {
|
||||
if !hasH264 && !hasPCM_MULAW && !hasAAC && !hasOpus {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): no valid video codec and audio codec found.")
|
||||
} else {
|
||||
if config.Capture.TranscodingWebRTC == "true" {
|
||||
@@ -326,7 +395,8 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var previousTime time.Duration
|
||||
var lastAudioSample *pionMedia.Sample = nil
|
||||
var lastVideoSample *pionMedia.Sample = nil
|
||||
|
||||
start := false
|
||||
receivedKeyFrame := false
|
||||
@@ -336,14 +406,12 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
for cursorError == nil {
|
||||
|
||||
pkt, cursorError = livestreamCursor.ReadPacket()
|
||||
bufferDuration := pkt.Time - previousTime
|
||||
previousTime = pkt.Time
|
||||
|
||||
if config.Capture.ForwardWebRTC != "true" && peerConnectionCount == 0 {
|
||||
start = false
|
||||
receivedKeyFrame = false
|
||||
continue
|
||||
}
|
||||
//if config.Capture.ForwardWebRTC != "true" && peerConnectionCount == 0 {
|
||||
// start = false
|
||||
// receivedKeyFrame = false
|
||||
// continue
|
||||
//}
|
||||
|
||||
select {
|
||||
case lastKeepAlive = <-communication.HandleLiveHDKeepalive:
|
||||
@@ -385,35 +453,61 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
//}
|
||||
|
||||
if pkt.IsVideo {
|
||||
|
||||
// Start at the first keyframe
|
||||
if pkt.IsKeyFrame {
|
||||
start = true
|
||||
}
|
||||
if start {
|
||||
sample := pionMedia.Sample{Data: pkt.Data, Duration: bufferDuration}
|
||||
sample := pionMedia.Sample{Data: pkt.Data, PacketTimestamp: uint32(pkt.Time)}
|
||||
//sample = pionMedia.Sample{Data: pkt.Data, Duration: time.Second}
|
||||
if config.Capture.ForwardWebRTC == "true" {
|
||||
// We will send the video to a remote peer
|
||||
// TODO..
|
||||
} else {
|
||||
if err := videoTrack.WriteSample(sample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
if lastVideoSample != nil {
|
||||
duration := sample.PacketTimestamp - lastVideoSample.PacketTimestamp
|
||||
bufferDurationCasted := time.Duration(duration) * time.Millisecond
|
||||
lastVideoSample.Duration = bufferDurationCasted
|
||||
if err := videoTrack.WriteSample(*lastVideoSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
lastVideoSample = &sample
|
||||
}
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
// We will send the audio
|
||||
sample := pionMedia.Sample{Data: pkt.Data, Duration: pkt.Time}
|
||||
if err := audioTrack.WriteSample(sample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
|
||||
// @TODO: We need to check if the audio is PCM_MULAW or AAC
|
||||
// If AAC we need to transcode it to PCM_MULAW
|
||||
// If PCM_MULAW we can send it directly.
|
||||
|
||||
if hasAAC {
|
||||
// We will transcode the audio from AAC to PCM_MULAW
|
||||
// Not sure how to do this yet, but we need to use a decoder
|
||||
// and then encode it to PCM_MULAW.
|
||||
// TODO..
|
||||
//d := fdkaac.NewAacDecoder()
|
||||
continue
|
||||
}
|
||||
|
||||
// We will send the audio
|
||||
sample := pionMedia.Sample{Data: pkt.Data, PacketTimestamp: uint32(pkt.Time)}
|
||||
|
||||
if lastAudioSample != nil {
|
||||
duration := sample.PacketTimestamp - lastAudioSample.PacketTimestamp
|
||||
bufferDurationCasted := time.Duration(duration) * time.Millisecond
|
||||
lastAudioSample.Duration = bufferDurationCasted
|
||||
if err := audioTrack.WriteSample(*lastAudioSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
lastAudioSample = &sample
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, p := range peerConnections {
|
||||
if p != nil {
|
||||
p.Close()
|
||||
}
|
||||
}
|
||||
|
||||
peerConnectionCount = 0
|
||||
log.Log.Info("webrtc.main.WriteToTrack(): stop writing to track.")
|
||||
|
||||
4
machinery/update-mod.sh
Executable file
4
machinery/update-mod.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
export GOSUMDB=off
|
||||
rm -rf go.*
|
||||
go mod init github.com/kerberos-io/agent/machinery
|
||||
go mod tidy
|
||||
@@ -1,7 +1,6 @@
|
||||
{
|
||||
"name": "agent-ui",
|
||||
"version": "0.1.0",
|
||||
"private": false,
|
||||
"dependencies": {
|
||||
"@giantmachines/redux-websocket": "^1.5.1",
|
||||
"@kerberos-io/ui": "^1.76.0",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Allgemeine Einstellungen für den Kerberos Agent",
|
||||
"key": "Schlüssel",
|
||||
"camera_name": "Kamera Name",
|
||||
"camera_friendly_name": "Kamera Anzeigename",
|
||||
"timezone": "Zeitzone",
|
||||
"select_timezone": "Zeitzone auswählen",
|
||||
"advanced_configuration": "Erweiterte Konfiguration",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN Server",
|
||||
"turn_username": "Benutzername",
|
||||
"turn_password": "Passwort",
|
||||
"force_turn": "Erzwinge TURN",
|
||||
"force_turn_description": "Erzwinge die Verwendung von TURN",
|
||||
"stun_turn_forward": "Weiterleiten und transkodieren",
|
||||
"stun_turn_description_forward": "Optiemierungen und Verbesserungen der TURN/STUN Kommunikation.",
|
||||
"stun_turn_webrtc": "Weiterleiten an WebRTC Schnittstelle",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Die möglichkeit zur Speicherung der Daten an einem Zentralen Ort ist der Beginn einer effektiven Videoüberwachung. Es kann zwischen",
|
||||
"description2_persistence": ", oder einem Drittanbieter gewählt werden.",
|
||||
"select_persistence": "Speicherort auswählen",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "Der Proxy Endpunkt zum hochladen der Aufnahmen.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Profile",
|
||||
"admin": "admin",
|
||||
"admin": "Admin",
|
||||
"management": "Management",
|
||||
"dashboard": "Dashboard",
|
||||
"recordings": "Recordings",
|
||||
@@ -32,11 +32,11 @@
|
||||
"latest_events": "Latest events",
|
||||
"configure_connection": "Configure connection",
|
||||
"no_events": "No events",
|
||||
"no_events_description": "No recordings where found, make sure your Kerberos Agent is properly configured.",
|
||||
"no_events_description": "No recordings were found, make sure your Agent is properly configured.",
|
||||
"motion_detected": "Motion was detected",
|
||||
"live_view": "Live view",
|
||||
"loading_live_view": "Loading live view",
|
||||
"loading_live_view_description": "Hold on we are loading your live view here. If you didn't configure your camera connection, update it on the settings pages.",
|
||||
"loading_live_view_description": "Hold on, we are loading your live view here. If you didn't configure your camera connection, update it on the settings pages.",
|
||||
"time": "Time",
|
||||
"description": "Description",
|
||||
"name": "Name"
|
||||
@@ -59,31 +59,32 @@
|
||||
"persistence": "Persistence"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Have a look at our Kerberos Hub demo environment, to see Kerberos Hub in action!",
|
||||
"configuration_updated_success": "Your configuration have been updated successfully.",
|
||||
"kerberos_hub_demo": "Have a look at our Hub demo environment, to see Hub in action!",
|
||||
"configuration_updated_success": "Your configuration has been updated successfully.",
|
||||
"configuration_updated_error": "Something went wrong while saving.",
|
||||
"verify_hub": "Verifying your Kerberos Hub settings.",
|
||||
"verify_hub_success": "Kerberos Hub settings are successfully verified.",
|
||||
"verify_hub_error": "Something went wrong while verifying Kerberos Hub",
|
||||
"verify_hub": "Verifying your Hub settings.",
|
||||
"verify_hub_success": "Hub settings are successfully verified.",
|
||||
"verify_hub_error": "Something went wrong while verifying Hub.",
|
||||
"verify_persistence": "Verifying your persistence settings.",
|
||||
"verify_persistence_success": "Persistence settings are successfully verified.",
|
||||
"verify_persistence_error": "Something went wrong while verifying the persistence",
|
||||
"verify_persistence_error": "Something went wrong while verifying the persistence.",
|
||||
"verify_camera": "Verifying your camera settings.",
|
||||
"verify_camera_success": "Camera settings are successfully verified.",
|
||||
"verify_camera_error": "Something went wrong while verifying the camera settings",
|
||||
"verify_camera_error": "Something went wrong while verifying the camera settings.",
|
||||
"verify_onvif": "Verifying your ONVIF settings.",
|
||||
"verify_onvif_success": "ONVIF settings are successfully verified.",
|
||||
"verify_onvif_error": "Something went wrong while verifying the ONVIF settings"
|
||||
"verify_onvif_error": "Something went wrong while verifying the ONVIF settings."
|
||||
},
|
||||
"overview": {
|
||||
"general": "General",
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"description_general": "General settings for your Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
"description_advanced_configuration": "Detailed configuration options to enable or disable specific parts of the Kerberos Agent",
|
||||
"description_advanced_configuration": "Detailed configuration options to enable or disable specific parts of the Agent",
|
||||
"offline_mode": "Offline mode",
|
||||
"description_offline_mode": "Disable all outgoing traffic",
|
||||
"encryption": "Encryption",
|
||||
@@ -100,9 +101,9 @@
|
||||
"camera": "Camera",
|
||||
"description_camera": "Camera settings are required to make a connection to your camera of choice.",
|
||||
"only_h264": "Currently only H264/H265 RTSP streams are supported.",
|
||||
"rtsp_url": "RTSP url",
|
||||
"rtsp_url": "RTSP URL",
|
||||
"rtsp_h264": "A H264/H265 RTSP connection to your camera.",
|
||||
"sub_rtsp_url": "Sub RTSP url (used for livestreaming)",
|
||||
"sub_rtsp_url": "Sub RTSP URL (used for livestreaming)",
|
||||
"sub_rtsp_h264": "A secondary RTSP connection to the low resolution of your camera.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Credentials to communicate with ONVIF capabilities. These are used for PTZ or other capabilities provided by the camera.",
|
||||
@@ -114,28 +115,28 @@
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Recording",
|
||||
"description_recording": "Specify how you would like to make recordings. Having a continuous 24/7 setup or a motion based recording.",
|
||||
"description_recording": "Specify how you would like to make recordings. Having a continuous 24/7 setup or a motion-based recording.",
|
||||
"continuous_recording": "Continuous recording",
|
||||
"description_continuous_recording": "Make 24/7 or motion based recordings.",
|
||||
"max_duration": "max video duration (seconds)",
|
||||
"description_continuous_recording": "Make 24/7 or motion-based recordings.",
|
||||
"max_duration": "Max video duration (seconds)",
|
||||
"description_max_duration": "The maximum duration of a recording.",
|
||||
"pre_recording": "pre recording (key frames buffered)",
|
||||
"pre_recording": "Pre recording (key frames buffered)",
|
||||
"description_pre_recording": "Seconds before an event occurred.",
|
||||
"post_recording": "post recording (seconds)",
|
||||
"post_recording": "Post recording (seconds)",
|
||||
"description_post_recording": "Seconds after an event occurred.",
|
||||
"threshold": "Recording threshold (pixels)",
|
||||
"description_threshold": "The number of pixels changed to record",
|
||||
"description_threshold": "The number of pixels changed to record.",
|
||||
"autoclean": "Auto clean",
|
||||
"description_autoclean": "Specify if the Kerberos Agent can cleanup recordings when a specific storage capacity (MB) is reached. This will remove the oldest recordings when the capacity is reached.",
|
||||
"description_autoclean": "Specify if the Agent can clean up recordings when a specific storage capacity (MB) is reached. This will remove the oldest recordings when the capacity is reached.",
|
||||
"autoclean_enable": "Enable auto clean",
|
||||
"autoclean_description_enable": "Remove oldest recording when capacity reached.",
|
||||
"autoclean_max_directory_size": "Maximum directory size (MB)",
|
||||
"autoclean_description_max_directory_size": "The maximum MB's of recordings stored.",
|
||||
"autoclean_description_max_directory_size": "The maximum MBs of recordings stored.",
|
||||
"fragmentedrecordings": "Fragmented recordings",
|
||||
"description_fragmentedrecordings": "When recordings are fragmented they are suitable for an HLS stream. When turned on the MP4 container will look a bit different.",
|
||||
"description_fragmentedrecordings": "When recordings are fragmented they are suitable for an HLS stream. When turned on, the MP4 container will look a bit different.",
|
||||
"fragmentedrecordings_enable": "Enable fragmentation",
|
||||
"fragmentedrecordings_description_enable": "Fragmented recordings are required for HLS.",
|
||||
"fragmentedrecordings_duration": "fragment duration",
|
||||
"fragmentedrecordings_duration": "Fragment duration",
|
||||
"fragmentedrecordings_description_duration": "Duration of a single fragment."
|
||||
},
|
||||
"streaming": {
|
||||
@@ -145,19 +146,26 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_description_forward": "Optimizations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
"stun_turn_description_webrtc": "Forward h264 stream through MQTT",
|
||||
"stun_turn_description_webrtc": "Forward H264 stream through MQTT",
|
||||
"stun_turn_transcode": "Transcode stream",
|
||||
"stun_turn_description_transcode": "Convert stream to a lower resolution",
|
||||
"stun_turn_downscale": "Downscale resolution (in % of original resolution)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "A MQTT broker is used to communicate from",
|
||||
"description2_mqtt": "to the Kerberos Agent, to achieve for example livestreaming or ONVIF (PTZ) capabilities.",
|
||||
"mqtt_brokeruri": "Broker Uri",
|
||||
"description_mqtt": "An MQTT broker is used to communicate from",
|
||||
"description2_mqtt": "to the Agent, to achieve for example livestreaming or ONVIF (PTZ) capabilities.",
|
||||
"mqtt_brokeruri": "Broker URI",
|
||||
"mqtt_username": "Username",
|
||||
"mqtt_password": "Password"
|
||||
"mqtt_password": "Password",
|
||||
"realtimeprocessing": "Realtime Processing",
|
||||
"description_realtimeprocessing": "By enabling realtime processing, you will receive realtime video keyframes through the MQTT connection specified above.",
|
||||
"realtimeprocessing_topic": "Topic to publish",
|
||||
"realtimeprocessing_enabled": "Enable realtime processing",
|
||||
"description_realtimeprocessing_enabled": "Send realtime video keyframes through MQTT."
|
||||
},
|
||||
"conditions": {
|
||||
"timeofinterest": "Time Of Interest",
|
||||
@@ -172,53 +180,61 @@
|
||||
"friday": "Friday",
|
||||
"saturday": "Saturday",
|
||||
"externalcondition": "External Condition",
|
||||
"description_externalcondition": "Depending on an external webservice recording can be enabled or disabled.",
|
||||
"description_externalcondition": "Depending on an external web service, recording can be enabled or disabled.",
|
||||
"regionofinterest": "Region Of Interest",
|
||||
"description_regionofinterest": "By defining one or more regions, motion will be tracked only in the regions you have defined."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Kerberos Agents can send heartbeats to a central",
|
||||
"description2_kerberoshub": "installation. Heartbeats and other relevant information are synced to Kerberos Hub to show realtime information about your video landscape.",
|
||||
"kerberoshub": "Hub",
|
||||
"description_kerberoshub": "Agents can send heartbeats to a central",
|
||||
"description2_kerberoshub": "installation. Heartbeats and other relevant information are synced to Hub to show realtime information about your video landscape.",
|
||||
"persistence": "Persistence",
|
||||
"saasoffering": "Kerberos Hub (SAAS offering)",
|
||||
"secondary_persistence": "Secondary Persistence",
|
||||
"description_secondary_persistence": "Recordings will be sent to secondary persistence if the primary persistence is unavailable or fails. This can be useful for failover purposes.",
|
||||
"saasoffering": "Hub (SaaS offering)",
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Hub will be encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
"kerberoshub_apiurl": "Hub API URL",
|
||||
"kerberoshub_description_apiurl": "The API endpoint for uploading your recordings.",
|
||||
"kerberoshub_publickey": "Public key",
|
||||
"kerberoshub_description_publickey": "The public key granted to your Kerberos Hub account.",
|
||||
"kerberoshub_description_publickey": "The public key granted to your Hub account.",
|
||||
"kerberoshub_privatekey": "Private key",
|
||||
"kerberoshub_description_privatekey": "The private key granted to your Kerberos Hub account.",
|
||||
"kerberoshub_description_privatekey": "The private key granted to your Hub account.",
|
||||
"kerberoshub_site": "Site",
|
||||
"kerberoshub_description_site": "The site ID the Kerberos Agents are belonging to in Kerberos Hub.",
|
||||
"kerberoshub_description_site": "The site ID the Agents belong to in Hub.",
|
||||
"kerberoshub_region": "Region",
|
||||
"kerberoshub_description_region": "The region we are storing our recordings in.",
|
||||
"kerberoshub_bucket": "Bucket",
|
||||
"kerberoshub_description_bucket": "The bucket we are storing our recordings in.",
|
||||
"kerberoshub_username": "Username/Directory (should match Kerberos Hub username)",
|
||||
"kerberoshub_description_username": "The username of your Kerberos Hub account.",
|
||||
"kerberosvault_apiurl": "Kerberos Vault API URL",
|
||||
"kerberosvault_description_apiurl": "The Kerberos Vault API",
|
||||
"kerberoshub_username": "Username/Directory (should match Hub username)",
|
||||
"kerberoshub_description_username": "The username of your Hub account.",
|
||||
"kerberosvault_apiurl": "Vault API URL",
|
||||
"kerberosvault_description_apiurl": "The Vault API",
|
||||
"kerberosvault_provider": "Provider",
|
||||
"kerberosvault_description_provider": "The provider to which your recordings will be send.",
|
||||
"kerberosvault_directory": "Directory (should match Kerberos Hub username)",
|
||||
"kerberosvault_description_directory": "Sub directory the recordings will be stored in your provider.",
|
||||
"kerberosvault_description_provider": "The provider to which your recordings will be sent.",
|
||||
"kerberosvault_directory": "Directory (should match Hub username)",
|
||||
"kerberosvault_description_directory": "Subdirectory the recordings will be stored in your provider.",
|
||||
"kerberosvault_accesskey": "Access key",
|
||||
"kerberosvault_description_accesskey": "The access key of your Kerberos Vault account.",
|
||||
"kerberosvault_description_accesskey": "The access key of your Vault account.",
|
||||
"kerberosvault_secretkey": "Secret key",
|
||||
"kerberosvault_description_secretkey": "The secret key of your Kerberos Vault account.",
|
||||
"kerberosvault_description_secretkey": "The secret key of your Vault account.",
|
||||
"kerberosvault_maxretries": "Max retries",
|
||||
"kerberosvault_description_maxretries": "The maximum number of retries to upload a recording.",
|
||||
"kerberosvault_timeout": "Timeout",
|
||||
"kerberosvault_description_timeout": "If a timeout occurs, recordings will be sent directly to the secondary Vault.",
|
||||
"dropbox_directory": "Directory",
|
||||
"dropbox_description_directory": "The sub directory where the recordings will be stored in your Dropbox account.",
|
||||
"dropbox_description_directory": "The subdirectory where the recordings will be stored in your Dropbox account.",
|
||||
"dropbox_accesstoken": "Access token",
|
||||
"dropbox_description_accesstoken": "The access token of your Dropbox account/app.",
|
||||
"verify_connection": "Verify Connection",
|
||||
"remove_after_upload": "Once recordings are uploaded to some persistence, you might want to remove them from the local Kerberos Agent.",
|
||||
"remove_after_upload": "Once recordings are uploaded to some persistence, you might want to remove them from the local Agent.",
|
||||
"remove_after_upload_description": "Remove recordings after they are uploaded successfully.",
|
||||
"remove_after_upload_enabled": "Enabled delete on upload"
|
||||
"remove_after_upload_enabled": "Enable delete on upload"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Camera friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -79,6 +79,7 @@
|
||||
"description_general": "Paramètres généraux pour votre Agent Kerberos",
|
||||
"key": "Clé",
|
||||
"camera_name": "Nom de la caméra",
|
||||
"camera_friendly_name": "Nom convivial de la caméra",
|
||||
"timezone": "Fuseau horaire",
|
||||
"select_timezone": "Sélectionner un fuseau horaire",
|
||||
"advanced_configuration": "Configuration avancée",
|
||||
@@ -144,6 +145,8 @@
|
||||
"turn_server": "Serveur TURN",
|
||||
"turn_username": "Nom d'utilisateur",
|
||||
"turn_password": "Mot de passe",
|
||||
"force_turn": "Forcer l'utilisation de TURN",
|
||||
"force_turn_description": "Forcer l'utilisation de TURN au lieu de STUN",
|
||||
"stun_turn_forward": "Redirection et transcodage",
|
||||
"stun_turn_description_forward": "Optimisations et améliorations pour la communication TURN/STUN.",
|
||||
"stun_turn_webrtc": "Redirection pour l'agent WebRTC",
|
||||
@@ -184,6 +187,8 @@
|
||||
"description_persistence": "Avoir la possibilité de stocker vos enregistrements est le commencement de tout. Vous pouvez choisir entre notre",
|
||||
"description2_persistence": " ou auprès d'un fournisseur tiers",
|
||||
"select_persistence": "Sélectionner une persistance",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "URL du proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Le point de terminaison du proxy pour téléverser vos enregistrements.",
|
||||
"kerberoshub_apiurl": "URL de l'API Kerberos Hub",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "आपके Kerberos एजेंट के लिए सामान्य सेटिंग्स",
|
||||
"key": "की",
|
||||
"camera_name": "कैमरे का नाम",
|
||||
"camera_friendly_name": "कैमरे का नाम",
|
||||
"timezone": "समय क्षेत्र",
|
||||
"select_timezone": "समयक्षेत्र चुनें",
|
||||
"advanced_configuration": "एडवांस कॉन्फ़िगरेशन",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "उपयोगकर्ता नाम",
|
||||
"turn_password": "पासवर्ड",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "फोरवर्डींग और ट्रांसकोडिंग",
|
||||
"stun_turn_description_forward": "TURN/STUN संचार के लिए अनुकूलन और संवर्द्धन।",
|
||||
"stun_turn_webrtc": "WebRTC ब्रोकर को फोरवर्डींग किया जा रहा है",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "अपनी रिकॉर्डिंग संग्रहीत करने की क्षमता होना हर चीज़ की शुरुआत है। ",
|
||||
"description2_persistence": ", या कोई तृतीय पक्ष प्रदाता",
|
||||
"select_persistence": "एक दृढ़ता का चयन करें",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos हब प्रॉक्सी URL",
|
||||
"kerberoshub_description_proxyurl": "आपकी रिकॉर्डिंग अपलोड करने के लिए प्रॉक्सी एंडपॉइंट।",
|
||||
"kerberoshub_apiurl": "Kerberos हब API URL",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Impostazioni generali del Kerberos Agent",
|
||||
"key": "Chiave",
|
||||
"camera_name": "Nome videocamera",
|
||||
"camera_friendly_name": "Nome amichevole videocamera",
|
||||
"timezone": "Fuso orario",
|
||||
"select_timezone": "Seleziona un fuso orario",
|
||||
"advanced_configuration": "Configurazione avanzata",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Forza TURN",
|
||||
"force_turn_description": "Forza l'uso di TURN per lo streaming in diretta.",
|
||||
"stun_turn_forward": "Inoltro e transcodifica",
|
||||
"stun_turn_description_forward": "Ottimizzazioni e miglioramenti per la comunicazione TURN/STUN.",
|
||||
"stun_turn_webrtc": "Inoltro al broker WebRTC",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "La possibilità di poter salvare le tue registrazioni rappresenta l'inizio di tutto. Puoi scegliere tra il nostro",
|
||||
"description2_persistence": ", oppure un provider di terze parti",
|
||||
"select_persistence": "Seleziona una persistenza",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "URL Proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Endpoint del Proxy per l'upload delle registrazioni.",
|
||||
"kerberoshub_apiurl": "API URL Kerberos Hub",
|
||||
@@ -221,4 +226,4 @@
|
||||
"remove_after_upload_enabled": "Abilita cancellazione al caricamento"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Kerberos エージェントの一般設定",
|
||||
"key": "鍵",
|
||||
"camera_name": "カメラ名",
|
||||
"camera_friendly_name": "カメラのフレンドリー名",
|
||||
"timezone": "タイムゾーン",
|
||||
"select_timezone": "タイムゾーンを選択",
|
||||
"advanced_configuration": "詳細設定",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURNサーバー",
|
||||
"turn_username": "ユーザー名",
|
||||
"turn_password": "パスワード",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "転送とトランスコーディング",
|
||||
"stun_turn_description_forward": "TURN/STUN 通信の最適化と機能強化。",
|
||||
"stun_turn_webrtc": "WebRTC ブローカーへの転送",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "録音を保存する機能を持つことは、すべての始まりです。",
|
||||
"description2_persistence": "、またはサードパーティのプロバイダ",
|
||||
"select_persistence": "永続性を選択",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos ハブ プロキシ URL",
|
||||
"kerberoshub_description_proxyurl": "記録をアップロードするためのプロキシ エンドポイント。",
|
||||
"kerberoshub_apiurl": "ケルベロス ハブ API URL",
|
||||
@@ -221,4 +226,4 @@
|
||||
"remove_after_upload_enabled": "Enabled delete on upload"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Algemene instellingen voor jouw Kerberos Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera naam",
|
||||
"camera_friendly_name": "Camera vriendelijke naam",
|
||||
"timezone": "Tijdzone",
|
||||
"select_timezone": "Selecteer uw tijdzone",
|
||||
"advanced_configuration": "Geavanceerde instellingen",
|
||||
@@ -146,6 +147,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Gebruikersnaam",
|
||||
"turn_password": "Wachtwoord",
|
||||
"force_turn": "Verplicht TURN",
|
||||
"force_turn_description": "Verplicht TURN connectie, ook al is er een STUN connectie mogelijk.",
|
||||
"stun_turn_forward": "Doorsturen en transcoden",
|
||||
"stun_turn_description_forward": "Optimalisatie en verbetering voor TURN/STUN communicatie.",
|
||||
"stun_turn_webrtc": "Doorsturen naar een WebRTC broker",
|
||||
@@ -186,6 +189,8 @@
|
||||
"description_persistence": "De mogelijkheid om jouw opnames op te slaan is het begin van alles. Je kan kiezen tussen ons",
|
||||
"description2_persistence": ", of een 3rd party provider",
|
||||
"select_persistence": "Selecteer een opslagmethode",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "De Proxy url voor het opladen van jouw opnames.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Camera friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Configurações gerais para seu agente Kerberos",
|
||||
"key": "Chave",
|
||||
"camera_name": "Nome da câmera",
|
||||
"camera_friendly_name": "Nome amigável da câmera",
|
||||
"timezone": "Fuso horário",
|
||||
"select_timezone": "Selecione a timezone",
|
||||
"advanced_configuration": "Configurações avançadas",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "Servidor TURN",
|
||||
"turn_username": "Usuario",
|
||||
"turn_password": "Senha",
|
||||
"force_turn": "Forçar TURN",
|
||||
"force_turn_description": "Forçar o uso de TURN em vez de STUN.",
|
||||
"stun_turn_forward": "Encaminhamento e transcodificação",
|
||||
"stun_turn_description_forward": "Otimizações e melhorias para a comunicação TURN/STUN.",
|
||||
"stun_turn_webrtc": "Encaminhamento para broker WebRTC",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Ter a capacidade de armazenar suas gravações é o começo de tudo. Você pode escolher entre nossos",
|
||||
"description2_persistence": ", ou um provedor terceirizado",
|
||||
"select_persistence": "Selecione um provedor de armazenamento",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Url proxy para Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "O endpoint Proxy para enviar suas gravações.",
|
||||
"kerberoshub_apiurl": "Url de API do Kerberos Hub",
|
||||
|
||||
234
ui/public/locales/ro/translation.json
Normal file
234
ui/public/locales/ro/translation.json
Normal file
@@ -0,0 +1,234 @@
|
||||
{
|
||||
"breadcrumb": {
|
||||
"watch_recordings": "Vizionează înregistrări",
|
||||
"configure": "Configurează"
|
||||
},
|
||||
"buttons": {
|
||||
"save": "Salvează",
|
||||
"verify_connection": "Verifică conexiunea"
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Profil",
|
||||
"admin": "admin",
|
||||
"management": "Management",
|
||||
"dashboard": "Tablou de bord",
|
||||
"recordings": "Înregistrări",
|
||||
"settings": "Setări",
|
||||
"help_support": "Ajutor & Suport",
|
||||
"swagger": "Swagger API",
|
||||
"documentation": "Documentație",
|
||||
"ui_library": "Bibliotecă UI",
|
||||
"layout": "Limbă și aspect",
|
||||
"choose_language": "Alege limba"
|
||||
},
|
||||
"dashboard": {
|
||||
"title": "Tablou de bord",
|
||||
"heading": "Prezentare generală a supravegherii video",
|
||||
"number_of_days": "Număr de zile",
|
||||
"total_recordings": "Înregistrări totale",
|
||||
"connected": "Conectat",
|
||||
"not_connected": "Neconectat",
|
||||
"offline_mode": "Mod offline",
|
||||
"latest_events": "Evenimente recente",
|
||||
"configure_connection": "Configurează conexiunea",
|
||||
"no_events": "Niciun eveniment",
|
||||
"no_events_description": "Nu au fost găsite înregistrări, asigurați-vă că agentul dvs. Kerberos este configurat corect.",
|
||||
"motion_detected": "Mișcare detectată",
|
||||
"live_view": "Vizualizare live",
|
||||
"loading_live_view": "Se încarcă vizualizarea live",
|
||||
"loading_live_view_description": "Așteptați, încărcăm vizualizarea dvs. live. Dacă nu ați configurat conexiunea camerei, actualizați-o în paginile de setări.",
|
||||
"time": "Timp",
|
||||
"description": "Descriere",
|
||||
"name": "Nume"
|
||||
},
|
||||
"recordings": {
|
||||
"title": "Înregistrări",
|
||||
"heading": "Toate înregistrările tale într-un singur loc",
|
||||
"search_media": "Caută media"
|
||||
},
|
||||
"settings": {
|
||||
"title": "Setări",
|
||||
"heading": "Prezentare generală a setărilor camerei și agentului",
|
||||
"submenu": {
|
||||
"all": "Toate",
|
||||
"overview": "General",
|
||||
"camera": "Camera",
|
||||
"recording": "Înregistrare",
|
||||
"streaming": "Streaming",
|
||||
"conditions": "Condiții",
|
||||
"persistence": "Persistență"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Aruncă o privire asupra mediului nostru de demonstrație Kerberos Hub, pentru a vedea Kerberos Hub în acțiune!",
|
||||
"configuration_updated_success": "Configurarea ta a fost actualizată cu succes.",
|
||||
"configuration_updated_error": "Ceva a mers prost în timpul salvării.",
|
||||
"verify_hub": "Verificarea setărilor tale Kerberos Hub.",
|
||||
"verify_hub_success": "Setările Kerberos Hub au fost verificate cu succes.",
|
||||
"verify_hub_error": "Ceva a mers prost în timpul verificării Kerberos Hub.",
|
||||
"verify_persistence": "Verificarea setărilor tale de persistență.",
|
||||
"verify_persistence_success": "Setările de persistență au fost verificate cu succes.",
|
||||
"verify_persistence_error": "Ceva a mers prost în timpul verificării persistenței.",
|
||||
"verify_camera": "Verificarea setărilor tale pentru cameră.",
|
||||
"verify_camera_success": "Setările pentru cameră au fost verificate cu succes.",
|
||||
"verify_camera_error": "Ceva a mers prost în timpul verificării setărilor pentru cameră.",
|
||||
"verify_onvif": "Verificarea setărilor tale ONVIF.",
|
||||
"verify_onvif_success": "Setările ONVIF au fost verificate cu succes.",
|
||||
"verify_onvif_error": "Ceva a mers prost în timpul verificării setărilor ONVIF."
|
||||
},
|
||||
"overview": {
|
||||
"general": "General",
|
||||
"description_general": "Setări generale pentru Agentul tău Kerberos",
|
||||
"key": "Cheie",
|
||||
"camera_name": "Numele camerei",
|
||||
"camera_friendly_name": "Nume prietenos",
|
||||
"timezone": "Fus orar",
|
||||
"select_timezone": "Selectează un fus orar",
|
||||
"advanced_configuration": "Configurare avansată",
|
||||
"description_advanced_configuration": "Opțiuni detaliate de configurare pentru activarea sau dezactivarea anumitor părți ale Agentului Kerberos",
|
||||
"offline_mode": "Mod offline",
|
||||
"description_offline_mode": "Dezactivează tot traficul ieșit",
|
||||
"encryption": "Criptare",
|
||||
"description_encryption": "Activează criptarea pentru tot traficul ieșit. Mesajele MQTT și/sau înregistrările vor fi criptate folosind AES-256. O cheie privată este utilizată pentru semnare.",
|
||||
"encryption_enabled": "Activează criptarea MQTT",
|
||||
"description_encryption_enabled": "Activează criptarea pentru toate mesajele MQTT.",
|
||||
"encryption_recordings_enabled": "Activează criptarea înregistrărilor",
|
||||
"description_encryption_recordings_enabled": "Activează criptarea pentru toate înregistrările.",
|
||||
"encryption_fingerprint": "Amprentă",
|
||||
"encryption_privatekey": "Cheie privată",
|
||||
"encryption_symmetrickey": "Cheie simetrică"
|
||||
},
|
||||
"camera": {
|
||||
"camera": "Camera",
|
||||
"description_camera": "Setările camerei sunt necesare pentru a face o conexiune cu camera aleasă de tine.",
|
||||
"only_h264": "În prezent sunt suportate doar fluxurile RTSP H264/H265.",
|
||||
"rtsp_url": "URL RTSP",
|
||||
"rtsp_h264": "O conexiune RTSP H264/H265 la camera ta.",
|
||||
"sub_rtsp_url": "URL RTSP secundar (folosit pentru transmisie live)",
|
||||
"sub_rtsp_h264": "O conexiune RTSP secundară la rezoluția redusă a camerei tale.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Credențiale pentru comunicarea cu capabilitățile ONVIF. Acestea sunt folosite pentru funcții PTZ sau alte capabilități oferite de cameră.",
|
||||
"onvif_xaddr": "Adresă ONVIF",
|
||||
"onvif_username": "Nume utilizator ONVIF",
|
||||
"onvif_password": "Parolă ONVIF",
|
||||
"verify_connection": "Verifică conexiunea",
|
||||
"verify_sub_connection": "Verifică conexiunea secundară"
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Înregistrare",
|
||||
"description_recording": "Specificați cum doriți să realizați înregistrări. Puteți avea o configurație continuă 24/7 sau înregistrări bazate pe mișcare.",
|
||||
"continuous_recording": "Înregistrare continuă",
|
||||
"description_continuous_recording": "Realizați înregistrări 24/7 sau bazate pe mișcare.",
|
||||
"max_duration": "durata maximă a videoclipului (secunde)",
|
||||
"description_max_duration": "Durata maximă a unei înregistrări.",
|
||||
"pre_recording": "pre-înregistrare (cadre cheie tamponate)",
|
||||
"description_pre_recording": "Secunde înainte de producerea unui eveniment.",
|
||||
"post_recording": "post-înregistrare (secunde)",
|
||||
"description_post_recording": "Secunde după producerea unui eveniment.",
|
||||
"threshold": "Prag de înregistrare (pixeli)",
|
||||
"description_threshold": "Numărul de pixeli modificați pentru a înregistra.",
|
||||
"autoclean": "Curățare automată",
|
||||
"description_autoclean": "Specificați dacă Agentul Kerberos poate curăța automat înregistrările când se atinge o anumită capacitate de stocare (MB). Se vor șterge cele mai vechi înregistrări când se atinge capacitatea specificată.",
|
||||
"autoclean_enable": "Activează curățarea automată",
|
||||
"autoclean_description_enable": "Șterge cele mai vechi înregistrări când capacitatea este atinsă.",
|
||||
"autoclean_max_directory_size": "Dimensiunea maximă a directorului (MB)",
|
||||
"autoclean_description_max_directory_size": "Maximum de MB stocați în înregistrări.",
|
||||
"fragmentedrecordings": "Înregistrări fragmentate",
|
||||
"description_fragmentedrecordings": "Când înregistrările sunt fragmentate, sunt potrivite pentru un flux HLS. Când este activat, containerul MP4 va arăta puțin diferit.",
|
||||
"fragmentedrecordings_enable": "Activează fragmentarea",
|
||||
"fragmentedrecordings_description_enable": "Înregistrările fragmentate sunt necesare pentru HLS.",
|
||||
"fragmentedrecordings_duration": "durata fragmentului",
|
||||
"fragmentedrecordings_description_duration": "Durata unui singur fragment."
|
||||
},
|
||||
"streaming": {
|
||||
"stun_turn": "STUN/TURN pentru WebRTC",
|
||||
"description_stun_turn": "Pentru transmisii live la rezoluție completă folosim conceptul WebRTC. Una dintre capabilitățile cheie este funcționalitatea ICE-candidate, care permite traversarea NAT folosind conceptele STUN/TURN.",
|
||||
"stun_server": "Server STUN",
|
||||
"turn_server": "Server TURN",
|
||||
"turn_username": "Nume utilizator",
|
||||
"turn_password": "Parolă",
|
||||
"force_turn": "Forțează TURN",
|
||||
"force_turn_description": "Utilizează TURN în mod forțat, chiar și atunci când STUN este disponibil.",
|
||||
"stun_turn_forward": "Redirecționare și transcodare",
|
||||
"stun_turn_description_forward": "Optimizări și îmbunătățiri pentru comunicarea TURN/STUN.",
|
||||
"stun_turn_webrtc": "Redirecționare către broker WebRTC",
|
||||
"stun_turn_description_webrtc": "Redirecționare flux h264 prin MQTT",
|
||||
"stun_turn_transcode": "Transcodare flux",
|
||||
"stun_turn_description_transcode": "Convertire flux la o rezoluție mai mică",
|
||||
"stun_turn_downscale": "Scădere rezoluție (în % din rezoluția originală)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "Un broker MQTT este utilizat pentru comunicare de la",
|
||||
"description2_mqtt": "către Agentul Kerberos, pentru a realiza, de exemplu, transmisiuni live sau capabilități ONVIF (PTZ).",
|
||||
"mqtt_brokeruri": "URI broker MQTT",
|
||||
"mqtt_username": "Nume utilizator",
|
||||
"mqtt_password": "Parolă",
|
||||
"realtimeprocessing": "Procesare în timp real",
|
||||
"description_realtimeprocessing": "Prin activarea procesării în timp real, veți primi cadre cheie video în timp real prin conexiunea MQTT specificată mai sus.",
|
||||
"realtimeprocessing_topic": "Topic pentru publicare",
|
||||
"realtimeprocessing_enabled": "Activează procesarea în timp real",
|
||||
"description_realtimeprocessing_enabled": "Trimite cadre video în timp real prin MQTT."
|
||||
},
|
||||
"conditions": {
|
||||
"timeofinterest": "Timpul de Interes",
|
||||
"description_timeofinterest": "Realizează înregistrări doar între intervale de timp specifice (bazate pe fusul orar).",
|
||||
"timeofinterest_enabled": "Activat",
|
||||
"timeofinterest_description_enabled": "Dacă este activat, puteți specifica intervale de timp",
|
||||
"sunday": "Duminică",
|
||||
"monday": "Luni",
|
||||
"tuesday": "Marți",
|
||||
"wednesday": "Miercuri",
|
||||
"thursday": "Joi",
|
||||
"friday": "Vineri",
|
||||
"saturday": "Sâmbătă",
|
||||
"externalcondition": "Condiție Externă",
|
||||
"description_externalcondition": "În funcție de un serviciu web extern, înregistrarea poate fi activată sau dezactivată.",
|
||||
"regionofinterest": "Regiunea de Interes",
|
||||
"description_regionofinterest": "Prin definirea unei sau mai multor regiuni, mișcarea va fi urmărită doar în regiunile pe care le-ați definit."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Agenta Kerberos poate trimite semnale de puls către o",
|
||||
"description2_kerberoshub": "instalație centrală. Semnalele de puls și alte informații relevante sunt sincronizate cu Kerberos Hub pentru a afișa informații în timp real despre peisajul video.",
|
||||
"persistence": "Persistență",
|
||||
"saasoffering": "Kerberos Hub (ofertă SAAS)",
|
||||
"description_persistence": "Capacitatea de a stoca înregistrările este începutul fiecărei",
|
||||
"description2_persistence": ", sau de la un furnizor terț",
|
||||
"select_persistence": "Selectați o persistență",
|
||||
"kerberoshub_encryption": "Criptare",
|
||||
"kerberoshub_encryption_description": "Tot traficul de la/spre Kerberos Hub va fi criptat folosind AES-256.",
|
||||
"kerberoshub_proxyurl": "URL Proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Punctul final Proxy pentru încărcarea înregistrărilor tale.",
|
||||
"kerberoshub_apiurl": "URL API Kerberos Hub",
|
||||
"kerberoshub_description_apiurl": "Punctul final API pentru încărcarea înregistrărilor tale.",
|
||||
"kerberoshub_publickey": "Cheie publică",
|
||||
"kerberoshub_description_publickey": "Cheia publică acordată contului tău Kerberos Hub.",
|
||||
"kerberoshub_privatekey": "Cheie privată",
|
||||
"kerberoshub_description_privatekey": "Cheia privată acordată contului tău Kerberos Hub.",
|
||||
"kerberoshub_site": "Site",
|
||||
"kerberoshub_description_site": "ID-ul site-ului la care aparțin Agenta Kerberos în Kerberos Hub.",
|
||||
"kerberoshub_region": "Regiune",
|
||||
"kerberoshub_description_region": "Regiunea în care sunt stocate înregistrările noastre.",
|
||||
"kerberoshub_bucket": "Bucket",
|
||||
"kerberoshub_description_bucket": "Bucket-ul în care sunt stocate înregistrările noastre.",
|
||||
"kerberoshub_username": "Nume utilizator/Director (trebuie să se potrivească cu numele de utilizator Kerberos Hub)",
|
||||
"kerberoshub_description_username": "Numele de utilizator al contului tău Kerberos Hub.",
|
||||
"kerberosvault_apiurl": "URL API Kerberos Vault",
|
||||
"kerberosvault_description_apiurl": "API-ul Kerberos Vault",
|
||||
"kerberosvault_provider": "Furnizor",
|
||||
"kerberosvault_description_provider": "Furnizorul către care vor fi trimise înregistrările tale.",
|
||||
"kerberosvault_directory": "Director (trebuie să se potrivească cu numele de utilizator Kerberos Hub)",
|
||||
"kerberosvault_description_directory": "Subdirectorul în care vor fi stocate înregistrările la furnizorul tău.",
|
||||
"kerberosvault_accesskey": "Cheie de acces",
|
||||
"kerberosvault_description_accesskey": "Cheia de acces a contului tău Kerberos Vault.",
|
||||
"kerberosvault_secretkey": "Cheie secretă",
|
||||
"kerberosvault_description_secretkey": "Cheia secretă a contului tău Kerberos Vault.",
|
||||
"dropbox_directory": "Director",
|
||||
"dropbox_description_directory": "Subdirectorul în care vor fi stocate înregistrările în contul tău Dropbox.",
|
||||
"dropbox_accesstoken": "Token de acces",
|
||||
"dropbox_description_accesstoken": "Tokenul de acces al contului/aplicației tale Dropbox.",
|
||||
"verify_connection": "Verifică conexiunea",
|
||||
"remove_after_upload": "Odată ce înregistrările sunt încărcate într-o persistență, este posibil să doriți să le ștergeți de pe Agenta Kerberos locală.",
|
||||
"remove_after_upload_description": "Ștergeți înregistrările după ce sunt încărcate cu succes.",
|
||||
"remove_after_upload_enabled": "Ștergere activată la încărcare"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Общие настройки Kerberos Agent",
|
||||
"key": "Ключ",
|
||||
"camera_name": "Название камеры",
|
||||
"camera_friendly_name": "Дружественное название камеры",
|
||||
"timezone": "Часовой пояс",
|
||||
"select_timezone": "Выберите часовой пояс",
|
||||
"advanced_configuration": "Расширенные настройки",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN сервер",
|
||||
"turn_username": "Имя пользователя",
|
||||
"turn_password": "Пароль",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Переадресация и транскодирование",
|
||||
"stun_turn_description_forward": "Оптимизация и усовершенствование связи TURN/STUN.",
|
||||
"stun_turn_webrtc": "Переадресация на WebRTC-брокера",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Возможность хранения записей - это начало всего. Вы можете выбрать один из наших вариантов",
|
||||
"description2_persistence": ", или стороннего провайдера",
|
||||
"select_persistence": "Выберите хранилище",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "Конечная точка Proxy для загрузки записей.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
234
ui/public/locales/vi/translation.json
Normal file
234
ui/public/locales/vi/translation.json
Normal file
@@ -0,0 +1,234 @@
|
||||
{
|
||||
"breadcrumb": {
|
||||
"watch_recordings": "Xem bản ghi",
|
||||
"configure": "Cấu hình"
|
||||
},
|
||||
"buttons": {
|
||||
"save": "Lưu",
|
||||
"verify_connection": "Xác minh kết nối"
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Hồ sơ",
|
||||
"admin": "Quản trị",
|
||||
"management": "Quản lý",
|
||||
"dashboard": "Bảng điều khiển",
|
||||
"recordings": "Bản ghi",
|
||||
"settings": "Cài đặt",
|
||||
"help_support": "Trợ giúp & Hỗ trợ",
|
||||
"swagger": "API Swagger",
|
||||
"documentation": "Tài liệu",
|
||||
"ui_library": "Thư viện UI",
|
||||
"layout": "Ngôn ngữ & Bố cục",
|
||||
"choose_language": "Chọn ngôn ngữ"
|
||||
},
|
||||
"dashboard": {
|
||||
"title": "Bảng điều khiển",
|
||||
"heading": "Tổng quan về giám sát video của bạn",
|
||||
"number_of_days": "Số ngày",
|
||||
"total_recordings": "Tổng số bản ghi",
|
||||
"connected": "Đã kết nối",
|
||||
"not_connected": "Chưa kết nối",
|
||||
"offline_mode": "Chế độ ngoại tuyến",
|
||||
"latest_events": "Sự kiện gần đây",
|
||||
"configure_connection": "Cấu hình kết nối",
|
||||
"no_events": "Không có sự kiện",
|
||||
"no_events_description": "Không tìm thấy bản ghi nào, hãy đảm bảo Kerberos Agent của bạn được cấu hình đúng cách.",
|
||||
"motion_detected": "Phát hiện chuyển động",
|
||||
"live_view": "Xem trực tiếp",
|
||||
"loading_live_view": "Đang tải xem trực tiếp",
|
||||
"loading_live_view_description": "Vui lòng chờ trong khi chúng tôi tải xem trực tiếp của bạn. Nếu bạn chưa cấu hình kết nối camera, hãy cập nhật trong trang cài đặt.",
|
||||
"time": "Thời gian",
|
||||
"description": "Mô tả",
|
||||
"name": "Tên"
|
||||
},
|
||||
"recordings": {
|
||||
"title": "Bản ghi",
|
||||
"heading": "Tất cả bản ghi của bạn ở một nơi",
|
||||
"search_media": "Tìm kiếm phương tiện"
|
||||
},
|
||||
"settings": {
|
||||
"title": "Cài đặt",
|
||||
"heading": "Thiết lập camera của bạn",
|
||||
"submenu": {
|
||||
"all": "Tất cả",
|
||||
"overview": "Tổng quan",
|
||||
"camera": "Camera",
|
||||
"recording": "Ghi hình",
|
||||
"streaming": "Truyền phát",
|
||||
"conditions": "Điều kiện",
|
||||
"persistence": "Lưu trữ"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Xem thử môi trường demo của Kerberos Hub để thấy Kerberos Hub hoạt động như thế nào!",
|
||||
"configuration_updated_success": "Cấu hình của bạn đã được cập nhật thành công.",
|
||||
"configuration_updated_error": "Đã xảy ra lỗi khi lưu.",
|
||||
"verify_hub": "Đang xác minh cài đặt Kerberos Hub của bạn.",
|
||||
"verify_hub_success": "Cài đặt Kerberos Hub đã được xác minh thành công.",
|
||||
"verify_hub_error": "Đã xảy ra lỗi khi xác minh Kerberos Hub",
|
||||
"verify_persistence": "Đang xác minh cài đặt lưu trữ.",
|
||||
"verify_persistence_success": "Cài đặt lưu trữ đã được xác minh thành công.",
|
||||
"verify_persistence_error": "Đã xảy ra lỗi khi xác minh lưu trữ",
|
||||
"verify_camera": "Đang xác minh cài đặt camera.",
|
||||
"verify_camera_success": "Cài đặt camera đã được xác minh thành công.",
|
||||
"verify_camera_error": "Đã xảy ra lỗi khi xác minh cài đặt camera",
|
||||
"verify_onvif": "Đang xác minh cài đặt ONVIF.",
|
||||
"verify_onvif_success": "Cài đặt ONVIF đã được xác minh thành công.",
|
||||
"verify_onvif_error": "Đã xảy ra lỗi khi xác minh cài đặt ONVIF"
|
||||
},
|
||||
"overview": {
|
||||
"general": "Chung",
|
||||
"description_general": "Cài đặt chung cho Kerberos Agent của bạn",
|
||||
"key": "Khóa",
|
||||
"camera_name": "Tên camera",
|
||||
"camera_friendly_name": "Tên thân thiện",
|
||||
"timezone": "Múi giờ",
|
||||
"select_timezone": "Chọn múi giờ",
|
||||
"advanced_configuration": "Cấu hình nâng cao",
|
||||
"description_advanced_configuration": "Tùy chọn cấu hình chi tiết để bật hoặc tắt các phần cụ thể của Kerberos Agent",
|
||||
"offline_mode": "Chế độ ngoại tuyến",
|
||||
"description_offline_mode": "Vô hiệu hóa toàn bộ lưu lượng đi",
|
||||
"encryption": "Mã hóa",
|
||||
"description_encryption": "Bật mã hóa cho toàn bộ lưu lượng đi. Các tin nhắn MQTT và/hoặc bản ghi sẽ được mã hóa bằng AES-256. Một khóa riêng tư được sử dụng để ký.",
|
||||
"encryption_enabled": "Bật mã hóa MQTT",
|
||||
"description_encryption_enabled": "Bật mã hóa cho toàn bộ tin nhắn MQTT.",
|
||||
"encryption_recordings_enabled": "Bật mã hóa bản ghi",
|
||||
"description_encryption_recordings_enabled": "Bật mã hóa cho tất cả các bản ghi.",
|
||||
"encryption_fingerprint": "Dấu vân tay",
|
||||
"encryption_privatekey": "Khóa riêng tư",
|
||||
"encryption_symmetrickey": "Khóa đối xứng"
|
||||
},
|
||||
"camera": {
|
||||
"camera": "Camera",
|
||||
"description_camera": "Cài đặt camera là bắt buộc để kết nối với camera bạn chọn.",
|
||||
"only_h264": "Hiện tại chỉ hỗ trợ luồng RTSP H264/H265.",
|
||||
"rtsp_url": "URL RTSP",
|
||||
"rtsp_h264": "Kết nối RTSP H264/H265 với camera của bạn.",
|
||||
"sub_rtsp_url": "URL RTSP phụ (dùng để phát trực tiếp)",
|
||||
"sub_rtsp_h264": "Kết nối RTSP phụ với độ phân giải thấp của camera.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Thông tin xác thực để giao tiếp với các chức năng ONVIF. Chúng được sử dụng cho PTZ hoặc các khả năng khác do camera cung cấp.",
|
||||
"onvif_xaddr": "Địa chỉ ONVIF",
|
||||
"onvif_username": "Tên người dùng ONVIF",
|
||||
"onvif_password": "Mật khẩu ONVIF",
|
||||
"verify_connection": "Xác minh kết nối",
|
||||
"verify_sub_connection": "Xác minh kết nối phụ"
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Ghi hình",
|
||||
"description_recording": "Chỉ định cách bạn muốn thực hiện ghi hình. Có thể ghi liên tục 24/7 hoặc dựa trên chuyển động.",
|
||||
"continuous_recording": "Ghi hình liên tục",
|
||||
"description_continuous_recording": "Ghi hình liên tục 24/7 hoặc dựa trên chuyển động.",
|
||||
"max_duration": "Thời lượng video tối đa (giây)",
|
||||
"description_max_duration": "Thời lượng tối đa của một bản ghi.",
|
||||
"pre_recording": "Ghi trước (khung hình chính được lưu vào bộ đệm)",
|
||||
"description_pre_recording": "Số giây trước khi sự kiện xảy ra.",
|
||||
"post_recording": "Ghi sau (giây)",
|
||||
"description_post_recording": "Số giây sau khi sự kiện xảy ra.",
|
||||
"threshold": "Ngưỡng ghi hình (pixel)",
|
||||
"description_threshold": "Số pixel thay đổi cần đạt để bắt đầu ghi hình.",
|
||||
"autoclean": "Tự động dọn dẹp",
|
||||
"description_autoclean": "Chỉ định xem Kerberos Agent có thể dọn dẹp các bản ghi khi dung lượng lưu trữ đạt giới hạn nhất định (MB) hay không. Hệ thống sẽ xóa bản ghi cũ nhất khi đạt giới hạn.",
|
||||
"autoclean_enable": "Bật tự động dọn dẹp",
|
||||
"autoclean_description_enable": "Xóa bản ghi cũ nhất khi đạt giới hạn dung lượng.",
|
||||
"autoclean_max_directory_size": "Dung lượng thư mục tối đa (MB)",
|
||||
"autoclean_description_max_directory_size": "Dung lượng tối đa (MB) của các bản ghi được lưu trữ.",
|
||||
"fragmentedrecordings": "Ghi hình phân đoạn",
|
||||
"description_fragmentedrecordings": "Khi các bản ghi được phân đoạn, chúng phù hợp để phát trực tuyến HLS. Khi bật, định dạng MP4 sẽ có một số khác biệt.",
|
||||
"fragmentedrecordings_enable": "Bật ghi hình phân đoạn",
|
||||
"fragmentedrecordings_description_enable": "Ghi hình phân đoạn là bắt buộc đối với HLS.",
|
||||
"fragmentedrecordings_duration": "Thời lượng phân đoạn",
|
||||
"fragmentedrecordings_description_duration": "Thời lượng của một phân đoạn duy nhất."
|
||||
},
|
||||
"streaming": {
|
||||
"stun_turn": "STUN/TURN cho WebRTC",
|
||||
"description_stun_turn": "Để phát trực tiếp độ phân giải đầy đủ, chúng tôi sử dụng khái niệm WebRTC. Một trong những tính năng chính là ICE-candidate, cho phép vượt qua NAT bằng STUN/TURN.",
|
||||
"stun_server": "Máy chủ STUN",
|
||||
"turn_server": "Máy chủ TURN",
|
||||
"turn_username": "Tên người dùng",
|
||||
"turn_password": "Mật khẩu",
|
||||
"force_turn": "Buộc sử dụng TURN",
|
||||
"force_turn_description": "Buộc sử dụng TURN ngay cả khi STUN có sẵn.",
|
||||
"stun_turn_forward": "Chuyển tiếp và mã hóa",
|
||||
"stun_turn_description_forward": "Tối ưu hóa và cải thiện giao tiếp TURN/STUN.",
|
||||
"stun_turn_webrtc": "Chuyển tiếp đến WebRTC broker",
|
||||
"stun_turn_description_webrtc": "Chuyển tiếp luồng H264 qua MQTT",
|
||||
"stun_turn_transcode": "Chuyển mã luồng",
|
||||
"stun_turn_description_transcode": "Chuyển đổi luồng sang độ phân giải thấp hơn",
|
||||
"stun_turn_downscale": "Giảm độ phân giải (theo % của độ phân giải gốc)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "Một MQTT broker được sử dụng để giao tiếp từ",
|
||||
"description2_mqtt": "đến Kerberos Agent, nhằm hỗ trợ phát trực tiếp hoặc chức năng ONVIF (PTZ).",
|
||||
"mqtt_brokeruri": "Broker Uri",
|
||||
"mqtt_username": "Tên người dùng",
|
||||
"mqtt_password": "Mật khẩu",
|
||||
"realtimeprocessing": "Xử lý thời gian thực",
|
||||
"description_realtimeprocessing": "Bằng cách bật xử lý thời gian thực, bạn sẽ nhận được các khung hình video thời gian thực qua kết nối MQTT đã chỉ định.",
|
||||
"realtimeprocessing_topic": "Chủ đề để xuất bản",
|
||||
"realtimeprocessing_enabled": "Bật xử lý thời gian thực",
|
||||
"description_realtimeprocessing_enabled": "Gửi khung hình video thời gian thực qua MQTT."
|
||||
},
|
||||
"conditions": {
|
||||
"timeofinterest": "Thời gian quan tâm",
|
||||
"description_timeofinterest": "Chỉ ghi hình trong các khoảng thời gian cụ thể (dựa trên múi giờ).",
|
||||
"timeofinterest_enabled": "Đã bật",
|
||||
"timeofinterest_description_enabled": "Nếu bật, bạn có thể chỉ định các khoảng thời gian ghi hình.",
|
||||
"sunday": "Chủ nhật",
|
||||
"monday": "Thứ hai",
|
||||
"tuesday": "Thứ ba",
|
||||
"wednesday": "Thứ tư",
|
||||
"thursday": "Thứ năm",
|
||||
"friday": "Thứ sáu",
|
||||
"saturday": "Thứ bảy",
|
||||
"externalcondition": "Điều kiện bên ngoài",
|
||||
"description_externalcondition": "Tùy thuộc vào một dịch vụ web bên ngoài, việc ghi hình có thể được bật hoặc tắt.",
|
||||
"regionofinterest": "Khu vực quan tâm",
|
||||
"description_regionofinterest": "Bằng cách xác định một hoặc nhiều khu vực, hệ thống sẽ chỉ theo dõi chuyển động trong các khu vực bạn đã chọn."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Các Kerberos Agent có thể gửi tín hiệu nhịp tim đến một hệ thống trung tâm",
|
||||
"description2_kerberoshub": "để đồng bộ hóa thông tin quan trọng với Kerberos Hub, giúp hiển thị trạng thái giám sát video theo thời gian thực.",
|
||||
"persistence": "Lưu trữ",
|
||||
"saasoffering": "Kerberos Hub (dịch vụ SAAS)",
|
||||
"description_persistence": "Khả năng lưu trữ bản ghi là bước khởi đầu của mọi thứ. Bạn có thể chọn giữa dịch vụ của chúng tôi",
|
||||
"description2_persistence": "hoặc một nhà cung cấp bên thứ ba.",
|
||||
"select_persistence": "Chọn phương thức lưu trữ",
|
||||
"kerberoshub_encryption": "Mã hóa",
|
||||
"kerberoshub_encryption_description": "Tất cả lưu lượng đến/từ Kerberos Hub sẽ được mã hóa bằng AES-256.",
|
||||
"kerberoshub_proxyurl": "URL Proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Điểm cuối Proxy để tải bản ghi lên.",
|
||||
"kerberoshub_apiurl": "URL API Kerberos Hub",
|
||||
"kerberoshub_description_apiurl": "Điểm cuối API để tải bản ghi lên.",
|
||||
"kerberoshub_publickey": "Khóa công khai",
|
||||
"kerberoshub_description_publickey": "Khóa công khai được cấp cho tài khoản Kerberos Hub của bạn.",
|
||||
"kerberoshub_privatekey": "Khóa riêng tư",
|
||||
"kerberoshub_description_privatekey": "Khóa riêng tư được cấp cho tài khoản Kerberos Hub của bạn.",
|
||||
"kerberoshub_site": "Trang web",
|
||||
"kerberoshub_description_site": "ID trang web mà các Kerberos Agent thuộc về trong Kerberos Hub.",
|
||||
"kerberoshub_region": "Khu vực",
|
||||
"kerberoshub_description_region": "Khu vực nơi chúng tôi lưu trữ bản ghi.",
|
||||
"kerberoshub_bucket": "Kho lưu trữ",
|
||||
"kerberoshub_description_bucket": "Kho lưu trữ nơi chúng tôi lưu trữ bản ghi.",
|
||||
"kerberoshub_username": "Tên người dùng / Thư mục (phải khớp với tên người dùng Kerberos Hub)",
|
||||
"kerberoshub_description_username": "Tên người dùng tài khoản Kerberos Hub của bạn.",
|
||||
"kerberosvault_apiurl": "URL API Kerberos Vault",
|
||||
"kerberosvault_description_apiurl": "API của Kerberos Vault",
|
||||
"kerberosvault_provider": "Nhà cung cấp",
|
||||
"kerberosvault_description_provider": "Nhà cung cấp nơi bản ghi của bạn sẽ được gửi đến.",
|
||||
"kerberosvault_directory": "Thư mục (phải khớp với tên người dùng Kerberos Hub)",
|
||||
"kerberosvault_description_directory": "Thư mục con nơi các bản ghi sẽ được lưu trữ trong nhà cung cấp của bạn.",
|
||||
"kerberosvault_accesskey": "Khóa truy cập",
|
||||
"kerberosvault_description_accesskey": "Khóa truy cập của tài khoản Kerberos Vault của bạn.",
|
||||
"kerberosvault_secretkey": "Khóa bí mật",
|
||||
"kerberosvault_description_secretkey": "Khóa bí mật của tài khoản Kerberos Vault của bạn.",
|
||||
"dropbox_directory": "Thư mục",
|
||||
"dropbox_description_directory": "Thư mục con nơi bản ghi sẽ được lưu trữ trong tài khoản Dropbox của bạn.",
|
||||
"dropbox_accesstoken": "Mã truy cập",
|
||||
"dropbox_description_accesstoken": "Mã truy cập của tài khoản / ứng dụng Dropbox của bạn.",
|
||||
"verify_connection": "Xác minh kết nối",
|
||||
"remove_after_upload": "Sau khi bản ghi được tải lên một hệ thống lưu trữ, bạn có thể muốn xóa chúng khỏi Kerberos Agent cục bộ.",
|
||||
"remove_after_upload_description": "Xóa bản ghi sau khi chúng được tải lên thành công.",
|
||||
"remove_after_upload_enabled": "Bật xóa sau khi tải lên"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Kerberos Agent 常规设置",
|
||||
"key": "Key",
|
||||
"camera_name": "相机名称",
|
||||
"camera_friendly_name": "相机友好名称",
|
||||
"timezone": "时区",
|
||||
"select_timezone": "选择时区",
|
||||
"advanced_configuration": "高级配置",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN 服务",
|
||||
"turn_username": "账户",
|
||||
"turn_password": "密码",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "转发和转码",
|
||||
"stun_turn_description_forward": "TURN/STUN 通信的优化和增强。",
|
||||
"stun_turn_webrtc": "转发到 WebRTC 代理",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "能够存储您的录像是一切的开始。您可以在我们的",
|
||||
"description2_persistence": ", 或第三方提供商之间进行选择。",
|
||||
"select_persistence": "选择持久化存储",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub 代理 URL",
|
||||
"kerberoshub_description_proxyurl": "用于上传您录像的代理端点",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -100,7 +100,7 @@ class App extends React.Component {
|
||||
</div>
|
||||
)}
|
||||
<div id="page-root">
|
||||
<Sidebar logo={logo} title="Kerberos Agent" version="v3.1.1" mobile>
|
||||
<Sidebar logo={logo} title="Kerberos Agent" version="v3.1.8" mobile>
|
||||
<Profilebar
|
||||
username={username}
|
||||
email="support@kerberos.io"
|
||||
|
||||
@@ -4,6 +4,7 @@ import {
|
||||
doVerifyOnvif,
|
||||
doVerifyHub,
|
||||
doVerifyPersistence,
|
||||
doVerifySecondaryPersistence,
|
||||
doGetKerberosAgentTags,
|
||||
doGetDashboardInformation,
|
||||
doGetEvents,
|
||||
@@ -107,6 +108,28 @@ export const verifyPersistence = (config, onSuccess, onError) => {
|
||||
};
|
||||
};
|
||||
|
||||
export const verifySecondaryPersistence = (config, onSuccess, onError) => {
|
||||
return (dispatch) => {
|
||||
doVerifySecondaryPersistence(
|
||||
config,
|
||||
() => {
|
||||
dispatch({
|
||||
type: 'VERIFY_SECONDARY_PERSISTENCE',
|
||||
});
|
||||
if (onSuccess) {
|
||||
onSuccess();
|
||||
}
|
||||
},
|
||||
(error) => {
|
||||
const { data } = error.response.data;
|
||||
if (onError) {
|
||||
onError(data);
|
||||
}
|
||||
}
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
export const verifyHub = (config, onSuccess, onError) => {
|
||||
return (dispatch) => {
|
||||
doVerifyHub(
|
||||
|
||||
@@ -72,6 +72,25 @@ export function doVerifyPersistence(config, onSuccess, onError) {
|
||||
});
|
||||
}
|
||||
|
||||
export function doVerifySecondaryPersistence(config, onSuccess, onError) {
|
||||
const endpoint = API.post(`persistence/secondary/verify`, {
|
||||
...config,
|
||||
});
|
||||
endpoint
|
||||
.then((res) => {
|
||||
if (res.status !== 200) {
|
||||
throw new Error(res.data);
|
||||
}
|
||||
return res.data;
|
||||
})
|
||||
.then((data) => {
|
||||
onSuccess(data);
|
||||
})
|
||||
.catch((error) => {
|
||||
onError(error);
|
||||
});
|
||||
}
|
||||
|
||||
export function doVerifyHub(config, onSuccess, onError) {
|
||||
const endpoint = API.post(`hub/verify`, {
|
||||
...config,
|
||||
|
||||
@@ -26,6 +26,7 @@ const LanguageSelect = () => {
|
||||
ja: { label: '日本', dir: 'rlt', active: false },
|
||||
hi: { label: 'हिंदी', dir: 'ltr', active: false },
|
||||
ru: { label: 'Русский', dir: 'ltr', active: false },
|
||||
ro: { label: 'Română', dir: 'ltr', active: false },
|
||||
};
|
||||
|
||||
if (!languageMap[selected]) {
|
||||
|
||||
@@ -9,9 +9,9 @@ const dev = {
|
||||
ENV: 'dev',
|
||||
// Comment the below lines, when using codespaces or other special DNS names (which you can't control)
|
||||
HOSTNAME: hostname,
|
||||
API_URL: `${protocol}//${hostname}:80/api`,
|
||||
URL: `${protocol}//${hostname}:80`,
|
||||
WS_URL: `${websocketprotocol}//${hostname}:80/ws`,
|
||||
API_URL: `${protocol}//${hostname}:8080/api`,
|
||||
URL: `${protocol}//${hostname}:8080`,
|
||||
WS_URL: `${websocketprotocol}//${hostname}:8080/ws`,
|
||||
MODE: window['env']['mode'],
|
||||
// Uncomment, and comment the above lines, when using codespaces or other special DNS names (which you can't control)
|
||||
// HOSTNAME: externalHost,
|
||||
|
||||
@@ -14,7 +14,7 @@ i18n
|
||||
escapeValue: false,
|
||||
},
|
||||
load: 'languageOnly',
|
||||
whitelist: ['de', 'en', 'nl', 'fr', 'pl', 'es', 'pt', 'ja', 'ru'],
|
||||
whitelist: ['de', 'en', 'nl', 'fr', 'pl', 'es', 'pt', 'ja', 'ru', 'ro'],
|
||||
});
|
||||
|
||||
export default i18n;
|
||||
|
||||
@@ -33,6 +33,7 @@ import {
|
||||
verifyCamera,
|
||||
verifyHub,
|
||||
verifyPersistence,
|
||||
verifySecondaryPersistence,
|
||||
getConfig,
|
||||
updateConfig,
|
||||
} from '../../actions/agent';
|
||||
@@ -63,6 +64,9 @@ class Settings extends React.Component {
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifyPersistenceMessage: '',
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
verifySecondaryPersistenceMessage: '',
|
||||
verifyCameraSuccess: false,
|
||||
verifyCameraError: false,
|
||||
verifyCameraMessage: '',
|
||||
@@ -70,6 +74,7 @@ class Settings extends React.Component {
|
||||
verifyOnvifError: false,
|
||||
verifyOnvifErrorMessage: '',
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
loadingHub: false,
|
||||
loadingCamera: false,
|
||||
};
|
||||
@@ -125,6 +130,8 @@ class Settings extends React.Component {
|
||||
this.onUpdateTimeline = this.onUpdateTimeline.bind(this);
|
||||
this.initialiseLiveview = this.initialiseLiveview.bind(this);
|
||||
this.verifyPersistenceSettings = this.verifyPersistenceSettings.bind(this);
|
||||
this.verifySecondaryPersistenceSettings =
|
||||
this.verifySecondaryPersistenceSettings.bind(this);
|
||||
this.verifyHubSettings = this.verifyHubSettings.bind(this);
|
||||
this.verifyCameraSettings = this.verifyCameraSettings.bind(this);
|
||||
this.verifySubCameraSettings = this.verifySubCameraSettings.bind(this);
|
||||
@@ -350,6 +357,8 @@ class Settings extends React.Component {
|
||||
configSuccess: false,
|
||||
configError: false,
|
||||
loadingCamera: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
loadingOnvif: true,
|
||||
});
|
||||
|
||||
@@ -390,6 +399,8 @@ class Settings extends React.Component {
|
||||
configError: false,
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
verifyHubSuccess: false,
|
||||
verifyHubError: false,
|
||||
verifyHubErrorMessage: '',
|
||||
@@ -401,6 +412,8 @@ class Settings extends React.Component {
|
||||
verifyOnvifSuccess: false,
|
||||
verifyOnvifError: false,
|
||||
loadingHub: true,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
|
||||
// .... test fields
|
||||
@@ -441,6 +454,8 @@ class Settings extends React.Component {
|
||||
verifyHubError: false,
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
verifyCameraSuccess: false,
|
||||
@@ -449,6 +464,7 @@ class Settings extends React.Component {
|
||||
verifyOnvifError: false,
|
||||
verifyCameraErrorMessage: '',
|
||||
loading: true,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
|
||||
dispatchVerifyPersistence(
|
||||
@@ -461,6 +477,7 @@ class Settings extends React.Component {
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
},
|
||||
(error) => {
|
||||
@@ -471,6 +488,58 @@ class Settings extends React.Component {
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
verifySecondaryPersistenceSettings() {
|
||||
const { config, dispatchVerifySecondaryPersistence } = this.props;
|
||||
if (config) {
|
||||
this.setState({
|
||||
configSuccess: false,
|
||||
configError: false,
|
||||
verifyHubSuccess: false,
|
||||
verifyHubError: false,
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
verifyCameraSuccess: false,
|
||||
verifyCameraError: false,
|
||||
verifyOnvifSuccess: false,
|
||||
verifyOnvifError: false,
|
||||
verifyCameraErrorMessage: '',
|
||||
loading: false,
|
||||
loadingSecondary: true,
|
||||
});
|
||||
|
||||
dispatchVerifySecondaryPersistence(
|
||||
config.config,
|
||||
() => {
|
||||
this.setState({
|
||||
verifySecondaryPersistenceSuccess: true,
|
||||
verifySecondaryPersistenceError: false,
|
||||
verifySecondaryPersistenceMessage: '',
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
},
|
||||
(error) => {
|
||||
this.setState({
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: true,
|
||||
verifySecondaryPersistenceMessage: error,
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
}
|
||||
);
|
||||
@@ -537,6 +606,9 @@ class Settings extends React.Component {
|
||||
verifyPersistenceSuccess,
|
||||
verifyPersistenceError,
|
||||
verifyPersistenceMessage,
|
||||
verifySecondaryPersistenceSuccess,
|
||||
verifySecondaryPersistenceError,
|
||||
verifySecondaryPersistenceMessage,
|
||||
verifyCameraSuccess,
|
||||
verifyCameraError,
|
||||
verifyCameraErrorMessage,
|
||||
@@ -546,6 +618,7 @@ class Settings extends React.Component {
|
||||
verifyOnvifErrorMessage,
|
||||
loadingCamera,
|
||||
loading,
|
||||
loadingSecondary,
|
||||
loadingHub,
|
||||
} = this.state;
|
||||
|
||||
@@ -798,6 +871,20 @@ class Settings extends React.Component {
|
||||
)} :${verifyPersistenceMessage}`}
|
||||
/>
|
||||
)}
|
||||
{verifySecondaryPersistenceSuccess && (
|
||||
<InfoBar
|
||||
type="success"
|
||||
message={t('settings.info.verify_persistence_success')}
|
||||
/>
|
||||
)}
|
||||
{verifySecondaryPersistenceError && (
|
||||
<InfoBar
|
||||
type="alert"
|
||||
message={`${t(
|
||||
'settings.info.verify_persistence_error'
|
||||
)} :${verifySecondaryPersistenceMessage}`}
|
||||
/>
|
||||
)}
|
||||
<div className="stats grid-container --two-columns">
|
||||
<div>
|
||||
{/* General settings block */}
|
||||
@@ -824,6 +911,15 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.overview.camera_friendly_name')}
|
||||
defaultValue={config.friendly_name}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField('', 'friendly_name', value, config)
|
||||
}
|
||||
/>
|
||||
|
||||
<Dropdown
|
||||
isRadio
|
||||
icon="world"
|
||||
@@ -1088,6 +1184,101 @@ class Settings extends React.Component {
|
||||
this.onUpdateField('', 'turn_password', value, config)
|
||||
}
|
||||
/>
|
||||
<br />
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.turn_force === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle('', 'turn_force', event, config)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.force_turn')}</span>
|
||||
<p>{t('settings.streaming.force_turn_description')}</p>
|
||||
</div>
|
||||
</div>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
label={t('buttons.save')}
|
||||
onClick={this.saveConfig}
|
||||
type="default"
|
||||
icon="pencil"
|
||||
/>
|
||||
</BlockFooter>
|
||||
</Block>
|
||||
)}
|
||||
|
||||
{/* STUN/TURN block */}
|
||||
{showStreamingSection && config.offline !== 'true' && (
|
||||
<Block>
|
||||
<BlockHeader>
|
||||
<h4>{t('settings.streaming.stun_turn_forward')}</h4>
|
||||
</BlockHeader>
|
||||
<BlockBody>
|
||||
<p>{t('settings.streaming.stun_turn_description_forward')}</p>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.forwardwebrtc === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'forwardwebrtc',
|
||||
event,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_webrtc')}</span>
|
||||
<p>
|
||||
{t('settings.streaming.stun_turn_description_webrtc')}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.transcodingwebrtc === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'transcodingwebrtc',
|
||||
event,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_transcode')}</span>
|
||||
<p>
|
||||
{t(
|
||||
'settings.streaming.stun_turn_description_transcode'
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{config.capture.transcodingwebrtc === 'true' && (
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.streaming.stun_turn_downscale')}
|
||||
value={config.capture.transcodingresolution}
|
||||
placeholder="The % of the original resolution."
|
||||
onChange={(value) =>
|
||||
this.onUpdateNumberField(
|
||||
'capture',
|
||||
'transcodingresolution',
|
||||
value,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
)}
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
@@ -1129,7 +1320,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberoshub_publickey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberoshub_description_publickey'
|
||||
@@ -1140,7 +1332,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberoshub_privatekey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberoshub_description_privatekey'
|
||||
@@ -1161,6 +1354,27 @@ class Settings extends React.Component {
|
||||
this.onUpdateField('', 'hub_site', value, config)
|
||||
}
|
||||
/>
|
||||
|
||||
<br />
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.hub_encryption === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle('', 'hub_encryption', event, config)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>
|
||||
{t('settings.persistence.kerberoshub_encryption')}
|
||||
</span>
|
||||
<p>
|
||||
{t(
|
||||
'settings.persistence.kerberoshub_encryption_description'
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
@@ -1336,7 +1550,8 @@ class Settings extends React.Component {
|
||||
</div>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.overview.encryption_fingerprint')}
|
||||
value={config.encryption.fingerprint}
|
||||
onChange={(value) =>
|
||||
@@ -1349,7 +1564,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.overview.encryption_privatekey')}
|
||||
value={config.encryption.private_key}
|
||||
onChange={(value) =>
|
||||
@@ -1362,7 +1578,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.overview.encryption_symmetrickey')}
|
||||
value={config.encryption.symmetric_key}
|
||||
onChange={(value) =>
|
||||
@@ -1439,75 +1656,55 @@ class Settings extends React.Component {
|
||||
</Block>
|
||||
)}
|
||||
|
||||
{/* STUN/TURN block */}
|
||||
{showStreamingSection && config.offline !== 'true' && (
|
||||
<Block>
|
||||
<BlockHeader>
|
||||
<h4>{t('settings.streaming.stun_turn_forward')}</h4>
|
||||
<h4>{t('settings.streaming.realtimeprocessing')}</h4>
|
||||
</BlockHeader>
|
||||
<BlockBody>
|
||||
<p>{t('settings.streaming.stun_turn_description_forward')}</p>
|
||||
<p>
|
||||
{t('settings.streaming.description_realtimeprocessing')}
|
||||
</p>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.forwardwebrtc === 'true'}
|
||||
on={config.realtimeprocessing === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'forwardwebrtc',
|
||||
'',
|
||||
'realtimeprocessing',
|
||||
event,
|
||||
config.capture
|
||||
config
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_webrtc')}</span>
|
||||
<p>
|
||||
{t('settings.streaming.stun_turn_description_webrtc')}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.transcodingwebrtc === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'transcodingwebrtc',
|
||||
event,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_transcode')}</span>
|
||||
<span>
|
||||
{t('settings.streaming.realtimeprocessing_enabled')}
|
||||
</span>
|
||||
<p>
|
||||
{t(
|
||||
'settings.streaming.stun_turn_description_transcode'
|
||||
'settings.streaming.description_realtimeprocessing_enabled'
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{config.capture.transcodingwebrtc === 'true' && (
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.streaming.stun_turn_downscale')}
|
||||
value={config.capture.transcodingresolution}
|
||||
placeholder="The % of the original resolution."
|
||||
onChange={(value) =>
|
||||
this.onUpdateNumberField(
|
||||
'capture',
|
||||
'transcodingresolution',
|
||||
value,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
)}
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.streaming.realtimeprocessing_topic')}
|
||||
value={config.realtimeprocessing_topic}
|
||||
placeholder="kerberos/keyframes/key"
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'',
|
||||
'realtimeprocessing_topic',
|
||||
value,
|
||||
config
|
||||
)
|
||||
}
|
||||
/>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
@@ -2296,7 +2493,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_accesskey'
|
||||
)}
|
||||
@@ -2316,7 +2514,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_secretkey'
|
||||
)}
|
||||
@@ -2337,6 +2536,43 @@ class Settings extends React.Component {
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_maxretries'
|
||||
)}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_maxretries'
|
||||
)}
|
||||
value={
|
||||
config.kstorage ? config.kstorage.max_retries : ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage',
|
||||
'max_retries',
|
||||
value,
|
||||
config.kstorage
|
||||
)
|
||||
}
|
||||
/>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_timeout')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_timeout'
|
||||
)}
|
||||
value={config.kstorage ? config.kstorage.timeout : ''}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage',
|
||||
'timeout',
|
||||
value,
|
||||
config.kstorage
|
||||
)
|
||||
}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
{config.cloud === this.DROPBOX && (
|
||||
@@ -2396,6 +2632,140 @@ class Settings extends React.Component {
|
||||
</BlockFooter>
|
||||
</Block>
|
||||
)}
|
||||
|
||||
{/* Secondary Vault block */}
|
||||
{showPersistenceSection && config.cloud === this.KERBEROS_VAULT && (
|
||||
<Block>
|
||||
<BlockHeader>
|
||||
<h4>{t('settings.persistence.secondary_persistence')}</h4>
|
||||
</BlockHeader>
|
||||
<BlockBody>
|
||||
<p>
|
||||
{t(
|
||||
'settings.persistence.description_secondary_persistence'
|
||||
)}
|
||||
</p>
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_apiurl')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_apiurl'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.uri
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'uri',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_provider')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_provider'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.provider
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'provider',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_directory')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_directory'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.directory
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'directory',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberosvault_accesskey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_accesskey'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.access_key
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'access_key',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberosvault_secretkey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_secretkey'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.secret_access_key
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'secret_access_key',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
label={t('settings.persistence.verify_connection')}
|
||||
disabled={loadingSecondary}
|
||||
onClick={this.verifySecondaryPersistenceSettings}
|
||||
type={loadingSecondary ? 'neutral' : 'default'}
|
||||
icon="verify"
|
||||
/>
|
||||
<Button
|
||||
label="Save"
|
||||
type="submit"
|
||||
onClick={this.saveConfig}
|
||||
buttonType="submit"
|
||||
icon="pencil"
|
||||
/>
|
||||
</BlockFooter>
|
||||
</Block>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -2420,6 +2790,8 @@ const mapDispatchToProps = (dispatch /* , ownProps */) => ({
|
||||
dispatch(verifyHub(config, success, error)),
|
||||
dispatchVerifyPersistence: (config, success, error) =>
|
||||
dispatch(verifyPersistence(config, success, error)),
|
||||
dispatchVerifySecondaryPersistence: (config, success, error) =>
|
||||
dispatch(verifySecondaryPersistence(config, success, error)),
|
||||
dispatchGetConfig: (callback) => dispatch(getConfig(callback)),
|
||||
dispatchUpdateConfig: (field, value) => dispatch(updateConfig(field, value)),
|
||||
dispatchSaveConfig: (config, success, error) =>
|
||||
@@ -2437,6 +2809,7 @@ Settings.propTypes = {
|
||||
images: PropTypes.array.isRequired,
|
||||
dispatchVerifyHub: PropTypes.func.isRequired,
|
||||
dispatchVerifyPersistence: PropTypes.func.isRequired,
|
||||
dispatchVerifySecondaryPersistence: PropTypes.func.isRequired,
|
||||
dispatchGetConfig: PropTypes.func.isRequired,
|
||||
dispatchUpdateConfig: PropTypes.func.isRequired,
|
||||
dispatchSaveConfig: PropTypes.func.isRequired,
|
||||
|
||||
Reference in New Issue
Block a user