mirror of
https://github.com/kerberos-io/agent.git
synced 2026-03-02 22:59:15 +00:00
Compare commits
400 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2c02e0aeb1 | ||
|
|
d5464362bb | ||
|
|
5bcefd0015 | ||
|
|
5bb9def42d | ||
|
|
ff38ccbadf | ||
|
|
f64e899de9 | ||
|
|
b8a81d18af | ||
|
|
8c2e3e4cdd | ||
|
|
11c4ee518d | ||
|
|
51b9d76973 | ||
|
|
f3c1cb9b82 | ||
|
|
a1368361e4 | ||
|
|
abfdea0179 | ||
|
|
8aaeb62fa3 | ||
|
|
e30dd7d4a0 | ||
|
|
ac3f9aa4e8 | ||
|
|
04c568f488 | ||
|
|
e270223968 | ||
|
|
01ab1a9218 | ||
|
|
6f0794b09c | ||
|
|
1ae6a46d88 | ||
|
|
9d83cab5cc | ||
|
|
6f559c2f00 | ||
|
|
c147944f5a | ||
|
|
e8ca776e4e | ||
|
|
de5c4b6e0a | ||
|
|
9ba64de090 | ||
|
|
7ceeebe76e | ||
|
|
bd7dbcfcf2 | ||
|
|
8c7a46e3ae | ||
|
|
57ccfaabf5 | ||
|
|
4a9cb51e95 | ||
|
|
ab6f621e76 | ||
|
|
c365ae5af2 | ||
|
|
b05c3d1baa | ||
|
|
c7c7203fad | ||
|
|
d93f85b4f3 | ||
|
|
031212b98c | ||
|
|
a4837b3cb3 | ||
|
|
77629ac9b8 | ||
|
|
59608394af | ||
|
|
9dfcaa466f | ||
|
|
88442e4525 | ||
|
|
891ae2e5d5 | ||
|
|
32b471f570 | ||
|
|
5d745fc989 | ||
|
|
edfa6ec4c6 | ||
|
|
0c460efea6 | ||
|
|
96df049e59 | ||
|
|
2cb454e618 | ||
|
|
7f2ebb655e | ||
|
|
63857fb5cc | ||
|
|
f4c75f9aa9 | ||
|
|
c3936dc884 | ||
|
|
2868ddc499 | ||
|
|
176610a694 | ||
|
|
f60aff4fd6 | ||
|
|
847f62303a | ||
|
|
f174e2697e | ||
|
|
acac2d5d42 | ||
|
|
f304c2ed3e | ||
|
|
2003a38cdc | ||
|
|
a67c5a1f39 | ||
|
|
b7a87f95e5 | ||
|
|
0aa0b8ad8f | ||
|
|
2bff868de6 | ||
|
|
8b59828126 | ||
|
|
f55e25db07 | ||
|
|
243c969666 | ||
|
|
ec7f2e0303 | ||
|
|
a4a032d994 | ||
|
|
0a84744e49 | ||
|
|
1425430376 | ||
|
|
ca8d88ffce | ||
|
|
af3f8bb639 | ||
|
|
1f9772d472 | ||
|
|
94cf361b55 | ||
|
|
6acdf258e7 | ||
|
|
cc0a810ab3 | ||
|
|
c19bfbe552 | ||
|
|
39aaf5ad6c | ||
|
|
6fba2ff05d | ||
|
|
d78e682759 | ||
|
|
ed582a9d57 | ||
|
|
aa925d5c9b | ||
|
|
08d191e542 | ||
|
|
cc075d7237 | ||
|
|
1974bddfbe | ||
|
|
12cb88e1c1 | ||
|
|
c054526998 | ||
|
|
ffa97598b8 | ||
|
|
f5afbf3a63 | ||
|
|
e666695c96 | ||
|
|
55816e4b7b | ||
|
|
016fb51951 | ||
|
|
550a444650 | ||
|
|
4332e43f27 | ||
|
|
fdc3bfb4a4 | ||
|
|
c17d6b7117 | ||
|
|
5d7a8103c0 | ||
|
|
5d7cb98b8f | ||
|
|
f6046c6a6c | ||
|
|
f59f9d71a9 | ||
|
|
ff72f9647d | ||
|
|
fa604b16cf | ||
|
|
0342869733 | ||
|
|
8685ce31a2 | ||
|
|
0e259f0e7a | ||
|
|
5823abed95 | ||
|
|
86acff58f0 | ||
|
|
d3fc5d4c29 | ||
|
|
50bb40938c | ||
|
|
1977d98ad9 | ||
|
|
448d4a946d | ||
|
|
61ac314bb7 | ||
|
|
c1b144ca28 | ||
|
|
e16987bf9d | ||
|
|
9991597984 | ||
|
|
2c0314cea4 | ||
|
|
0584e52b98 | ||
|
|
1fc90eaee2 | ||
|
|
aef3eacbc9 | ||
|
|
2843568473 | ||
|
|
53ffc8cae0 | ||
|
|
86e654fe19 | ||
|
|
46d57f7664 | ||
|
|
963d8672eb | ||
|
|
9b7a62816a | ||
|
|
237134fe0e | ||
|
|
c8730e8f26 | ||
|
|
acbbe8b444 | ||
|
|
f690016aa5 | ||
|
|
396cfe5d8b | ||
|
|
39fe640ccf | ||
|
|
d389c9b0b6 | ||
|
|
b149686db8 | ||
|
|
c4358cbfad | ||
|
|
cfc5bd3dfe | ||
|
|
c29c1b6a92 | ||
|
|
0f45a2a4b4 | ||
|
|
92edcc13c0 | ||
|
|
5392e2ba90 | ||
|
|
79e1f659c7 | ||
|
|
bf35e5efb6 | ||
|
|
c50137f255 | ||
|
|
f12da749b2 | ||
|
|
a166083423 | ||
|
|
b400d4e773 | ||
|
|
120054d3e5 | ||
|
|
620117c31b | ||
|
|
4e371488c1 | ||
|
|
b154b56308 | ||
|
|
6d92817237 | ||
|
|
b8c1855830 | ||
|
|
a9f7ff4b72 | ||
|
|
b3cd080e14 | ||
|
|
bfde87f888 | ||
|
|
c4453bb8b3 | ||
|
|
40f65a30b3 | ||
|
|
5361de63e0 | ||
|
|
3a8552d362 | ||
|
|
d3840103fc | ||
|
|
d12a9f0612 | ||
|
|
c0d74f7e09 | ||
|
|
8ebea9e4c5 | ||
|
|
89269caf92 | ||
|
|
0c83170f51 | ||
|
|
6081cb4be9 | ||
|
|
ea1dbb3087 | ||
|
|
0523208d36 | ||
|
|
919f21b48b | ||
|
|
2c1c10a2ac | ||
|
|
7e3320b252 | ||
|
|
35ccac8b65 | ||
|
|
dad8165d11 | ||
|
|
ba54188de2 | ||
|
|
3b440c9905 | ||
|
|
42b98b7f20 | ||
|
|
ba3312b57c | ||
|
|
223ba255e9 | ||
|
|
a1df2be207 | ||
|
|
d7f225ca73 | ||
|
|
b3cfabb5df | ||
|
|
5310dd4550 | ||
|
|
cde7dbb58a | ||
|
|
65e68231c7 | ||
|
|
5502555869 | ||
|
|
ad6e7e752f | ||
|
|
63af4660ef | ||
|
|
24fc340001 | ||
|
|
78d786b69d | ||
|
|
756aeaa0eb | ||
|
|
055fb67d7a | ||
|
|
bee522a6bf | ||
|
|
3fbf59c622 | ||
|
|
abd8b8b605 | ||
|
|
abdad47bf3 | ||
|
|
d2c24edf5d | ||
|
|
22f4a7f119 | ||
|
|
a25d3d32e4 | ||
|
|
ed68c32e04 | ||
|
|
4114b3839a | ||
|
|
3f73c009fd | ||
|
|
02fb70c76e | ||
|
|
aaddcb854d | ||
|
|
e73c7a6ecc | ||
|
|
1dc2202f37 | ||
|
|
ac710ae1f5 | ||
|
|
f5ea82ff03 | ||
|
|
ef52325240 | ||
|
|
354855feb1 | ||
|
|
c4cd25b588 | ||
|
|
dbb870229e | ||
|
|
a66fe8c054 | ||
|
|
2352431c79 | ||
|
|
49bc168812 | ||
|
|
98f1ebf20a | ||
|
|
65feb6d182 | ||
|
|
58555d352f | ||
|
|
839a177cf0 | ||
|
|
404517ec40 | ||
|
|
035bd18bc2 | ||
|
|
8bf7a0d244 | ||
|
|
607d8fd0d1 | ||
|
|
12807e289c | ||
|
|
3a984f1c73 | ||
|
|
b84e34da06 | ||
|
|
541d151570 | ||
|
|
4ad97e1286 | ||
|
|
a80b375e89 | ||
|
|
91cb390f6e | ||
|
|
90780dae28 | ||
|
|
ddb08e90e1 | ||
|
|
0d95026819 | ||
|
|
79db3a9dfe | ||
|
|
9f63ffd540 | ||
|
|
9c7116a462 | ||
|
|
dd9b4d43ac | ||
|
|
aa63eca24c | ||
|
|
6df97171d9 | ||
|
|
56f7d69b3d | ||
|
|
3e2b29284e | ||
|
|
18ceca7510 | ||
|
|
5a08d1f3de | ||
|
|
18af6db00c | ||
|
|
6d170c8dc0 | ||
|
|
9c4c3c654d | ||
|
|
6952e387f4 | ||
|
|
66c9ae5c27 | ||
|
|
0fb7601dcb | ||
|
|
07c6e680d1 | ||
|
|
b972bc3040 | ||
|
|
969d42dbca | ||
|
|
6680df9382 | ||
|
|
8877157db5 | ||
|
|
ac814dc357 | ||
|
|
4fcb12c3a3 | ||
|
|
7bcc30f4b7 | ||
|
|
481f917fcf | ||
|
|
700a32e4c8 | ||
|
|
b5a72d904e | ||
|
|
cf3e491462 | ||
|
|
6068705c07 | ||
|
|
37beaa64d7 | ||
|
|
8c5b03487b | ||
|
|
360ae0c0db | ||
|
|
6aad8b7b35 | ||
|
|
9ce037fdc0 | ||
|
|
0eb77ccd16 | ||
|
|
fb876bd216 | ||
|
|
865aec88fc | ||
|
|
9792bdf494 | ||
|
|
d836e89e7f | ||
|
|
53a52b3594 | ||
|
|
ba6ce25b21 | ||
|
|
8c9e18475f | ||
|
|
4548d5328b | ||
|
|
da870fe890 | ||
|
|
66b660e688 | ||
|
|
08f8ca78d6 | ||
|
|
1e61e99005 | ||
|
|
c272e1ab5c | ||
|
|
5cff11c0af | ||
|
|
28b213779f | ||
|
|
666ff202ad | ||
|
|
9cb3c9753a | ||
|
|
c4577e94b1 | ||
|
|
9756183d3b | ||
|
|
83c65fe3d8 | ||
|
|
e6717c87cd | ||
|
|
5a3c1d6c9d | ||
|
|
81045ea955 | ||
|
|
9f9fe3bd37 | ||
|
|
84f7f844c9 | ||
|
|
4fde419db9 | ||
|
|
78cad6cf06 | ||
|
|
4763e5a92e | ||
|
|
50939ee4ce | ||
|
|
884bc2acc1 | ||
|
|
11fd041fa9 | ||
|
|
a6d5c2b614 | ||
|
|
9e3d705c6f | ||
|
|
1004731903 | ||
|
|
9f2ec91688 | ||
|
|
185135ed94 | ||
|
|
27e7d98c68 | ||
|
|
79f56771e3 | ||
|
|
a7839147d6 | ||
|
|
834d82d532 | ||
|
|
989f2f5943 | ||
|
|
3af1df5b19 | ||
|
|
acf06e6e63 | ||
|
|
3f43e15cc2 | ||
|
|
c14683ec0d | ||
|
|
213aaa5c15 | ||
|
|
9fb00c32d5 | ||
|
|
57ec08066c | ||
|
|
e0c6375261 | ||
|
|
79205abe29 | ||
|
|
24326558d0 | ||
|
|
3f981c0f2f | ||
|
|
b6eb7b8317 | ||
|
|
4267ae6305 | ||
|
|
0cb40bd93a | ||
|
|
d2a8890a43 | ||
|
|
e5a5a5326b | ||
|
|
61febd55c8 | ||
|
|
3eac752654 | ||
|
|
df4f1863fc | ||
|
|
acee2784d3 | ||
|
|
8ecb2f94a9 | ||
|
|
8657baf641 | ||
|
|
13d1948c9f | ||
|
|
8e8d51b719 | ||
|
|
ca2413363e | ||
|
|
b067758915 | ||
|
|
b2b8485b28 | ||
|
|
c69d635431 | ||
|
|
a305ca36ce | ||
|
|
a6a97b09f0 | ||
|
|
4d17a15633 | ||
|
|
5fdb4b712e | ||
|
|
3d39251ac6 | ||
|
|
9e59cd1596 | ||
|
|
0ada943699 | ||
|
|
ecadf7a4db | ||
|
|
413ed12fe2 | ||
|
|
6195fa5b9c | ||
|
|
d31524ae52 | ||
|
|
472a40a5f6 | ||
|
|
fb9de04002 | ||
|
|
3f29d1c46f | ||
|
|
b67a72ba9a | ||
|
|
8fc9bc264d | ||
|
|
b2589f498d | ||
|
|
b1ff5134f2 | ||
|
|
3551d02d50 | ||
|
|
4c413012a4 | ||
|
|
74ea2f6cdd | ||
|
|
2a7d9b62d4 | ||
|
|
21d81b94dd | ||
|
|
091662ff26 | ||
|
|
803e8f55ef | ||
|
|
14d38ecf08 | ||
|
|
34d945055b | ||
|
|
8c44da8233 | ||
|
|
a8b79947ef | ||
|
|
7c653f809d | ||
|
|
49f1603f40 | ||
|
|
b4369ea932 | ||
|
|
83ba7baa4b | ||
|
|
9339ae30fd | ||
|
|
c18f2bd445 | ||
|
|
319876bbb0 | ||
|
|
442ba97c61 | ||
|
|
00e0b0b547 | ||
|
|
145f478249 | ||
|
|
aac2150a3a | ||
|
|
9b713637b9 | ||
|
|
699660d472 | ||
|
|
751aa17534 | ||
|
|
2681bd2fe3 | ||
|
|
93adb3dabc | ||
|
|
0e15e58a88 | ||
|
|
ef2ea999df | ||
|
|
ca367611d7 | ||
|
|
eb8f073856 | ||
|
|
3ae43eba16 | ||
|
|
9719a08eaa | ||
|
|
1e165cbeb8 | ||
|
|
8be8cafd00 | ||
|
|
e74d2aadb5 | ||
|
|
9c97422f43 | ||
|
|
deb0a3ff1f | ||
|
|
95ed1f0e97 | ||
|
|
6a111dadd6 | ||
|
|
95b3623c04 | ||
|
|
326d62a640 | ||
|
|
9d990650f3 | ||
|
|
4bc891b640 |
@@ -1,2 +1,26 @@
|
||||
FROM kerberos/devcontainer:0a50dc9
|
||||
LABEL AUTHOR=Kerberos.io
|
||||
FROM mcr.microsoft.com/devcontainers/go:1.24-bookworm
|
||||
|
||||
# Install node environment
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
nodejs \
|
||||
npm \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install ffmpeg
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ffmpeg \
|
||||
libavcodec-extra \
|
||||
libavutil-dev \
|
||||
libavformat-dev \
|
||||
libavfilter-dev \
|
||||
libavdevice-dev \
|
||||
libswscale-dev \
|
||||
libswresample-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
USER vscode
|
||||
|
||||
# Install go swagger
|
||||
RUN go install github.com/swaggo/swag/cmd/swag@latest
|
||||
@@ -1,33 +1,24 @@
|
||||
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
|
||||
// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/docker-existing-dockerfile
|
||||
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
|
||||
// README at: https://github.com/devcontainers/templates/tree/main/src/python
|
||||
{
|
||||
"name": "A Dockerfile containing FFmpeg, OpenCV, Go and Yarn",
|
||||
// Sets the run context to one level up instead of the .devcontainer folder.
|
||||
"context": "..",
|
||||
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
|
||||
"dockerFile": "./Dockerfile",
|
||||
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||
"forwardPorts": [
|
||||
3000,
|
||||
80
|
||||
"name": "go:1.24-bookworm",
|
||||
"runArgs": [
|
||||
"--name=agent",
|
||||
"--network=host"
|
||||
],
|
||||
// Uncomment the next line to run commands after the container is created - for example installing curl.
|
||||
"postCreateCommand": "cd ui && yarn install && yarn build && cd ../machinery && go mod download",
|
||||
"features": {
|
||||
"ghcr.io/devcontainers-contrib/features/ansible:1": {}
|
||||
},
|
||||
"dockerFile": "Dockerfile",
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
"extensions": [
|
||||
"ms-kubernetes-tools.vscode-kubernetes-tools",
|
||||
"GitHub.copilot"
|
||||
"GitHub.copilot",
|
||||
"ms-azuretools.vscode-docker",
|
||||
"mongodb.mongodb-vscode"
|
||||
]
|
||||
}
|
||||
},
|
||||
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
|
||||
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
|
||||
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
|
||||
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
|
||||
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
|
||||
// "remoteUser": "vscode"
|
||||
"forwardPorts": [
|
||||
3000,
|
||||
8080
|
||||
],
|
||||
"postCreateCommand": "cd ui && yarn install && yarn build && cd ../machinery && go mod download"
|
||||
}
|
||||
58
.github/workflows/docker-dev.yml
vendored
58
.github/workflows/docker-dev.yml
vendored
@@ -1,58 +0,0 @@
|
||||
name: Docker development build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ develop ]
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Create new and append to latest manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
build-other:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
#architecture: [arm64, arm/v7, arm/v6]
|
||||
architecture: [arm64, arm/v7]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
54
.github/workflows/docker-nightly.yml
vendored
54
.github/workflows/docker-nightly.yml
vendored
@@ -1,54 +0,0 @@
|
||||
name: Docker nightly build
|
||||
|
||||
on:
|
||||
# Triggers the workflow every day at 9PM (CET).
|
||||
schedule:
|
||||
- cron: "0 22 * * *"
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
run: git clone https://github.com/kerberos-io/agent && cd agent
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
build-other:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [arm64, arm/v7, arm/v6]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
run: git clone https://github.com/kerberos-io/agent && cd agent
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
114
.github/workflows/docker.yml
vendored
114
.github/workflows/docker.yml
vendored
@@ -1,114 +0,0 @@
|
||||
name: Docker master build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
|
||||
env:
|
||||
REPO: kerberos/agent
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t $REPO:${{ steps.short-sha.outputs.sha }} $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{steps.short-sha.outputs.sha}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
# We'll make a GitHub release and push the build (tar) as an artifact
|
||||
- uses: rickstaa/action-create-tag@v1
|
||||
with:
|
||||
tag: ${{ steps.short-sha.outputs.sha }}
|
||||
message: "Release ${{ steps.short-sha.outputs.sha }}"
|
||||
- name: Create a release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
latest: true
|
||||
name: ${{ steps.short-sha.outputs.sha }}
|
||||
tag: ${{ steps.short-sha.outputs.sha }}
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
# Taken from GoReleaser's own release workflow.
|
||||
# The available Snapcraft Action has some bugs described in the issue below.
|
||||
# The mkdirs are a hack for https://github.com/goreleaser/goreleaser/issues/1715.
|
||||
#- name: Setup Snapcraft
|
||||
# run: |
|
||||
# sudo apt-get update
|
||||
# sudo apt-get -yq --no-install-suggests --no-install-recommends install snapcraft
|
||||
# mkdir -p $HOME/.cache/snapcraft/download
|
||||
# mkdir -p $HOME/.cache/snapcraft/stage-packages
|
||||
#- name: Use Snapcraft
|
||||
# run: tar -xf agent-${{matrix.architecture}}.tar && snapcraft
|
||||
build-other:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
needs: build-amd64
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [arm64, arm-v7, arm-v6]
|
||||
#architecture: [arm64, arm-v7]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t $REPO:${{ steps.short-sha.outputs.sha }} $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{steps.short-sha.outputs.sha}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
- name: Create a release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
latest: true
|
||||
allowUpdates: true
|
||||
name: ${{ steps.short-sha.outputs.sha }}
|
||||
tag: ${{ steps.short-sha.outputs.sha }}
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
|
||||
44
.github/workflows/go.yml
vendored
44
.github/workflows/go.yml
vendored
@@ -2,37 +2,37 @@ name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ develop, master ]
|
||||
branches: [develop, master]
|
||||
pull_request:
|
||||
branches: [ develop, master ]
|
||||
branches: [develop, master]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: kerberos/base:0a50dc9
|
||||
|
||||
image: kerberos/base:eb6b088
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
#No longer supported Go versions.
|
||||
#go-version: ['1.17', '1.18', '1.19']
|
||||
go-version: ['1.20', '1.21']
|
||||
#go-version: ['1.17', '1.18', '1.19', '1.20', '1.21']
|
||||
go-version: ["1.24"]
|
||||
|
||||
steps:
|
||||
- name: Set up Go ${{ matrix.go-version }}
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up git ownershi
|
||||
run: git config --system --add safe.directory /__w/agent/agent
|
||||
- name: Get dependencies
|
||||
run: cd machinery && go mod download
|
||||
- name: Build
|
||||
run: cd machinery && go build -v ./...
|
||||
- name: Vet
|
||||
run: cd machinery && go vet -v ./...
|
||||
- name: Test
|
||||
run: cd machinery && go test -v ./...
|
||||
- name: Set up Go ${{ matrix.go-version }}
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: ${{ matrix.go-version }}
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v4
|
||||
- name: Set up git ownershi
|
||||
run: git config --system --add safe.directory /__w/agent/agent
|
||||
- name: Get dependencies
|
||||
run: cd machinery && go mod download
|
||||
- name: Build
|
||||
run: cd machinery && go build -v ./...
|
||||
- name: Vet
|
||||
run: cd machinery && go vet -v ./...
|
||||
- name: Test
|
||||
run: cd machinery && go test -v ./...
|
||||
|
||||
51
.github/workflows/issue-userstory-create.yml
vendored
Normal file
51
.github/workflows/issue-userstory-create.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: Create User Story Issue
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
issue_title:
|
||||
description: 'Title for the issue'
|
||||
required: true
|
||||
issue_description:
|
||||
description: 'Brief description of the feature'
|
||||
required: true
|
||||
complexity:
|
||||
description: 'Complexity of the feature'
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- 'Low'
|
||||
- 'Medium'
|
||||
- 'High'
|
||||
default: 'Medium'
|
||||
duration:
|
||||
description: 'Estimated duration'
|
||||
required: true
|
||||
type: choice
|
||||
options:
|
||||
- '1 day'
|
||||
- '3 days'
|
||||
- '1 week'
|
||||
- '2 weeks'
|
||||
- '1 month'
|
||||
default: '1 week'
|
||||
|
||||
jobs:
|
||||
create-issue:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
steps:
|
||||
- name: Create Issue with User Story
|
||||
uses: cedricve/llm-create-issue-user-story@main
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
azure_openai_api_key: ${{ secrets.AZURE_OPENAI_API_KEY }}
|
||||
azure_openai_endpoint: ${{ secrets.AZURE_OPENAI_ENDPOINT }}
|
||||
azure_openai_version: ${{ secrets.AZURE_OPENAI_VERSION }}
|
||||
openai_model: ${{ secrets.OPENAI_MODEL }}
|
||||
issue_title: ${{ github.event.inputs.issue_title }}
|
||||
issue_description: ${{ github.event.inputs.issue_description }}
|
||||
complexity: ${{ github.event.inputs.complexity }}
|
||||
duration: ${{ github.event.inputs.duration }}
|
||||
labels: 'user-story,feature'
|
||||
assignees: ${{ github.actor }}
|
||||
60
.github/workflows/nightly-build.yml
vendored
Normal file
60
.github/workflows/nightly-build.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
name: Nightly build
|
||||
|
||||
on:
|
||||
# Triggers the workflow every day at 9PM (CET).
|
||||
schedule:
|
||||
- cron: "0 22 * * *"
|
||||
# Allows manual triggering from the Actions tab.
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
nightly-build-amd64:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: master
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
nightly-build-other:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [arm64, arm/v7, arm/v6]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: master
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
|
||||
75
.github/workflows/pr-build.yml
vendored
Normal file
75
.github/workflows/pr-build.yml
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
name: Build pull request
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
|
||||
env:
|
||||
REPO: kerberos/agent
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
runs-on: ubuntu-24.04
|
||||
permissions:
|
||||
contents: write
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Run Build
|
||||
run: |
|
||||
docker build -t ${{matrix.architecture}} .
|
||||
CID=$(docker create ${{matrix.architecture}})
|
||||
docker cp ${CID}:/home/agent ./output-${{matrix.architecture}}
|
||||
docker rm ${CID}
|
||||
- name: Strip binary
|
||||
run: tar -cf agent-${{matrix.architecture}}.tar -C output-${{matrix.architecture}} . && rm -rf output-${{matrix.architecture}}
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: agent-${{matrix.architecture}}.tar
|
||||
path: agent-${{matrix.architecture}}.tar
|
||||
|
||||
build-arm64:
|
||||
runs-on: ubuntu-24.04-arm
|
||||
permissions:
|
||||
contents: write
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [arm64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Run Build
|
||||
run: |
|
||||
docker build -t ${{matrix.architecture}} -f Dockerfile.arm64 .
|
||||
CID=$(docker create ${{matrix.architecture}})
|
||||
docker cp ${CID}:/home/agent ./output-${{matrix.architecture}}
|
||||
docker rm ${CID}
|
||||
- name: Strip binary
|
||||
run: tar -cf agent-${{matrix.architecture}}.tar -C output-${{matrix.architecture}} . && rm -rf output-${{matrix.architecture}}
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: agent-${{matrix.architecture}}.tar
|
||||
path: agent-${{matrix.architecture}}.tar
|
||||
|
||||
26
.github/workflows/pr-description.yaml
vendored
Normal file
26
.github/workflows/pr-description.yaml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: Autofill PR description
|
||||
|
||||
on: pull_request
|
||||
|
||||
env:
|
||||
ORGANIZATION: uugai
|
||||
PROJECT: ${{ github.event.repository.name }}
|
||||
PR_NUMBER: ${{ github.event.number }}
|
||||
|
||||
jobs:
|
||||
openai-pr-description:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Autofill PR description if empty using OpenAI
|
||||
uses: cedricve/azureopenai-pr-description@master
|
||||
with:
|
||||
github_token: ${{ secrets.TOKEN }}
|
||||
openai_api_key: ${{ secrets.OPENAI_API_KEY }}
|
||||
azure_openai_api_key: ${{ secrets.AZURE_OPENAI_API_KEY }}
|
||||
azure_openai_endpoint: ${{ secrets.AZURE_OPENAI_ENDPOINT }}
|
||||
azure_openai_version: ${{ secrets.AZURE_OPENAI_VERSION }}
|
||||
openai_model: ${{ secrets.OPENAI_MODEL }}
|
||||
pull_request_url: https://pr${{ env.PR_NUMBER }}.api.kerberos.lol
|
||||
overwrite_description: true
|
||||
130
.github/workflows/release-create.yml
vendored
Normal file
130
.github/workflows/release-create.yml
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
name: Create a new release
|
||||
on:
|
||||
release:
|
||||
types: [created]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Tag for the Docker image"
|
||||
required: true
|
||||
default: "test"
|
||||
|
||||
env:
|
||||
REPO: kerberos/agent
|
||||
|
||||
jobs:
|
||||
build-amd64:
|
||||
runs-on: ubuntu-24.04
|
||||
permissions:
|
||||
contents: write
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [amd64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Run Build
|
||||
run: |
|
||||
docker build --provenance=false --build-arg VERSION=${{github.event.inputs.tag || github.ref_name}} -t ${{matrix.architecture}} .
|
||||
CID=$(docker create ${{matrix.architecture}})
|
||||
docker cp ${CID}:/home/agent ./output-${{matrix.architecture}}
|
||||
docker rm ${CID}
|
||||
- name: Strip binary
|
||||
run: tar -cf agent-${{matrix.architecture}}.tar -C output-${{matrix.architecture}} . && rm -rf output-${{matrix.architecture}}
|
||||
- name: Build and push Docker image
|
||||
run: |
|
||||
docker tag ${{matrix.architecture}} $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
docker push $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: agent-${{matrix.architecture}}.tar
|
||||
path: agent-${{matrix.architecture}}.tar
|
||||
|
||||
build-arm64:
|
||||
runs-on: ubuntu-24.04-arm
|
||||
permissions:
|
||||
contents: write
|
||||
strategy:
|
||||
matrix:
|
||||
architecture: [arm64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
- uses: benjlevesque/short-sha@v2.1
|
||||
id: short-sha
|
||||
with:
|
||||
length: 7
|
||||
- name: Run Build
|
||||
run: |
|
||||
docker build --provenance=false --build-arg VERSION=${{github.event.inputs.tag || github.ref_name}} -t ${{matrix.architecture}} -f Dockerfile.arm64 .
|
||||
CID=$(docker create ${{matrix.architecture}})
|
||||
docker cp ${CID}:/home/agent ./output-${{matrix.architecture}}
|
||||
docker rm ${CID}
|
||||
- name: Strip binary
|
||||
run: tar -cf agent-${{matrix.architecture}}.tar -C output-${{matrix.architecture}} . && rm -rf output-${{matrix.architecture}}
|
||||
- name: Build and push Docker image
|
||||
run: |
|
||||
docker tag ${{matrix.architecture}} $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
docker push $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: agent-${{matrix.architecture}}.tar
|
||||
path: agent-${{matrix.architecture}}.tar
|
||||
|
||||
create-manifest:
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [build-amd64, build-arm64]
|
||||
steps:
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_PASSWORD }}
|
||||
- name: Create and push multi-arch manifest
|
||||
run: |
|
||||
docker manifest create $REPO:${{ github.event.inputs.tag || github.ref_name }} \
|
||||
$REPO-arch:arch-amd64-${{github.event.inputs.tag || github.ref_name}} \
|
||||
$REPO-arch:arch-arm64-${{github.event.inputs.tag || github.ref_name}}
|
||||
docker manifest push $REPO:${{ github.event.inputs.tag || github.ref_name }}
|
||||
- name: Create and push latest manifest
|
||||
run: |
|
||||
docker manifest create $REPO:latest \
|
||||
$REPO-arch:arch-amd64-${{github.event.inputs.tag || github.ref_name}} \
|
||||
$REPO-arch:arch-arm64-${{github.event.inputs.tag || github.ref_name}}
|
||||
docker manifest push $REPO:latest
|
||||
if: github.event.inputs.tag == 'test'
|
||||
|
||||
create-release:
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [build-amd64, build-arm64]
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
- name: Create a release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
latest: true
|
||||
allowUpdates: true
|
||||
name: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
tag: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
generateReleaseNotes: false
|
||||
omitBodyDuringUpdate: true
|
||||
artifacts: "agent-*.tar/agent-*.tar"
|
||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -1,6 +1,8 @@
|
||||
ui/node_modules
|
||||
ui/build
|
||||
ui/public/assets/env.js
|
||||
.DS_Store
|
||||
__debug*
|
||||
.idea
|
||||
machinery/www
|
||||
yarn.lock
|
||||
@@ -10,5 +12,7 @@ machinery/data/recordings
|
||||
machinery/data/snapshots
|
||||
machinery/test*
|
||||
machinery/init-dev.sh
|
||||
machinery/.env
|
||||
deployments/docker/private-docker-compose.yaml
|
||||
machinery/.env.local
|
||||
machinery/vendor
|
||||
deployments/docker/private-docker-compose.yaml
|
||||
video.mp4
|
||||
19
.travis.yml
19
.travis.yml
@@ -1,19 +0,0 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- 1.14.x
|
||||
- 1.15.x
|
||||
- tip
|
||||
|
||||
before_install:
|
||||
- cd machinery
|
||||
- go mod download
|
||||
|
||||
script:
|
||||
- go vet
|
||||
- go test -race -coverprofile=coverage.txt -covermode=atomic
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
33
.vscode/launch.json
vendored
Normal file
33
.vscode/launch.json
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch Golang",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}/machinery/main.go",
|
||||
"args": [
|
||||
"-action",
|
||||
"run",
|
||||
"-port",
|
||||
"8080"
|
||||
],
|
||||
"envFile": "${workspaceFolder}/machinery/.env.local",
|
||||
"buildFlags": "--tags dynamic",
|
||||
},
|
||||
{
|
||||
"name": "Launch React",
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"cwd": "${workspaceFolder}/ui",
|
||||
"runtimeExecutable": "yarn",
|
||||
"runtimeArgs": [
|
||||
"start"
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
27
Dockerfile
27
Dockerfile
@@ -1,6 +1,8 @@
|
||||
|
||||
FROM kerberos/base:0a50dc9 AS build-machinery
|
||||
LABEL AUTHOR=Kerberos.io
|
||||
ARG BASE_IMAGE_VERSION=amd64-ddbe40e
|
||||
ARG VERSION=0.0.0
|
||||
FROM kerberos/base:${BASE_IMAGE_VERSION} AS build-machinery
|
||||
LABEL AUTHOR=uug.ai
|
||||
|
||||
ENV GOROOT=/usr/local/go
|
||||
ENV GOPATH=/go
|
||||
@@ -33,7 +35,8 @@ RUN cat /go/src/github.com/kerberos-io/agent/machinery/version
|
||||
|
||||
RUN cd /go/src/github.com/kerberos-io/agent/machinery && \
|
||||
go mod download && \
|
||||
go build -tags timetzdata,netgo,osusergo --ldflags '-s -w -extldflags "-static -latomic"' main.go && \
|
||||
VERSION=$(cd /go/src/github.com/kerberos-io/agent && git describe --tags --always 2>/dev/null || echo "${VERSION}") && \
|
||||
go build -tags timetzdata,netgo,osusergo --ldflags "-s -w -X github.com/kerberos-io/agent/machinery/src/utils.VERSION=${VERSION} -extldflags '-static -latomic'" main.go && \
|
||||
mkdir -p /agent && \
|
||||
mv main /agent && \
|
||||
mv version /agent && \
|
||||
@@ -43,8 +46,7 @@ RUN cd /go/src/github.com/kerberos-io/agent/machinery && \
|
||||
mkdir -p /agent/data/log && \
|
||||
mkdir -p /agent/data/recordings && \
|
||||
mkdir -p /agent/data/capture-test && \
|
||||
mkdir -p /agent/data/config && \
|
||||
rm -rf /go/src/gitlab.com/
|
||||
mkdir -p /agent/data/config
|
||||
|
||||
####################################
|
||||
# Let's create a /dist folder containing just the files necessary for runtime.
|
||||
@@ -58,18 +60,6 @@ RUN cp -r /agent ./
|
||||
|
||||
RUN /dist/agent/main version
|
||||
|
||||
###############################################
|
||||
# Build Bento4 -> we want fragmented mp4 files
|
||||
|
||||
ENV BENTO4_VERSION 1.6.0-639
|
||||
RUN cd /tmp && git clone https://github.com/axiomatic-systems/Bento4 && cd Bento4 && \
|
||||
git checkout tags/v${BENTO4_VERSION} && \
|
||||
cd Build && \
|
||||
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||
make && \
|
||||
mv /tmp/Bento4/Build/mp4fragment /dist/agent/ && \
|
||||
rm -rf /tmp/Bento4
|
||||
|
||||
FROM node:18.14.0-alpine3.16 AS build-ui
|
||||
|
||||
RUN apk update && apk upgrade --available && sync
|
||||
@@ -111,7 +101,6 @@ RUN apk update && apk add ca-certificates curl libstdc++ libc6-compat --no-cache
|
||||
# Try running agent
|
||||
|
||||
RUN mv /agent/* /home/agent/
|
||||
RUN cp /home/agent/mp4fragment /usr/local/bin/
|
||||
RUN /home/agent/main version
|
||||
|
||||
#######################
|
||||
@@ -148,4 +137,4 @@ HEALTHCHECK CMD curl --fail http://localhost:80 || exit 1
|
||||
# Leeeeettttt'ssss goooooo!!!
|
||||
# Run the shizzle from the right working directory.
|
||||
WORKDIR /home/agent
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
140
Dockerfile.arm64
Normal file
140
Dockerfile.arm64
Normal file
@@ -0,0 +1,140 @@
|
||||
|
||||
ARG BASE_IMAGE_VERSION=arm64-ddbe40e
|
||||
ARG VERSION=0.0.0
|
||||
FROM kerberos/base:${BASE_IMAGE_VERSION} AS build-machinery
|
||||
LABEL AUTHOR=uug.ai
|
||||
|
||||
ENV GOROOT=/usr/local/go
|
||||
ENV GOPATH=/go
|
||||
ENV PATH=$GOPATH/bin:$GOROOT/bin:/usr/local/lib:$PATH
|
||||
ENV GOSUMDB=off
|
||||
|
||||
##########################################
|
||||
# Installing some additional dependencies.
|
||||
|
||||
RUN apt-get upgrade -y && apt-get update && apt-get install -y --fix-missing --no-install-recommends \
|
||||
git build-essential cmake pkg-config unzip libgtk2.0-dev \
|
||||
curl ca-certificates libcurl4-openssl-dev libssl-dev libjpeg62-turbo-dev && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
##############################################################################
|
||||
# Copy all the relevant source code in the Docker image, so we can build this.
|
||||
|
||||
RUN mkdir -p /go/src/github.com/kerberos-io/agent
|
||||
COPY machinery /go/src/github.com/kerberos-io/agent/machinery
|
||||
RUN rm -rf /go/src/github.com/kerberos-io/agent/machinery/.env
|
||||
|
||||
##################################################################
|
||||
# Get the latest commit hash, so we know which version we're running
|
||||
COPY .git /go/src/github.com/kerberos-io/agent/.git
|
||||
RUN cd /go/src/github.com/kerberos-io/agent/.git && git log --format="%H" -n 1 | head -c7 > /go/src/github.com/kerberos-io/agent/machinery/version
|
||||
RUN cat /go/src/github.com/kerberos-io/agent/machinery/version
|
||||
|
||||
##################
|
||||
# Build Machinery
|
||||
|
||||
RUN cd /go/src/github.com/kerberos-io/agent/machinery && \
|
||||
go mod download && \
|
||||
VERSION=$(cd /go/src/github.com/kerberos-io/agent && git describe --tags --always 2>/dev/null || echo "${VERSION}") && \
|
||||
go build -tags timetzdata,netgo,osusergo --ldflags "-s -w -X github.com/kerberos-io/agent/machinery/src/utils.VERSION=${VERSION} -extldflags '-static -latomic'" main.go && \
|
||||
mkdir -p /agent && \
|
||||
mv main /agent && \
|
||||
mv version /agent && \
|
||||
mv data /agent && \
|
||||
mkdir -p /agent/data/cloud && \
|
||||
mkdir -p /agent/data/snapshots && \
|
||||
mkdir -p /agent/data/log && \
|
||||
mkdir -p /agent/data/recordings && \
|
||||
mkdir -p /agent/data/capture-test && \
|
||||
mkdir -p /agent/data/config
|
||||
|
||||
####################################
|
||||
# Let's create a /dist folder containing just the files necessary for runtime.
|
||||
# Later, it will be copied as the / (root) of the output image.
|
||||
|
||||
WORKDIR /dist
|
||||
RUN cp -r /agent ./
|
||||
|
||||
####################################################################################
|
||||
# This will collect dependent libraries so they're later copied to the final image.
|
||||
|
||||
RUN /dist/agent/main version
|
||||
|
||||
FROM node:18.14.0-alpine3.16 AS build-ui
|
||||
|
||||
RUN apk update && apk upgrade --available && sync
|
||||
|
||||
########################
|
||||
# Build Web (React app)
|
||||
|
||||
RUN mkdir -p /go/src/github.com/kerberos-io/agent/machinery/www
|
||||
COPY ui /go/src/github.com/kerberos-io/agent/ui
|
||||
RUN cd /go/src/github.com/kerberos-io/agent/ui && rm -rf yarn.lock && yarn config set network-timeout 300000 && \
|
||||
yarn && yarn build
|
||||
|
||||
####################################
|
||||
# Let's create a /dist folder containing just the files necessary for runtime.
|
||||
# Later, it will be copied as the / (root) of the output image.
|
||||
|
||||
WORKDIR /dist
|
||||
RUN mkdir -p ./agent && cp -r /go/src/github.com/kerberos-io/agent/machinery/www ./agent/
|
||||
|
||||
############################################
|
||||
# Publish main binary to GitHub release
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
############################
|
||||
# Protect by non-root user.
|
||||
|
||||
RUN addgroup -S kerberosio && adduser -S agent -G kerberosio && addgroup agent video
|
||||
|
||||
#################################
|
||||
# Copy files from previous images
|
||||
|
||||
COPY --chown=0:0 --from=build-machinery /dist /
|
||||
COPY --chown=0:0 --from=build-ui /dist /
|
||||
|
||||
RUN apk update && apk add ca-certificates curl libstdc++ libc6-compat --no-cache && rm -rf /var/cache/apk/*
|
||||
|
||||
##################
|
||||
# Try running agent
|
||||
|
||||
RUN mv /agent/* /home/agent/
|
||||
RUN /home/agent/main version
|
||||
|
||||
#######################
|
||||
# Make template config
|
||||
|
||||
RUN cp /home/agent/data/config/config.json /home/agent/data/config.template.json
|
||||
|
||||
###########################
|
||||
# Set permissions correctly
|
||||
|
||||
RUN chown -R agent:kerberosio /home/agent/data
|
||||
RUN chown -R agent:kerberosio /home/agent/www
|
||||
|
||||
###########################
|
||||
# Grant the necessary root capabilities to the process trying to bind to the privileged port
|
||||
RUN apk add libcap && setcap 'cap_net_bind_service=+ep' /home/agent/main
|
||||
|
||||
###################
|
||||
# Run non-root user
|
||||
|
||||
USER agent
|
||||
|
||||
######################################
|
||||
# By default the app runs on port 80
|
||||
|
||||
EXPOSE 80
|
||||
|
||||
######################################
|
||||
# Check if agent is still running
|
||||
|
||||
HEALTHCHECK CMD curl --fail http://localhost:80 || exit 1
|
||||
|
||||
###################################################
|
||||
# Leeeeettttt'ssss goooooo!!!
|
||||
# Run the shizzle from the right working directory.
|
||||
WORKDIR /home/agent
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
206
README.md
206
README.md
@@ -17,11 +17,14 @@
|
||||
<a href="LICENSE"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
|
||||
[](https://brianmacdonald.github.io/Ethonate/address#0xf4a759C9436E2280Ea9cdd23d3144D95538fF4bE)
|
||||
<a target="_blank" href="https://twitter.com/kerberosio?ref_src=twsrc%5Etfw"><img src="https://img.shields.io/twitter/url.svg?label=Follow%20%40kerberosio&style=social&url=https%3A%2F%2Ftwitter.com%2Fkerberosio" alt="Twitter Widget"></a>
|
||||
[](https://discord.gg/Bj77Vqfp2G)
|
||||
[](https://snapcraft.io/kerberosio)
|
||||
|
||||
[](https://joinslack.kerberos.io/)
|
||||
|
||||
[**Docker Hub**](https://hub.docker.com/r/kerberos/agent) | [**Documentation**](https://doc.kerberos.io) | [**Website**](https://kerberos.io) | [**View Demo**](https://demo.kerberos.io)
|
||||
|
||||
> Before you continue, this repository discusses one of the components of the Kerberos.io stack, the Kerberos Agent, in depth. If you are [looking for an end-to-end deployment guide have a look here](https://github.com/kerberos-io/deployment).
|
||||
|
||||
Kerberos Agent is an isolated and scalable video (surveillance) management agent made available as Open Source under the MIT License. This means that all the source code is available for you or your company, and you can use, transform and distribute the source code; as long you keep a reference of the original license. Kerberos Agent can be used for commercial usage (which was not the case for v2). Read more [about the license here](LICENSE).
|
||||
|
||||

|
||||
@@ -30,7 +33,7 @@ Kerberos Agent is an isolated and scalable video (surveillance) management agent
|
||||
|
||||
- An IP camera which supports a RTSP H264 or H265 encoded stream,
|
||||
- (or) a USB camera, Raspberry Pi camera or other camera, that [you can transform to a valid RTSP H264 or H265 stream](https://github.com/kerberos-io/camera-to-rtsp).
|
||||
- Any hardware (ARMv6, ARMv7, ARM64, AMD) that can run a binary or container, for example: a Raspberry Pi, NVidia Jetson, Intel NUC, a VM, Bare metal machine or a full blown Kubernetes cluster.
|
||||
- Any hardware (ARMv6, ARMv7, ARM64, AMD64) that can run a binary or container, for example: a Raspberry Pi, NVidia Jetson, Intel NUC, a VM, Bare metal machine or a full blown Kubernetes cluster.
|
||||
|
||||
## :video_camera: Is my camera working?
|
||||
|
||||
@@ -64,8 +67,8 @@ There are a myriad of cameras out there (USB, IP and other cameras), and it migh
|
||||
|
||||
1. [Contribute with Codespaces](#contribute-with-codespaces)
|
||||
2. [Develop and build](#develop-and-build)
|
||||
3. [Building from source](#building-from-source)
|
||||
4. [Building for Docker](#building-for-docker)
|
||||
3. [Building from source](#building-from-source)
|
||||
4. [Building for Docker](#building-for-docker)
|
||||
|
||||
### Varia
|
||||
|
||||
@@ -75,17 +78,17 @@ There are a myriad of cameras out there (USB, IP and other cameras), and it migh
|
||||
|
||||
## Quickstart - Docker
|
||||
|
||||
The easiest to get your Kerberos Agent up and running is to use our public image on [Docker hub](https://hub.docker.com/r/kerberos/agent). Once you have selected a specific tag, run below `docker` command, which will open the web interface of your Kerberos agent on port `80`, and off you go. For a more configurable and persistent deployment have a look at [Running and automating a Kerberos Agent](#running-and-automating-a-kerberos-agent).
|
||||
The easiest way to get your Kerberos Agent up and running is to use our public image on [Docker hub](https://hub.docker.com/r/kerberos/agent). Once you have selected a specific tag, run `docker` command below, which will open the web interface of your Kerberos agent on port `80`, and off you go. For a more configurable and persistent deployment have a look at [Running and automating a Kerberos Agent](#running-and-automating-a-kerberos-agent).
|
||||
|
||||
docker run -p 80:80 --name mycamera -d --restart=always kerberos/agent:latest
|
||||
|
||||
If you want to connect to an USB or Raspberry Pi camera, [you'll need to run our side car container](https://github.com/kerberos-io/camera-to-rtsp) which proxy the camera to an RTSP stream. In that case you'll want to configure the Kerberos Agent container to run in the host network, so it can connect directly to the RTSP sidecar.
|
||||
If you want to connect to a USB or Raspberry Pi camera, [you'll need to run our side car container](https://github.com/kerberos-io/camera-to-rtsp) which proxies the camera to an RTSP stream. In that case you'll want to configure the Kerberos Agent container to run in the host network, so it can connect directly to the RTSP sidecar.
|
||||
|
||||
docker run --network=host --name mycamera -d --restart=always kerberos/agent:latest
|
||||
|
||||
## Quickstart - Balena
|
||||
|
||||
Run Kerberos Agent with [Balena Cloud](https://www.balena.io/) super powers. Monitor your Kerberos Agent with seamless remote access, over the air updates, an encrypted public `https` endpoint and many more. Checkout our application `video-surveillance` on [Balena Hub](https://hub.balena.io/apps/2064752/video-surveillance), and create your first or fleet of Kerberos Agent(s).
|
||||
Run Kerberos Agent with [Balena Cloud](https://www.balena.io/) super powers. Monitor your Kerberos Agent with seamless remote access, over the air updates, an encrypted public `https` endpoint and much more. Checkout our application `video-surveillance` on [Balena Hub](https://hub.balena.io/apps/2064752/video-surveillance), and create your first or fleet of Kerberos Agent(s).
|
||||
|
||||
[](https://dashboard.balena-cloud.com/deploy?repoUrl=https://github.com/kerberos-io/balena-agent)
|
||||
|
||||
@@ -101,17 +104,19 @@ Once installed you can find your Kerberos Agent configration at `/var/snap/kerbe
|
||||
|
||||
## A world of Kerberos Agents
|
||||
|
||||
The Kerberos Agent is an isolated and scalable video (surveillance) management agent with a strong focus on user experience, scalability, resilience, extension and integration. Next to the Kerberos Agent, Kerberos.io provides many other tools such as [Kerberos Factory](https://github.com/kerberos-io/factory), [Kerberos Vault](https://github.com/kerberos-io/vault) and [Kerberos Hub](https://github.com/kerberos-io/hub) to provide additional capabilities: bring your own cloud, bring your own storage, central overview, live streaming, machine learning etc.
|
||||
The Kerberos Agent is an isolated and scalable video (surveillance) management agent with a strong focus on user experience, scalability, resilience, extension and integration. Next to the Kerberos Agent, Kerberos.io provides many other tools such as [Kerberos Factory](https://github.com/kerberos-io/factory), [Kerberos Vault](https://github.com/kerberos-io/vault), and [Kerberos Hub](https://github.com/kerberos-io/hub) to provide additional capabilities: bring your own cloud, bring your own storage, central overview, live streaming, machine learning, etc.
|
||||
|
||||
As mentioned above Kerberos.io applies the concept of agents. An agent is running next to (or on) your camera, and is processing a single camera feed. It applies motion based or continuous recording and make those recordings available through a user friendly web interface. A Kerberos Agent allows you to connect to other cloud services or integrates with custom applications. Kerberos Agent is used for personal usage and scales to enterprise production level deployments.
|
||||
[](https://github.com/kerberos-io/deployment)
|
||||
|
||||
As mentioned above Kerberos.io applies the concept of agents. An agent is running next to (or on) your camera, and is processing a single camera feed. It applies motion based or continuous recording and makes those recordings available through a user friendly web interface. A Kerberos Agent allows you to connect to other cloud services or integrate with custom applications. Kerberos Agent is used for personal applications and scales to enterprise production level deployments. Learn more about the [deployment strategies here](<(https://github.com/kerberos-io/deployment)>).
|
||||
|
||||
This repository contains everything you'll need to know about our core product, Kerberos Agent. Below you'll find a brief list of features and functions.
|
||||
|
||||
- Low memory and CPU usage.
|
||||
- Simplified and modern user interface.
|
||||
- Multi architecture (ARMv7, ARMv8, amd64, etc).).
|
||||
- Multi architecture (ARMv6, ARMv7, ARM64, AMD64)
|
||||
- Multi stream, for example recording in H265, live streaming and motion detection in H264.
|
||||
- Multi camera support: IP Cameras (H264 and H265), USB cameras and Raspberry Pi Cameras [through a RTSP proxy](https://github.com/kerberos-io/camera-to-rtsp
|
||||
- Multi camera support: IP Cameras (H264 and H265), USB cameras and Raspberry Pi Cameras [through a RTSP proxy](https://github.com/kerberos-io/camera-to-rtsp).
|
||||
- Single camera per instance (e.g. one container per camera).
|
||||
- Low resolution streaming through MQTT and high resolution streaming through WebRTC (only supports H264/PCM).
|
||||
- Backchannel audio from Kerberos Hub to IP camera (requires PCM ULAW codec)
|
||||
@@ -129,7 +134,7 @@ This repository contains everything you'll need to know about our core product,
|
||||
|
||||
## How to run and deploy a Kerberos Agent
|
||||
|
||||
As described before a Kerberos Agent is a container, which can be deployed through various ways and automation tools such as `docker`, `docker compose`, `kubernetes` and the list goes on. To simplify your life we have come with concrete and working examples of deployments to help you speed up your Kerberos.io journey.
|
||||
A Kerberos Agent, as previously mentioned, is a container. You can deploy it using various methods and automation tools, including `docker`, `docker compose`, `kubernetes` and more. To streamline your Kerberos.io experience, we provide concrete deployment examples to speed up your Kerberos.io journey”
|
||||
|
||||
We have documented the different deployment models [in the `deployments` directory](https://github.com/kerberos-io/agent/tree/master/deployments) of this repository. There you'll learn and find how to deploy using:
|
||||
|
||||
@@ -143,7 +148,7 @@ We have documented the different deployment models [in the `deployments` directo
|
||||
- [Balena](https://github.com/kerberos-io/agent/tree/master/deployments#8-balena)
|
||||
- [Snap](https://github.com/kerberos-io/agent/tree/master/deployments#9-snap)
|
||||
|
||||
By default your Kerberos Agents will store all its configuration and recordings inside the container. To help you automate and have a more consistent data governance, you can attach volumes to configure and persist data of your Kerberos Agents, and/or configure each Kerberos Agent through environment variables.
|
||||
By default, your Kerberos Agents store all configuration and recordings within the container. To help you automate and have a more consistent data governance, you can attach volumes to configure and persist data of your Kerberos Agents and/or configure each Kerberos Agent through environment variables.
|
||||
|
||||
## Access the Kerberos Agent
|
||||
|
||||
@@ -158,23 +163,23 @@ The default username and password for the Kerberos Agent is:
|
||||
|
||||
## Configure and persist with volume mounts
|
||||
|
||||
An example of how to mount a host directory is shown below using `docker`, but is applicable for [all the deployment models and tools described above](#running-and-automating-a-kerberos-agent).
|
||||
An example of how to mount a host directory is shown below using `docker`, but is applicable for [all of the deployment models and tools described above](#running-and-automating-a-kerberos-agent).
|
||||
|
||||
You attach a volume to your container by leveraging the `-v` option. To mount your own configuration file and recordings folder, execute as following:
|
||||
You attach a volume to your container by leveraging the `-v` option. To mount your own configuration file and recordings folder, run the following commands:
|
||||
|
||||
docker run -p 80:80 --name mycamera \
|
||||
-v $(pwd)/agent/config:/home/agent/data/config \
|
||||
-v $(pwd)/agent/recordings:/home/agent/data/recordings \
|
||||
-d --restart=always kerberos/agent:latest
|
||||
|
||||
More example [can be found in the deployment section](https://github.com/kerberos-io/agent/tree/master/deployments) for each deployment and automation tool. Please note to verify the permissions of the directory/volume you are attaching. More information in [this issue](https://github.com/kerberos-io/agent/issues/80).
|
||||
More examples for each deployment and automation tool [can be found in the deployment section](https://github.com/kerberos-io/agent/tree/master/deployments). Be sure to verify the permissions of the directory/volume you are attaching. More information in [this issue](https://github.com/kerberos-io/agent/issues/80).
|
||||
|
||||
chmod -R 755 kerberos-agent/
|
||||
chown 100:101 kerberos-agent/ -R
|
||||
|
||||
## Configure with environment variables
|
||||
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments easier when leveraging `docker compose` or `kubernetes` deployments much easier and scalable. Using this approach we simplify automation through `ansible` and `terraform`.
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deploying with `docker compose` or `kubernetes` much easier and more scalable. Using this approach, we simplify automation through `ansible` and `terraform`.
|
||||
|
||||
docker run -p 80:80 --name mycamera \
|
||||
-e AGENT_NAME=mycamera \
|
||||
@@ -185,76 +190,90 @@ Next to attaching the configuration file, it is also possible to override the co
|
||||
|
||||
| Name | Description | Default Value |
|
||||
| --------------------------------------- | ----------------------------------------------------------------------------------------------- | ------------------------------ |
|
||||
| `LOG_LEVEL` | Level for logging, could be "info", "warning", "debug", "error" or "fatal". | "info" |
|
||||
| `LOG_OUTPUT` | Logging output format "json" or "text". | "text" |
|
||||
| `AGENT_MODE` | You can choose to run this in 'release' for production, or 'demo' for showcasing. | "release" |
|
||||
| `AGENT_TLS_INSECURE` | Specify if you want to use `InsecureSkipVerify` for the internal HTTP client. | "false" |
|
||||
| `AGENT_USERNAME` | The username used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_PASSWORD` | The password used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_KEY` | A unique identifier for your Kerberos Agent, this is auto-generated but can be overridden. | "" |
|
||||
| `AGENT_NAME` | The agent friendly-name. | "agent" |
|
||||
| `AGENT_TIMEZONE` | Timezone which is used for converting time. | "Africa/Ceuta" |
|
||||
| `AGENT_REMOVE_AFTER_UPLOAD` | When enabled, recordings uploaded successfully to a storage will be removed from disk. | "true" |
|
||||
| `AGENT_OFFLINE` | Makes sure no external connection is made. | "false" |
|
||||
| `AGENT_AUTO_CLEAN` | Cleans up the recordings directory. | "true" |
|
||||
| `AGENT_AUTO_CLEAN_MAX_SIZE` | If `AUTO_CLEAN` enabled, set the max size of the recordings directory in (MB). | "100" |
|
||||
| `AGENT_TIME` | Enable the timetable for Kerberos Agent | "false" |
|
||||
| `AGENT_TIMETABLE` | A (weekly) time table to specify when to make recordings "start1,end1,start2,end2;start1.. | "" |
|
||||
| `AGENT_REGION_POLYGON` | A single polygon set for motion detection: "x1,y1;x2,y2;x3,y3;... | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_RTSP` | Full-HD RTSP endpoint to the camera you're targeting. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_SUB_RTSP` | Sub-stream RTSP endpoint used for livestreaming (WebRTC). | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF` | Mark as a compliant ONVIF device. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR` | ONVIF endpoint/address running on the camera. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME` | ONVIF username to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD` | ONVIF password to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_MOTION` | Toggle for enabling or disabling motion. | "true" |
|
||||
| `AGENT_CAPTURE_LIVEVIEW` | Toggle for enabling or disabling liveview. | "true" |
|
||||
| `AGENT_CAPTURE_SNAPSHOTS` | Toggle for enabling or disabling snapshot generation. | "true" |
|
||||
| `AGENT_CAPTURE_RECORDING` | Toggle for enabling making recordings. | "true" |
|
||||
| `AGENT_CAPTURE_CONTINUOUS` | Toggle for enabling continuous "true" or motion "false". | "false" |
|
||||
| `AGENT_CAPTURE_PRERECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) before after motion event. | "10" |
|
||||
| `AGENT_CAPTURE_POSTRECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) after motion event. | "20" |
|
||||
| `AGENT_CAPTURE_MAXLENGTH` | The maximum length of a single recording (seconds). | "30" |
|
||||
| `AGENT_CAPTURE_PIXEL_CHANGE` | If `CONTINUOUS` set to `false`, the number of pixels required to change before motion triggers. | "150" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED` | Set the format of the recorded MP4 to fragmented (suitable for HLS). | "false" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED_DURATION` | If `AGENT_CAPTURE_FRAGMENTED` set to `true`, define the duration (seconds) of a fragment. | "8" |
|
||||
| `AGENT_MQTT_URI` | A MQTT broker endpoint that is used for bi-directional communication (live view, onvif, etc) | "tcp://mqtt.kerberos.io:1883" |
|
||||
| `AGENT_MQTT_USERNAME` | Username of the MQTT broker. | "" |
|
||||
| `AGENT_MQTT_PASSWORD` | Password of the MQTT broker. | "" |
|
||||
| `AGENT_STUN_URI` | When using WebRTC, you'll need to provide a STUN server. | "stun:turn.kerberos.io:8443" |
|
||||
| `AGENT_TURN_URI` | When using WebRTC, you'll need to provide a TURN server. | "turn:turn.kerberos.io:8443" |
|
||||
| `AGENT_TURN_USERNAME` | TURN username used for WebRTC. | "username1" |
|
||||
| `AGENT_TURN_PASSWORD` | TURN password used for WebRTC. | "password1" |
|
||||
| `AGENT_CLOUD` | Store recordings in Kerberos Hub (s3), Kerberos Vault (kstorage) or Dropbox (dropbox). | "s3" |
|
||||
| `AGENT_HUB_URI` | The Kerberos Hub API, defaults to our Kerberos Hub SAAS. | "https://api.hub.domain.com" |
|
||||
| `AGENT_HUB_KEY` | The access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_PRIVATE_KEY` | The secret access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_REGION` | The Kerberos Hub region, to which you want to upload. | "" |
|
||||
| `AGENT_HUB_SITE` | The site ID of a site you've created in your Kerberos Hub account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
|
||||
| `AGENT_KERBEROSVAULT_ACCESS_KEY` | The access key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECRET_KEY` | The secret key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_PROVIDER` | A Kerberos Vault provider you have created (optional). | "" |
|
||||
| `AGENT_KERBEROSVAULT_DIRECTORY` | The directory, in the provider, where the recordings will be stored in. | "" |
|
||||
| `AGENT_DROPBOX_ACCESS_TOKEN` | The Access Token from your Dropbox app, that is used to leverage the Dropbox SDK. | "" |
|
||||
| `AGENT_DROPBOX_DIRECTORY` | The directory, in the provider, where the recordings will be stored in. | "" |
|
||||
| `AGENT_ENCRYPTION` | Enable 'true' or disable 'false' end-to-end encryption for MQTT messages. | "false" |
|
||||
| `AGENT_ENCRYPTION_RECORDINGS` | Enable 'true' or disable 'false' end-to-end encryption for recordings. | "false" |
|
||||
| `AGENT_ENCRYPTION_FINGERPRINT` | The fingerprint of the keypair (public/private keys), so you know which one to use. | "" |
|
||||
| `AGENT_ENCRYPTION_PRIVATE_KEY` | The private key (asymmetric/RSA) to decrypt and sign requests sent over MQTT. | "" |
|
||||
| `AGENT_ENCRYPTION_SYMMETRIC_KEY` | The symmetric key (AES) to encrypt and decrypt request send over MQTT. | "" |
|
||||
| `LOG_LEVEL` | Level for logging, could be "info", "warning", "debug", "error" or "fatal". | "info" |
|
||||
| `LOG_OUTPUT` | Logging output format "json" or "text". | "text" |
|
||||
| `AGENT_MODE` | You can choose to run this in 'release' for production, or 'demo' for showcasing. | "release" |
|
||||
| `AGENT_TLS_INSECURE` | Specify if you want to use `InsecureSkipVerify` for the internal HTTP client. | "false" |
|
||||
| `AGENT_USERNAME` | The username used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_PASSWORD` | The password used to authenticate against the Kerberos Agent login page. | "root" |
|
||||
| `AGENT_KEY` | A unique identifier for your Kerberos Agent, this is auto-generated but can be overridden. | "" |
|
||||
| `AGENT_NAME` | The agent friendly-name. | "agent" |
|
||||
| `AGENT_TIMEZONE` | Timezone which is used for converting time. | "Africa/Ceuta" |
|
||||
| `AGENT_REMOVE_AFTER_UPLOAD` | When enabled, recordings uploaded successfully to a storage will be removed from disk. | "true" |
|
||||
| `AGENT_OFFLINE` | Makes sure no external connection is made. | "false" |
|
||||
| `AGENT_AUTO_CLEAN` | Cleans up the recordings directory. | "true" |
|
||||
| `AGENT_AUTO_CLEAN_MAX_SIZE` | If `AUTO_CLEAN` enabled, set the max size of the recordings directory (in MB). | "100" |
|
||||
| `AGENT_TIME` | Enable the timetable for Kerberos Agent | "false" |
|
||||
| `AGENT_TIMETABLE` | A (weekly) time table to specify when to make recordings "start1,end1,start2,end2;start1.. | "" |
|
||||
| `AGENT_REGION_POLYGON` | A single polygon set for motion detection: "x1,y1;x2,y2;x3,y3;... | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_RTSP` | Full-HD RTSP endpoint to the camera you're targeting. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_SUB_RTSP` | Sub-stream RTSP endpoint used for livestreaming (WebRTC). | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_BASE_WIDTH` | Force a specific width resolution for live view processing. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_BASE_HEIGHT` | Force a specific height resolution for live view processing. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF` | Mark as a compliant ONVIF device. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR` | ONVIF endpoint/address running on the camera. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME` | ONVIF username to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD` | ONVIF password to authenticate against. | "" |
|
||||
| `AGENT_CAPTURE_MOTION` | Toggle for enabling or disabling motion. | "true" |
|
||||
| `AGENT_CAPTURE_LIVEVIEW` | Toggle for enabling or disabling liveview. | "true" |
|
||||
| `AGENT_CAPTURE_SNAPSHOTS` | Toggle for enabling or disabling snapshot generation. | "true" |
|
||||
| `AGENT_CAPTURE_RECORDING` | Toggle for enabling making recordings. | "true" |
|
||||
| `AGENT_CAPTURE_CONTINUOUS` | Toggle for enabling continuous "true" or motion "false". | "false" |
|
||||
| `AGENT_CAPTURE_PRERECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) before/after motion event. | "10" |
|
||||
| `AGENT_CAPTURE_POSTRECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) after motion event. | "20" |
|
||||
| `AGENT_CAPTURE_MAXLENGTH` | The maximum length of a single recording (seconds). | "30" |
|
||||
| `AGENT_CAPTURE_PIXEL_CHANGE` | If `CONTINUOUS` set to `false`, the number of pixels required to change before motion triggers. | "150" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED` | Set the format of the recorded MP4 to fragmented (suitable for HLS). | "false" |
|
||||
| `AGENT_CAPTURE_FRAGMENTED_DURATION` | If `AGENT_CAPTURE_FRAGMENTED` set to `true`, define the duration (seconds) of a fragment. | "8" |
|
||||
| `AGENT_MQTT_URI` | An MQTT broker endpoint that is used for bi-directional communication (live view, onvif, etc) | "tcp://mqtt.kerberos.io:1883" |
|
||||
| `AGENT_MQTT_USERNAME` | Username of the MQTT broker. | "" |
|
||||
| `AGENT_MQTT_PASSWORD` | Password of the MQTT broker. | "" |
|
||||
| `AGENT_REALTIME_PROCESSING` | If `AGENT_REALTIME_PROCESSING` set to `true`, the agent will send key frames to the topic | "" |
|
||||
| `AGENT_REALTIME_PROCESSING_TOPIC` | The topic to which keyframes will be sent in base64 encoded format. | "" |
|
||||
| `AGENT_STUN_URI` | When using WebRTC, you'll need to provide a STUN server. | "stun:turn.kerberos.io:8443" |
|
||||
| `AGENT_FORCE_TURN` | Force using a TURN server, by generating relay candidates only. | "false" |
|
||||
| `AGENT_TURN_URI` | When using WebRTC, you'll need to provide a TURN server. | "turn:turn.kerberos.io:8443" |
|
||||
| `AGENT_TURN_USERNAME` | TURN username used for WebRTC. | "username1" |
|
||||
| `AGENT_TURN_PASSWORD` | TURN password used for WebRTC. | "password1" |
|
||||
| `AGENT_CLOUD` | Store recordings in Kerberos Hub (s3), Kerberos Vault (kstorage), or Dropbox (dropbox). | "s3" |
|
||||
| `AGENT_HUB_ENCRYPTION` | Turning on/off encryption of traffic from your Kerberos Agent to Kerberos Hub. | "true" |
|
||||
| `AGENT_HUB_URI` | The Kerberos Hub API, defaults to our Kerberos Hub SAAS. | "https://api.hub.domain.com" |
|
||||
| `AGENT_HUB_KEY` | The access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_PRIVATE_KEY` | The secret access key linked to your account in Kerberos Hub. | "" |
|
||||
| `AGENT_HUB_REGION` | The Kerberos Hub region, to which you want to upload. | "" |
|
||||
| `AGENT_HUB_SITE` | The site ID of a site you've created in your Kerberos Hub account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
|
||||
| `AGENT_KERBEROSVAULT_ACCESS_KEY` | The access key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECRET_KEY` | The secret key of a Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_PROVIDER` | A Kerberos Vault provider you have created (optional). | "" |
|
||||
| `AGENT_KERBEROSVAULT_DIRECTORY` | The directory, in the Kerberos vault, where the recordings will be stored. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY` | The access key of a secondary Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY` | The secret key of a secondary Kerberos Vault account. | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_PROVIDER` | A secondary Kerberos Vault provider you have created (optional). | "" |
|
||||
| `AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY` | The directory, in the secondary Kerberos vault, where the recordings will be stored. | "" |
|
||||
| `AGENT_DROPBOX_ACCESS_TOKEN` | The Access Token from your Dropbox app, that is used to leverage the Dropbox SDK. | "" |
|
||||
| `AGENT_DROPBOX_DIRECTORY` | The directory, in Dropbox, where the recordings will be stored. | "" |
|
||||
| `AGENT_ENCRYPTION` | Enable 'true' or disable 'false' end-to-end encryption for MQTT messages. | "false" |
|
||||
| `AGENT_ENCRYPTION_RECORDINGS` | Enable 'true' or disable 'false' end-to-end encryption for recordings. | "false" |
|
||||
| `AGENT_ENCRYPTION_FINGERPRINT` | The fingerprint of the keypair (public/private keys), so you know which one to use. | "" |
|
||||
| `AGENT_ENCRYPTION_PRIVATE_KEY` | The private key (asymmetric/RSA) to decrypt and sign requests sent over MQTT. | "" |
|
||||
| `AGENT_ENCRYPTION_SYMMETRIC_KEY` | The symmetric key (AES) to encrypt and decrypt requests sent over MQTT. | "" |
|
||||
| `AGENT_SIGNING` | Enable 'true' or disable 'false' for signing recordings. | "true" |
|
||||
| `AGENT_SIGNING_PRIVATE_KEY` | The private key (RSA) to sign the recordings fingerprint to validate origin. | "" - uses default one if empty |
|
||||
|
||||
|
||||
## Encryption
|
||||
|
||||
You can encrypt your recordings and outgoing MQTT messages with your own AES and RSA keys by enabling the encryption settings. Once enabled all your recordings will be encrypted using AES-256-CBC and your symmetric key. You can either use the default `openssl` toolchain to decrypt the recordings with your AES key, as following:
|
||||
You can encrypt your recordings and outgoing MQTT messages with your own AES and RSA keys by enabling the encryption settings. Once enabled, all your recordings will be encrypted using AES-256-CBC and your symmetric key. You can use the default `openssl` toolchain to decrypt the recordings with your AES key, as following:
|
||||
|
||||
openssl aes-256-cbc -d -md md5 -in encrypted.mp4 -out decrypted.mp4 -k your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
|
||||
|
||||
, and additionally you can decrypt a folder of recordings, using the Kerberos Agent binary as following:
|
||||
Or you can decrypt a folder of recordings, using the Kerberos Agent binary as following:
|
||||
|
||||
go run main.go -action decrypt ./data/recordings your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
|
||||
|
||||
or for a single file:
|
||||
Or for a single file:
|
||||
|
||||
go run main.go -action decrypt ./data/recordings/video.mp4 your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
|
||||
|
||||
@@ -263,8 +282,9 @@ or for a single file:
|
||||
If we talk about video encoders and decoders (codecs), there are 2 major video codecs on the market: H264 and H265. Taking into account your use case, you might use one over the other. We will provide an (incomplete) overview of the advantages and disadvantages of each codec in the field of video surveillance and video analytics. If you would like to know more, you should look for additional resources on the internet (or if you like to read physical items, books still exist nowadays).
|
||||
|
||||
- H264 (also known as AVC or MPEG-4 Part 10)
|
||||
|
||||
- Is the most common one and most widely supported for IP cameras.
|
||||
- Supported in the majority of browsers, operating system and third-party applications.
|
||||
- Supported in the majority of browsers, operating system, and third-party applications.
|
||||
- Can be embedded in commercial and 3rd party applications.
|
||||
- Different levels of compression (high, medium, low, ..)
|
||||
- Better quality / compression ratio, shows less artifacts at medium compression ratios.
|
||||
@@ -278,14 +298,14 @@ If we talk about video encoders and decoders (codecs) there are 2 major video co
|
||||
- H265 shows artifacts in motion based environments (which is less with H264).
|
||||
- Recording the same video (resolution, duration and FPS) in H264 and H265 will result in approximately 50% of the file size.
|
||||
- Not supported in technologies such as WebRTC
|
||||
|
||||
|
||||
Conclusion: depending on the use case you might choose one over the other, and you can use both at the same time. For example you can use H264 (main stream) for livestreaming, and H265 (sub stream) for recording. If you wish to play recordings in a cross-platform and cross-browser environment, you might opt for H264 for better support.
|
||||
|
||||
## Contribute with Codespaces
|
||||
|
||||
One of the major blockers for letting you contribute to an Open Source project is to setup your local development machine. Why? Because you might have already some tools and libraries installed that are used for other projects, and the libraries you would need for Kerberos Agent, for example FFmpeg, might require a different version. Welcome to the dependency hell..
|
||||
One of the major blockers for letting you contribute to an Open Source project is to set up your local development machine. Why? Because you might already have some tools and libraries installed that are used for other projects, and the libraries you would need for Kerberos Agent, for example FFmpeg, might require a different version. Welcome to dependency hell...
|
||||
|
||||
By leveraging codespaces, which the Kerberos Agent repo supports, you will be able to setup the required development environment in a few minutes. By opening the `<> Code` tab on the top of the page, you will be able to create a codespace, [using the Kerberos Devcontainer](https://github.com/kerberos-io/devcontainer) base image. This image requires all the relevant dependencies: FFmpeg, OpenCV, Golang, Node, Yarn, etc.
|
||||
By leveraging codespaces, which the Kerberos Agent repo supports, you will be able to set up the required development environment in a few minutes. By opening the `<> Code` tab on the top of the page, you will be able to create a codespace, [using the Kerberos Devcontainer](https://github.com/kerberos-io/devcontainer) base image. This image includes all the relevant dependencies: FFmpeg, OpenCV, Golang, Node, Yarn, etc.
|
||||
|
||||

|
||||
|
||||
@@ -312,7 +332,7 @@ On opening of the GitHub Codespace, some dependencies will be installed. Once th
|
||||
WS_URL: `${websocketprotocol}//${externalHost}/ws`,
|
||||
};
|
||||
|
||||
Go and open two terminals one for the `ui` project and one for the `machinery` project.
|
||||
Go and open two terminals: one for the `ui` project and one for the `machinery` project.
|
||||
|
||||
1. Terminal A:
|
||||
|
||||
@@ -328,11 +348,11 @@ Once executed, a popup will show up mentioning `portforwarding`. You should see
|
||||
|
||||

|
||||
|
||||
As mentioned above, copy the hostname of the `machinery` DNS name, and past it in the `ui/src/config.json` file. Once done reload, the `ui` page in your browser, and you should be able to access the login page with the default credentials `root` and `root`.
|
||||
As mentioned above, copy the hostname of the `machinery` DNS name, and paste it in the `ui/src/config.json` file. Once done, reload the `ui` page in your browser, and you should be able to access the login page with the default credentials `root` and `root`.
|
||||
|
||||
## Develop and build
|
||||
|
||||
Kerberos Agent is divided in two parts a `machinery` and `web`. Both parts live in this repository in their relative folders. For development or running the application on your local machine, you have to run both the `machinery` and the `web` as described below. When running in production everything is shipped as only one artifact, read more about this at [Building for production](#building-for-production).
|
||||
The Kerberos Agent is divided into two parts: a `machinery` and a `web` part. Both parts live in this repository in their respective folders. For development or running the application on your local machine, you have to run both the `machinery` and the `web` as described below. When running in production everything is shipped as only one artifact, read more about this at [Building for production](#building-for-production).
|
||||
|
||||
### UI
|
||||
|
||||
@@ -346,13 +366,13 @@ This will start a webserver and launches the web app on port `3000`.
|
||||
|
||||

|
||||
|
||||
Once signed in you'll see the dashboard page showing up. After successfull configuration of your agent, you'll should see a live view and possible events recorded to disk.
|
||||
Once signed in, you'll see the dashboard page. After successful configuration of your agent, you should see a live view and possible events recorded to disk.
|
||||
|
||||

|
||||
|
||||
### Machinery
|
||||
|
||||
The `machinery` is a **Golang** project which delivers two functions: it acts as the Kerberos Agent which is doing all the heavy lifting with camera processing and other kinds of logic, on the other hand it acts as a webserver (Rest API) that allows communication from the web (React) or any other custom application. The API is documented using `swagger`.
|
||||
The `machinery` is a **Golang** project which delivers two functions: it acts as the Kerberos Agent which is doing all the heavy lifting with camera processing and other kinds of logic and on the other hand it acts as a webserver (Rest API) that allows communication from the web (React) or any other custom application. The API is documented using `swagger`.
|
||||
|
||||
You can simply run the `machinery` using following commands.
|
||||
|
||||
@@ -360,13 +380,13 @@ You can simply run the `machinery` using following commands.
|
||||
cd machinery
|
||||
go run main.go -action run -port 80
|
||||
|
||||
This will launch the Kerberos Agent and run a webserver on port `80`. You can change the port by your own preference. We strongly support the usage of [Goland](https://www.jetbrains.com/go/) or [Visual Studio Code](https://code.visualstudio.com/), as it comes with all the debugging and linting features builtin.
|
||||
This will launch the Kerberos Agent and run a webserver on port `80`. You can change the port to your own preference. We strongly support the usage of [Goland](https://www.jetbrains.com/go/) or [Visual Studio Code](https://code.visualstudio.com/), as they come with all the debugging and linting features built in.
|
||||
|
||||

|
||||
|
||||
## Building from source
|
||||
|
||||
Running Kerberos Agent in production only require a single binary to run. Nevertheless, we have two parts, the `machinery` and the `web`, we merge them during build time. So this is what happens.
|
||||
Running Kerberos Agent in production only requires a single binary to run. Nevertheless, we have two parts, the `machinery` and the `web`, which we merge during build time. So this is what happens.
|
||||
|
||||
### UI
|
||||
|
||||
@@ -377,7 +397,7 @@ To build the Kerberos Agent web app, you simply have to run the `build` command
|
||||
|
||||
### Machinery
|
||||
|
||||
Building the `machinery` is also super easy 🚀, by using `go build` you can create a single binary which ships it all; thank you Golang. After building you will endup with a binary called `main`, this is what contains everything you need to run Kerberos Agent.
|
||||
Building the `machinery` is also super easy 🚀, by using `go build` you can create a single binary which ships it all; thank you Golang. After building you will end up with a binary called `main`, this is what contains everything you need to run Kerberos Agent.
|
||||
|
||||
Remember the build step of the `web` part, during build time we move the build directory to the `machinery` directory. Inside the `machinery` web server [we reference the](https://github.com/kerberos-io/agent/blob/master/machinery/src/routers/http/Server.go#L44) `build` directory. This makes it possible to just a have single web server that runs it all.
|
||||
|
||||
@@ -386,8 +406,8 @@ Remember the build step of the `web` part, during build time we move the build d
|
||||
|
||||
## Building for Docker
|
||||
|
||||
Inside the root of this `agent` repository, you will find a `Dockerfile`. This file contains the instructions for building and shipping **Kerberos Agent**. Important to note is that start from a prebuild base image, `kerberos/base:xxx`.
|
||||
This base image contains already a couple of tools, such as Golang, FFmpeg and OpenCV. We do this for faster compilation times.
|
||||
Inside the root of this `agent` repository, you will find a `Dockerfile`. This file contains the instructions for building and shipping a **Kerberos Agent**. Important to note is that you start from a prebuilt base image, `kerberos/base:xxx`.
|
||||
This base image already contains a couple of tools, such as Golang, FFmpeg and OpenCV. We do this for faster compilation times.
|
||||
|
||||
By running the `docker build` command, you will create the Kerberos Agent Docker image. After building you can simply run the image as a Docker container.
|
||||
|
||||
@@ -403,7 +423,7 @@ Read more about this [at the FAQ](#faq) below.
|
||||
|
||||
## Contributors
|
||||
|
||||
This project exists thanks to all the people who contribute.
|
||||
This project exists thanks to all the people who contribute. Bravo!
|
||||
|
||||
<a href="https://github.com/kerberos-io/agent/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=kerberos-io/agent" />
|
||||
|
||||
2958
assets/img/edge-deployment-agent.svg
Normal file
2958
assets/img/edge-deployment-agent.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 696 KiB |
10
build.sh
10
build.sh
@@ -1,10 +0,0 @@
|
||||
export version=0.0.1
|
||||
export name=agent
|
||||
|
||||
docker build -t $name .
|
||||
|
||||
docker tag $name kerberos/$name:$version
|
||||
docker push kerberos/$name:$version
|
||||
|
||||
docker tag $name kerberos/$name:latest
|
||||
docker push kerberos/$name:latest
|
||||
@@ -9,7 +9,7 @@ Kerberos Agents are now also shipped as static binaries. Within the Docker image
|
||||
|
||||
You can run the binary as following on port `8080`:
|
||||
|
||||
main run cameraname 8080
|
||||
main -action=run -port=80
|
||||
|
||||
## Systemd
|
||||
|
||||
@@ -18,7 +18,7 @@ When running on a Linux OS you might consider to auto-start the Kerberos Agent u
|
||||
[Unit]
|
||||
Wants=network.target
|
||||
[Service]
|
||||
ExecStart=/home/pi/agent/main run camera 80
|
||||
ExecStart=/home/pi/agent/main -action=run -port=80
|
||||
WorkingDirectory=/home/pi/agent/
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@@ -36,12 +36,12 @@ You attach a volume to your container by leveraging the `-v` option. To mount yo
|
||||
|
||||
docker run -p 80:80 --name mycamera \
|
||||
-v $(pwd)/agent/config:/home/agent/data/config \
|
||||
-v $(pwd)/agent/recordings:/home/agent/data/recordings\
|
||||
-d --restart=alwayskerberos/agent:latest
|
||||
-v $(pwd)/agent/recordings:/home/agent/data/recordings \
|
||||
-d --restart=always kerberos/agent:latest
|
||||
|
||||
### Override with environment variables
|
||||
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments easier when leveraging `docker compose` or `kubernetes` deployments much easier and scalable. Using this approach we simplify automation through `ansible` and `terraform`. You'll find [the full list of environment variables on the main README.md file](https://github.com/kerberos-io/agent#override-with-environment-variables).
|
||||
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments much easier and more scalable when leveraging `docker compose` or `kubernetes`. Using this approach we simplify automation through `ansible` and `terraform`. You'll find [the full list of environment variables on the main README.md file](https://github.com/kerberos-io/agent#override-with-environment-variables).
|
||||
|
||||
### 2. Running multiple containers with Docker compose
|
||||
|
||||
|
||||
@@ -1,35 +1,38 @@
|
||||
version: "3.9"
|
||||
x-common-variables: &common-variables
|
||||
# Add variables here to add them to all agents
|
||||
AGENT_HUB_KEY: "xxxxx" # The access key linked to your account in Kerberos Hub.
|
||||
AGENT_HUB_PRIVATE_KEY: "xxxxx" # The secret access key linked to your account in Kerberos Hub.
|
||||
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
|
||||
services:
|
||||
kerberos-agent1:
|
||||
image: "kerberos/agent:latest"
|
||||
ports:
|
||||
- "8081:80"
|
||||
environment:
|
||||
- AGENT_NAME=agent1
|
||||
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
|
||||
- AGENT_HUB_KEY=xxx
|
||||
- AGENT_HUB_PRIVATE_KEY=xxx
|
||||
- AGENT_CAPTURE_CONTINUOUS=true
|
||||
- AGENT_CAPTURE_PRERECORDING=10
|
||||
- AGENT_CAPTURE_POSTRECORDING=10
|
||||
- AGENT_CAPTURE_MAXLENGTH=60
|
||||
- AGENT_CAPTURE_PIXEL_CHANGE=150
|
||||
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
|
||||
<<: *common-variables
|
||||
AGENT_NAME: agent1
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP: rtsp://username:password@x.x.x.x/Streaming/Channels/101 # Hikvision camera RTSP url example
|
||||
AGENT_KEY: "1"
|
||||
kerberos-agent2:
|
||||
image: "kerberos/agent:latest"
|
||||
ports:
|
||||
- "8082:80"
|
||||
environment:
|
||||
- AGENT_NAME=agent2
|
||||
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
|
||||
- AGENT_HUB_KEY=yyy
|
||||
- AGENT_HUB_PRIVATE_KEY=yyy
|
||||
<<: *common-variables
|
||||
AGENT_NAME: agent2
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP: rtsp://username:password@x.x.x.x/channel1 # Linksys camera RTSP url example
|
||||
AGENT_KEY: "2"
|
||||
kerberos-agent3:
|
||||
image: "kerberos/agent:latest"
|
||||
ports:
|
||||
- "8083:80"
|
||||
environment:
|
||||
- AGENT_NAME=agent3
|
||||
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
|
||||
- AGENT_HUB_KEY=zzz
|
||||
- AGENT_HUB_PRIVATE_KEY=zzz
|
||||
<<: *common-variables
|
||||
AGENT_NAME: agent3
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP: rtsp://username:password@x.x.x.x/cam/realmonitor?channel=1&subtype=1 # Dahua camera RTSP url example
|
||||
AGENT_KEY: "3"
|
||||
networks:
|
||||
default:
|
||||
name: cluster-net
|
||||
external: true
|
||||
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
spec:
|
||||
containers:
|
||||
- name: agent
|
||||
image: kerberos/agent:latest
|
||||
image: kerberos/agent:3.2.3
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
@@ -50,4 +50,4 @@ spec:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: agent
|
||||
app: agent
|
||||
|
||||
BIN
machinery/.DS_Store
vendored
Normal file
BIN
machinery/.DS_Store
vendored
Normal file
Binary file not shown.
31
machinery/.env
Normal file
31
machinery/.env
Normal file
@@ -0,0 +1,31 @@
|
||||
AGENT_NAME=camera-name
|
||||
AGENT_KEY=uniq-camera-id
|
||||
AGENT_TIMEZONE=Europe/Brussels
|
||||
#AGENT_CAPTURE_CONTINUOUS=true
|
||||
#AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://fake.kerberos.io/stream
|
||||
#AGENT_CAPTURE_IPCAMERA_SUB_RTSP=rtsp://fake.kerberos.io/stream
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR=x.x.x.x
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME=xxx
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD=xxx
|
||||
AGENT_HUB_URI=https://api.cloud.kerberos.io
|
||||
AGENT_HUB_KEY=AKIXxxx4JBEI
|
||||
AGENT_HUB_PRIVATE_KEY=DIOXxxxAlYpaxxxxXioL0txxx
|
||||
AGENT_HUB_SITE=681xxxxxxx9bcda5
|
||||
|
||||
# By default will send to Hub (=S3), if you wish to send to Kerberos Vault, set to "kstorage"
|
||||
AGENT_CLOUD=s3
|
||||
AGENT_KERBEROSVAULT_URI=
|
||||
AGENT_KERBEROSVAULT_PROVIDER=
|
||||
AGENT_KERBEROSVAULT_DIRECTORY=
|
||||
AGENT_KERBEROSVAULT_ACCESS_KEY=
|
||||
AGENT_KERBEROSVAULT_SECRET_KEY=
|
||||
AGENT_KERBEROSVAULT_MAX_RETRIES=10
|
||||
AGENT_KERBEROSVAULT_TIMEOUT=120
|
||||
AGENT_KERBEROSVAULT_SECONDARY_URI=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_PROVIDER=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY=
|
||||
|
||||
# Open telemetry tracing endpoint
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT=
|
||||
18
machinery/.vscode/launch.json
vendored
18
machinery/.vscode/launch.json
vendored
@@ -1,18 +0,0 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch Package",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "main.go",
|
||||
"args": ["-action", "run"],
|
||||
"envFile": "${workspaceFolder}/.env",
|
||||
"buildFlags": "--tags dynamic",
|
||||
},
|
||||
]
|
||||
}
|
||||
@@ -14,7 +14,9 @@
|
||||
"ipcamera": {
|
||||
"rtsp": "",
|
||||
"sub_rtsp": "",
|
||||
"fps": ""
|
||||
"fps": "",
|
||||
"base_width": 640,
|
||||
"base_height": 0
|
||||
},
|
||||
"usbcamera": {
|
||||
"device": ""
|
||||
@@ -26,6 +28,7 @@
|
||||
"recording": "true",
|
||||
"snapshots": "true",
|
||||
"liveview": "true",
|
||||
"liveview_chunking": "false",
|
||||
"motion": "true",
|
||||
"postrecording": 20,
|
||||
"prerecording": 10,
|
||||
@@ -98,19 +101,25 @@
|
||||
"region": "eu-west-1"
|
||||
},
|
||||
"kstorage": {},
|
||||
"kstorage_secondary": {},
|
||||
"dropbox": {},
|
||||
"mqtturi": "tcp://mqtt.kerberos.io:1883",
|
||||
"mqtt_username": "",
|
||||
"mqtt_password": "",
|
||||
"stunuri": "stun:turn.kerberos.io:8443",
|
||||
"turn_force": "false",
|
||||
"turnuri": "turn:turn.kerberos.io:8443",
|
||||
"turn_username": "username1",
|
||||
"turn_password": "password1",
|
||||
"heartbeaturi": "",
|
||||
"hub_encryption": "true",
|
||||
"hub_uri": "https://api.cloud.kerberos.io",
|
||||
"hub_key": "",
|
||||
"hub_private_key": "",
|
||||
"hub_site": "",
|
||||
"condition_uri": "",
|
||||
"encryption": {}
|
||||
"encryption": {},
|
||||
"signing": {},
|
||||
"realtimeprocessing": "false",
|
||||
"realtimeprocessing_topic": ""
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
// Package docs GENERATED BY SWAG; DO NOT EDIT
|
||||
// This file was generated by swaggo/swag
|
||||
// Package docs Code generated by swaggo/swag. DO NOT EDIT
|
||||
package docs
|
||||
|
||||
import "github.com/swaggo/swag"
|
||||
@@ -279,6 +278,40 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/camera/onvif/verify": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Will verify the ONVIF connectivity.",
|
||||
"tags": [
|
||||
"onvif"
|
||||
],
|
||||
"summary": "Will verify the ONVIF connectivity.",
|
||||
"operationId": "verify-onvif",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "OnvifCredentials",
|
||||
"name": "config",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.OnvifCredentials"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.APIResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/camera/onvif/zoom": {
|
||||
"post": {
|
||||
"description": "Zooming in or out the camera.",
|
||||
@@ -354,7 +387,7 @@ const docTemplate = `{
|
||||
"operationId": "snapshot-base64",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -369,7 +402,7 @@ const docTemplate = `{
|
||||
"operationId": "snapshot-jpeg",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -442,7 +475,7 @@ const docTemplate = `{
|
||||
"operationId": "config",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -466,7 +499,7 @@ const docTemplate = `{
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -481,7 +514,7 @@ const docTemplate = `{
|
||||
"operationId": "dashboard",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -496,7 +529,7 @@ const docTemplate = `{
|
||||
"operationId": "days",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -556,7 +589,7 @@ const docTemplate = `{
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -590,40 +623,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/onvif/verify": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Will verify the ONVIF connectivity.",
|
||||
"tags": [
|
||||
"onvif"
|
||||
],
|
||||
"summary": "Will verify the ONVIF connectivity.",
|
||||
"operationId": "verify-onvif",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "OnvifCredentials",
|
||||
"name": "config",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.OnvifCredentials"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.APIResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/persistence/verify": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -803,6 +802,9 @@ const docTemplate = `{
|
||||
"description": "obsolete",
|
||||
"type": "string"
|
||||
},
|
||||
"hub_encryption": {
|
||||
"type": "string"
|
||||
},
|
||||
"hub_key": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -839,6 +841,12 @@ const docTemplate = `{
|
||||
"offline": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing_topic": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"$ref": "#/definitions/models.Region"
|
||||
},
|
||||
@@ -863,6 +871,9 @@ const docTemplate = `{
|
||||
"timezone": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_force": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_password": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -957,9 +968,18 @@ const docTemplate = `{
|
||||
"rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_fps": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_height": {
|
||||
"type": "integer"
|
||||
},
|
||||
"sub_rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_width": {
|
||||
"type": "integer"
|
||||
},
|
||||
"width": {
|
||||
"type": "integer"
|
||||
}
|
||||
@@ -1166,6 +1186,8 @@ var SwaggerInfo = &swag.Spec{
|
||||
Description: "This is the API for using and configure Kerberos Agent.",
|
||||
InfoInstanceName: "swagger",
|
||||
SwaggerTemplate: docTemplate,
|
||||
LeftDelim: "{{",
|
||||
RightDelim: "}}",
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -271,6 +271,40 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/camera/onvif/verify": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Will verify the ONVIF connectivity.",
|
||||
"tags": [
|
||||
"onvif"
|
||||
],
|
||||
"summary": "Will verify the ONVIF connectivity.",
|
||||
"operationId": "verify-onvif",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "OnvifCredentials",
|
||||
"name": "config",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.OnvifCredentials"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.APIResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/camera/onvif/zoom": {
|
||||
"post": {
|
||||
"description": "Zooming in or out the camera.",
|
||||
@@ -346,7 +380,7 @@
|
||||
"operationId": "snapshot-base64",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -361,7 +395,7 @@
|
||||
"operationId": "snapshot-jpeg",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -434,7 +468,7 @@
|
||||
"operationId": "config",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -458,7 +492,7 @@
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -473,7 +507,7 @@
|
||||
"operationId": "dashboard",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -488,7 +522,7 @@
|
||||
"operationId": "days",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -548,7 +582,7 @@
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": ""
|
||||
"description": "OK"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -582,40 +616,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/onvif/verify": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Will verify the ONVIF connectivity.",
|
||||
"tags": [
|
||||
"onvif"
|
||||
],
|
||||
"summary": "Will verify the ONVIF connectivity.",
|
||||
"operationId": "verify-onvif",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "OnvifCredentials",
|
||||
"name": "config",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.OnvifCredentials"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/models.APIResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/persistence/verify": {
|
||||
"post": {
|
||||
"security": [
|
||||
@@ -795,6 +795,9 @@
|
||||
"description": "obsolete",
|
||||
"type": "string"
|
||||
},
|
||||
"hub_encryption": {
|
||||
"type": "string"
|
||||
},
|
||||
"hub_key": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -831,6 +834,12 @@
|
||||
"offline": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing": {
|
||||
"type": "string"
|
||||
},
|
||||
"realtimeprocessing_topic": {
|
||||
"type": "string"
|
||||
},
|
||||
"region": {
|
||||
"$ref": "#/definitions/models.Region"
|
||||
},
|
||||
@@ -855,6 +864,9 @@
|
||||
"timezone": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_force": {
|
||||
"type": "string"
|
||||
},
|
||||
"turn_password": {
|
||||
"type": "string"
|
||||
},
|
||||
@@ -949,9 +961,18 @@
|
||||
"rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_fps": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_height": {
|
||||
"type": "integer"
|
||||
},
|
||||
"sub_rtsp": {
|
||||
"type": "string"
|
||||
},
|
||||
"sub_width": {
|
||||
"type": "integer"
|
||||
},
|
||||
"width": {
|
||||
"type": "integer"
|
||||
}
|
||||
|
||||
@@ -95,6 +95,8 @@ definitions:
|
||||
heartbeaturi:
|
||||
description: obsolete
|
||||
type: string
|
||||
hub_encryption:
|
||||
type: string
|
||||
hub_key:
|
||||
type: string
|
||||
hub_private_key:
|
||||
@@ -119,6 +121,10 @@ definitions:
|
||||
type: string
|
||||
offline:
|
||||
type: string
|
||||
realtimeprocessing:
|
||||
type: string
|
||||
realtimeprocessing_topic:
|
||||
type: string
|
||||
region:
|
||||
$ref: '#/definitions/models.Region'
|
||||
remove_after_upload:
|
||||
@@ -135,6 +141,8 @@ definitions:
|
||||
type: array
|
||||
timezone:
|
||||
type: string
|
||||
turn_force:
|
||||
type: string
|
||||
turn_password:
|
||||
type: string
|
||||
turn_username:
|
||||
@@ -196,8 +204,14 @@ definitions:
|
||||
type: string
|
||||
rtsp:
|
||||
type: string
|
||||
sub_fps:
|
||||
type: string
|
||||
sub_height:
|
||||
type: integer
|
||||
sub_rtsp:
|
||||
type: string
|
||||
sub_width:
|
||||
type: integer
|
||||
width:
|
||||
type: integer
|
||||
type: object
|
||||
@@ -494,6 +508,27 @@ paths:
|
||||
summary: Will return the ONVIF presets for the specific camera.
|
||||
tags:
|
||||
- onvif
|
||||
/api/camera/onvif/verify:
|
||||
post:
|
||||
description: Will verify the ONVIF connectivity.
|
||||
operationId: verify-onvif
|
||||
parameters:
|
||||
- description: OnvifCredentials
|
||||
in: body
|
||||
name: config
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/models.OnvifCredentials'
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/models.APIResponse'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Will verify the ONVIF connectivity.
|
||||
tags:
|
||||
- onvif
|
||||
/api/camera/onvif/zoom:
|
||||
post:
|
||||
description: Zooming in or out the camera.
|
||||
@@ -543,7 +578,7 @@ paths:
|
||||
operationId: snapshot-base64
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get a snapshot from the camera in base64.
|
||||
tags:
|
||||
- camera
|
||||
@@ -553,7 +588,7 @@ paths:
|
||||
operationId: snapshot-jpeg
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get a snapshot from the camera in jpeg format.
|
||||
tags:
|
||||
- camera
|
||||
@@ -603,7 +638,7 @@ paths:
|
||||
operationId: config
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get the current configuration.
|
||||
tags:
|
||||
- config
|
||||
@@ -619,7 +654,7 @@ paths:
|
||||
$ref: '#/definitions/models.Config'
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Update the current configuration.
|
||||
tags:
|
||||
- config
|
||||
@@ -629,7 +664,7 @@ paths:
|
||||
operationId: dashboard
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get all information showed on the dashboard.
|
||||
tags:
|
||||
- general
|
||||
@@ -639,7 +674,7 @@ paths:
|
||||
operationId: days
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get all days stored in the recordings directory.
|
||||
tags:
|
||||
- general
|
||||
@@ -677,7 +712,7 @@ paths:
|
||||
$ref: '#/definitions/models.EventFilter'
|
||||
responses:
|
||||
"200":
|
||||
description: ""
|
||||
description: OK
|
||||
summary: Get the latest recordings (events) from the recordings directory.
|
||||
tags:
|
||||
- general
|
||||
@@ -700,27 +735,6 @@ paths:
|
||||
summary: Get Authorization token.
|
||||
tags:
|
||||
- authentication
|
||||
/api/onvif/verify:
|
||||
post:
|
||||
description: Will verify the ONVIF connectivity.
|
||||
operationId: verify-onvif
|
||||
parameters:
|
||||
- description: OnvifCredentials
|
||||
in: body
|
||||
name: config
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/models.OnvifCredentials'
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/models.APIResponse'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Will verify the ONVIF connectivity.
|
||||
tags:
|
||||
- onvif
|
||||
/api/persistence/verify:
|
||||
post:
|
||||
description: Will verify the persistence.
|
||||
|
||||
194
machinery/go.mod
194
machinery/go.mod
@@ -1,152 +1,140 @@
|
||||
module github.com/kerberos-io/agent/machinery
|
||||
|
||||
go 1.20
|
||||
go 1.24.2
|
||||
|
||||
//replace github.com/kerberos-io/joy4 v1.0.63 => ../../../../github.com/kerberos-io/joy4
|
||||
|
||||
//replace github.com/kerberos-io/onvif v0.0.10 => ../../../../github.com/kerberos-io/onvif
|
||||
replace google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250519155744-55703ea1f237
|
||||
|
||||
require (
|
||||
github.com/Eyevinn/mp4ff v0.48.0
|
||||
github.com/InVisionApp/conjungo v1.1.0
|
||||
github.com/appleboy/gin-jwt/v2 v2.9.1
|
||||
github.com/bluenviron/gortsplib/v4 v4.6.1
|
||||
github.com/bluenviron/mediacommon v1.5.1
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.3
|
||||
github.com/bluenviron/gortsplib/v4 v4.14.1
|
||||
github.com/bluenviron/mediacommon v1.14.0
|
||||
github.com/cedricve/go-onvif v0.0.0-20200222191200-567e8ce298f6
|
||||
github.com/dromara/carbon/v2 v2.6.8
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
|
||||
github.com/eclipse/paho.mqtt.golang v1.4.2
|
||||
github.com/elastic/go-sysinfo v1.9.0
|
||||
github.com/gin-contrib/cors v1.4.0
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/contrib v0.0.0-20221130124618-7e01895a63f2
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/eclipse/paho.mqtt.golang v1.5.0
|
||||
github.com/elastic/go-sysinfo v1.15.3
|
||||
github.com/gin-contrib/cors v1.7.5
|
||||
github.com/gin-contrib/pprof v1.5.3
|
||||
github.com/gin-gonic/contrib v0.0.0-20250521004450-2b1292699c15
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/gofrs/uuid v4.4.0+incompatible
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3
|
||||
github.com/golang-module/carbon/v2 v2.2.3
|
||||
github.com/gorilla/websocket v1.5.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/kellydunn/golang-geo v0.7.0
|
||||
github.com/kerberos-io/joy4 v1.0.64
|
||||
github.com/kerberos-io/onvif v0.0.14
|
||||
github.com/kerberos-io/onvif v1.0.0
|
||||
github.com/minio/minio-go/v6 v6.0.57
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/pion/rtp v1.8.3
|
||||
github.com/pion/webrtc/v3 v3.1.50
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/swaggo/files v1.0.0
|
||||
github.com/swaggo/gin-swagger v1.5.3
|
||||
github.com/swaggo/swag v1.8.9
|
||||
github.com/pion/interceptor v0.1.40
|
||||
github.com/pion/rtp v1.8.19
|
||||
github.com/pion/webrtc/v4 v4.1.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/swaggo/files v1.0.1
|
||||
github.com/swaggo/gin-swagger v1.6.0
|
||||
github.com/swaggo/swag v1.16.4
|
||||
github.com/tevino/abool v1.2.0
|
||||
github.com/yapingcat/gomedia v0.0.0-20231203152327-9078d4068ce7
|
||||
github.com/zaf/g711 v0.0.0-20220109202201-cf0017bf0359
|
||||
go.mongodb.org/mongo-driver v1.7.5
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.46.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||
github.com/zaf/g711 v1.4.0
|
||||
go.mongodb.org/mongo-driver v1.17.3
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
|
||||
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
|
||||
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
|
||||
github.com/DataDog/gostackparse v0.5.0 // indirect
|
||||
github.com/DataDog/sketches-go v1.2.1 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beevik/etree v1.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.9.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/bluenviron/mediacommon/v2 v2.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.13.2 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.4 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/clbanning/mxj v1.8.4 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/dgraph-io/ristretto v0.1.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/elastic/go-windows v1.0.0 // indirect
|
||||
github.com/elgs/gostrgen v0.0.0-20220325073726-0c3e00d082f6 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/elastic/go-windows v1.0.2 // indirect
|
||||
github.com/elgs/gostrgen v0.0.0-20161222160715-9d61ae07eeae // indirect
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
|
||||
github.com/gin-contrib/sse v1.0.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.6 // indirect
|
||||
github.com/go-openapi/spec v0.20.4 // indirect
|
||||
github.com/go-openapi/swag v0.19.15 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/go-stack/stack v1.8.0 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/go-playground/validator/v10 v10.26.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/pprof v0.0.0-20210423192551-a2663126120b // indirect
|
||||
github.com/google/uuid v1.4.0 // indirect
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/icholy/digest v0.1.23 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.0 // indirect
|
||||
github.com/juju/errors v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.16.7 // indirect
|
||||
github.com/klauspost/cpuid v1.2.3 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/kylelemons/go-gypsy v1.0.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/lib/pq v1.10.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.0 // indirect
|
||||
github.com/minio/sha256-simd v0.1.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/onsi/gomega v1.27.4 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/philhofer/fwd v1.1.1 // indirect
|
||||
github.com/pion/datachannel v1.5.5 // indirect
|
||||
github.com/pion/dtls/v2 v2.1.5 // indirect
|
||||
github.com/pion/ice/v2 v2.2.12 // indirect
|
||||
github.com/pion/interceptor v0.1.11 // indirect
|
||||
github.com/pion/logging v0.2.2 // indirect
|
||||
github.com/pion/mdns v0.0.5 // indirect
|
||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||
github.com/nxadm/tail v1.4.11 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.12 // indirect
|
||||
github.com/pion/sctp v1.8.5 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.6 // indirect
|
||||
github.com/pion/srtp/v2 v2.0.10 // indirect
|
||||
github.com/pion/stun v0.3.5 // indirect
|
||||
github.com/pion/transport v0.14.1 // indirect
|
||||
github.com/pion/turn/v2 v2.0.8 // indirect
|
||||
github.com/pion/udp v0.1.1 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/tinylib/msgp v1.1.6 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.5 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
|
||||
github.com/xdg-go/scram v1.0.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.2 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
github.com/ziutek/mymysql v1.5.4 // indirect
|
||||
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.16.0 // indirect
|
||||
golang.org/x/net v0.19.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect
|
||||
golang.org/x/sync v0.1.0 // indirect
|
||||
golang.org/x/sys v0.15.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
|
||||
golang.org/x/tools v0.7.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
google.golang.org/appengine v1.6.6 // indirect
|
||||
google.golang.org/grpc v1.32.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
golang.org/x/arch v0.16.0 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/tools v0.30.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/grpc v1.72.1 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/ini.v1 v1.42.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
||||
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
|
||||
)
|
||||
|
||||
1948
machinery/go.sum
1948
machinery/go.sum
File diff suppressed because it is too large
Load Diff
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
@@ -11,47 +12,61 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/onvif"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||
|
||||
configService "github.com/kerberos-io/agent/machinery/src/config"
|
||||
"github.com/kerberos-io/agent/machinery/src/routers"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/profiler"
|
||||
)
|
||||
|
||||
var VERSION = "3.0.0"
|
||||
var VERSION = utils.VERSION
|
||||
|
||||
func startTracing(agentKey string, otelEndpoint string) (*trace.TracerProvider, error) {
|
||||
serviceName := "agent-" + agentKey
|
||||
headers := map[string]string{
|
||||
"content-type": "application/json",
|
||||
}
|
||||
|
||||
exporter, err := otlptrace.New(
|
||||
context.Background(),
|
||||
otlptracehttp.NewClient(
|
||||
otlptracehttp.WithEndpoint(otelEndpoint),
|
||||
otlptracehttp.WithHeaders(headers),
|
||||
otlptracehttp.WithInsecure(),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating new exporter: %w", err)
|
||||
}
|
||||
|
||||
tracerprovider := trace.NewTracerProvider(
|
||||
trace.WithBatcher(
|
||||
exporter,
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
trace.WithBatchTimeout(trace.DefaultScheduleDelay*time.Millisecond),
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
),
|
||||
trace.WithResource(
|
||||
resource.NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey.String(serviceName),
|
||||
attribute.String("environment", "develop"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
otel.SetTracerProvider(tracerprovider)
|
||||
|
||||
return tracerprovider, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
// You might be interested in debugging the agent.
|
||||
if os.Getenv("DATADOG_AGENT_ENABLED") == "true" {
|
||||
if os.Getenv("DATADOG_AGENT_K8S_ENABLED") == "true" {
|
||||
tracer.Start()
|
||||
defer tracer.Stop()
|
||||
} else {
|
||||
service := os.Getenv("DATADOG_AGENT_SERVICE")
|
||||
environment := os.Getenv("DATADOG_AGENT_ENVIRONMENT")
|
||||
log.Log.Info("Starting Datadog Agent with service: " + service + " and environment: " + environment)
|
||||
rules := []tracer.SamplingRule{tracer.RateRule(1)}
|
||||
tracer.Start(
|
||||
tracer.WithSamplingRules(rules),
|
||||
tracer.WithService(service),
|
||||
tracer.WithEnv(environment),
|
||||
)
|
||||
defer tracer.Stop()
|
||||
err := profiler.Start(
|
||||
profiler.WithService(service),
|
||||
profiler.WithEnv(environment),
|
||||
profiler.WithProfileTypes(
|
||||
profiler.CPUProfile,
|
||||
profiler.HeapProfile,
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
log.Log.Fatal(err.Error())
|
||||
}
|
||||
defer profiler.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// Start the show ;)
|
||||
// We'll parse the flags (named variables), and start the agent.
|
||||
@@ -86,35 +101,39 @@ func main() {
|
||||
switch action {
|
||||
|
||||
case "version":
|
||||
log.Log.Info("main.Main(): You are currrently running Kerberos Agent " + VERSION)
|
||||
|
||||
{
|
||||
log.Log.Info("main.Main(): You are currrently running Kerberos Agent " + VERSION)
|
||||
}
|
||||
case "discover":
|
||||
// Convert duration to int
|
||||
timeout, err := time.ParseDuration(timeout + "ms")
|
||||
if err != nil {
|
||||
log.Log.Fatal("main.Main(): could not parse timeout: " + err.Error())
|
||||
return
|
||||
{
|
||||
// Convert duration to int
|
||||
timeout, err := time.ParseDuration(timeout + "ms")
|
||||
if err != nil {
|
||||
log.Log.Fatal("main.Main(): could not parse timeout: " + err.Error())
|
||||
return
|
||||
}
|
||||
onvif.Discover(timeout)
|
||||
}
|
||||
onvif.Discover(timeout)
|
||||
|
||||
case "decrypt":
|
||||
log.Log.Info("main.Main(): Decrypting: " + flag.Arg(0) + " with key: " + flag.Arg(1))
|
||||
symmetricKey := []byte(flag.Arg(1))
|
||||
{
|
||||
log.Log.Info("main.Main(): Decrypting: " + flag.Arg(0) + " with key: " + flag.Arg(1))
|
||||
symmetricKey := []byte(flag.Arg(1))
|
||||
|
||||
if symmetricKey == nil || len(symmetricKey) == 0 {
|
||||
log.Log.Fatal("main.Main(): symmetric key should not be empty")
|
||||
return
|
||||
}
|
||||
if len(symmetricKey) != 32 {
|
||||
log.Log.Fatal("main.Main(): symmetric key should be 32 bytes")
|
||||
return
|
||||
}
|
||||
if len(symmetricKey) == 0 {
|
||||
log.Log.Fatal("main.Main(): symmetric key should not be empty")
|
||||
return
|
||||
}
|
||||
if len(symmetricKey) != 32 {
|
||||
log.Log.Fatal("main.Main(): symmetric key should be 32 bytes")
|
||||
return
|
||||
}
|
||||
|
||||
utils.Decrypt(flag.Arg(0), symmetricKey)
|
||||
utils.Decrypt(flag.Arg(0), symmetricKey)
|
||||
}
|
||||
|
||||
case "run":
|
||||
{
|
||||
// Print Kerberos.io ASCII art
|
||||
// Print Agent ASCII art
|
||||
utils.PrintASCIIArt()
|
||||
|
||||
// Print the environment variables which include "AGENT_" as prefix.
|
||||
@@ -127,12 +146,29 @@ func main() {
|
||||
configuration.Name = name
|
||||
configuration.Port = port
|
||||
|
||||
// Open this configuration either from Kerberos Agent or Kerberos Factory.
|
||||
// Open this configuration either from Agent or Factory.
|
||||
configService.OpenConfig(configDirectory, &configuration)
|
||||
|
||||
// We will override the configuration with the environment variables
|
||||
configService.OverrideWithEnvironmentVariables(&configuration)
|
||||
|
||||
// Start OpenTelemetry tracing
|
||||
if otelEndpoint := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"); otelEndpoint == "" {
|
||||
log.Log.Info("main.Main(): No OpenTelemetry endpoint provided, skipping tracing")
|
||||
} else {
|
||||
log.Log.Info("main.Main(): Starting OpenTelemetry tracing with endpoint: " + otelEndpoint)
|
||||
agentKey := configuration.Config.Key
|
||||
traceProvider, err := startTracing(agentKey, otelEndpoint)
|
||||
if err != nil {
|
||||
log.Log.Error("traceprovider: " + err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if err := traceProvider.Shutdown(context.Background()); err != nil {
|
||||
log.Log.Error("traceprovider: " + err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Printing final configuration
|
||||
utils.PrintConfiguration(&configuration)
|
||||
|
||||
@@ -175,12 +211,14 @@ func main() {
|
||||
HandleBootstrap: make(chan string, 1),
|
||||
}
|
||||
|
||||
go components.Bootstrap(configDirectory, &configuration, &communication, &capture)
|
||||
go components.Bootstrap(ctx, configDirectory, &configuration, &communication, &capture)
|
||||
|
||||
// Start the REST API.
|
||||
routers.StartWebserver(configDirectory, &configuration, &communication, &capture)
|
||||
}
|
||||
default:
|
||||
log.Log.Error("main.Main(): Sorry I don't understand :(")
|
||||
{
|
||||
log.Log.Error("main.Main(): Sorry I don't understand :(")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -38,16 +38,16 @@ func (c *Capture) SetBackChannelClient(rtspUrl string) *Golibrtsp {
|
||||
// RTSPClient is a interface that abstracts the RTSP client implementation.
|
||||
type RTSPClient interface {
|
||||
// Connect to the RTSP server.
|
||||
Connect(ctx context.Context) error
|
||||
Connect(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Connect to a backchannel RTSP server.
|
||||
ConnectBackChannel(ctx context.Context) error
|
||||
ConnectBackChannel(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
Start(ctx context.Context, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) error
|
||||
Start(ctx context.Context, streamType string, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) error
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
StartBackChannel(ctx context.Context) (err error)
|
||||
StartBackChannel(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Decode a packet into a image.
|
||||
DecodePacket(pkt packets.Packet) (image.YCbCr, error)
|
||||
@@ -59,7 +59,7 @@ type RTSPClient interface {
|
||||
WritePacket(pkt packets.Packet) error
|
||||
|
||||
// Close the connection to the RTSP server.
|
||||
Close() error
|
||||
Close(ctx context.Context) error
|
||||
|
||||
// Get a list of streams from the RTSP server.
|
||||
GetStreams() ([]packets.Stream, error)
|
||||
|
||||
@@ -33,8 +33,11 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
"github.com/pion/rtp"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
var tracer = otel.Tracer("github.com/kerberos-io/agent/machinery/src/capture")
|
||||
|
||||
// Implements the RTSPClient interface.
|
||||
type Golibrtsp struct {
|
||||
RTSPClient
|
||||
@@ -63,7 +66,12 @@ type Golibrtsp struct {
|
||||
AudioG711Index int8
|
||||
AudioG711Media *description.Media
|
||||
AudioG711Forma *format.G711
|
||||
AudioG711Decoder *rtpsimpleaudio.Decoder
|
||||
AudioG711Decoder *rtplpcm.Decoder
|
||||
|
||||
AudioOpusIndex int8
|
||||
AudioOpusMedia *description.Media
|
||||
AudioOpusForma *format.Opus
|
||||
AudioOpusDecoder *rtpsimpleaudio.Decoder
|
||||
|
||||
HasBackChannel bool
|
||||
AudioG711IndexBackChannel int8
|
||||
@@ -76,10 +84,115 @@ type Golibrtsp struct {
|
||||
AudioMPEG4Decoder *rtpmpeg4audio.Decoder
|
||||
|
||||
Streams []packets.Stream
|
||||
|
||||
// Per-stream FPS calculation (keyed by stream index)
|
||||
fpsTrackers map[int8]*fpsTracker
|
||||
|
||||
// I-frame interval tracking fields
|
||||
packetsSinceLastKeyframe int
|
||||
lastKeyframePacketCount int
|
||||
keyframeIntervals []int
|
||||
keyframeBufferSize int
|
||||
keyframeBufferIndex int
|
||||
keyframeMutex sync.Mutex
|
||||
}
|
||||
|
||||
// fpsTracker holds per-stream state for PTS-based FPS calculation.
|
||||
// Each video stream (H264 / H265) gets its own tracker so PTS
|
||||
// samples from different codecs never interleave.
|
||||
type fpsTracker struct {
|
||||
mu sync.Mutex
|
||||
lastPTS time.Duration
|
||||
hasPTS bool
|
||||
frameTimeBuffer []time.Duration
|
||||
bufferSize int
|
||||
bufferIndex int
|
||||
cachedFPS float64 // latest computed FPS
|
||||
}
|
||||
|
||||
func newFPSTracker(bufferSize int) *fpsTracker {
|
||||
return &fpsTracker{
|
||||
frameTimeBuffer: make([]time.Duration, bufferSize),
|
||||
bufferSize: bufferSize,
|
||||
}
|
||||
}
|
||||
|
||||
// update records a new PTS sample and returns the latest FPS estimate.
|
||||
// It must be called once per complete decoded frame (after Decode()
|
||||
// succeeds), not on every RTP packet fragment.
|
||||
func (ft *fpsTracker) update(pts time.Duration) float64 {
|
||||
ft.mu.Lock()
|
||||
defer ft.mu.Unlock()
|
||||
|
||||
if !ft.hasPTS {
|
||||
ft.lastPTS = pts
|
||||
ft.hasPTS = true
|
||||
return 0
|
||||
}
|
||||
|
||||
interval := pts - ft.lastPTS
|
||||
ft.lastPTS = pts
|
||||
|
||||
// Skip invalid intervals (zero, negative, or very large which
|
||||
// indicate a PTS discontinuity or wrap).
|
||||
if interval <= 0 || interval > 5*time.Second {
|
||||
return ft.cachedFPS
|
||||
}
|
||||
|
||||
ft.frameTimeBuffer[ft.bufferIndex] = interval
|
||||
ft.bufferIndex = (ft.bufferIndex + 1) % ft.bufferSize
|
||||
|
||||
var totalInterval time.Duration
|
||||
validSamples := 0
|
||||
for _, iv := range ft.frameTimeBuffer {
|
||||
if iv > 0 {
|
||||
totalInterval += iv
|
||||
validSamples++
|
||||
}
|
||||
}
|
||||
if validSamples == 0 {
|
||||
return ft.cachedFPS
|
||||
}
|
||||
avgInterval := totalInterval / time.Duration(validSamples)
|
||||
if avgInterval == 0 {
|
||||
return ft.cachedFPS
|
||||
}
|
||||
|
||||
ft.cachedFPS = float64(time.Second) / float64(avgInterval)
|
||||
return ft.cachedFPS
|
||||
}
|
||||
|
||||
// fps returns the most recent FPS estimate without recording a new sample.
|
||||
func (ft *fpsTracker) fps() float64 {
|
||||
ft.mu.Lock()
|
||||
defer ft.mu.Unlock()
|
||||
return ft.cachedFPS
|
||||
}
|
||||
|
||||
// Init function
|
||||
var H264FrameDecoder *Decoder
|
||||
var H265FrameDecoder *Decoder
|
||||
|
||||
func init() {
|
||||
var err error
|
||||
// setup H264 -> raw frames decoder
|
||||
H264FrameDecoder, err = newDecoder("H264")
|
||||
if err != nil {
|
||||
log.Log.Error("capture.golibrtsp.init(): " + err.Error())
|
||||
}
|
||||
|
||||
// setup H265 -> raw frames decoder
|
||||
H265FrameDecoder, err = newDecoder("H265")
|
||||
if err != nil {
|
||||
log.Log.Error("capture.golibrtsp.init(): " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Connect to the RTSP server.
|
||||
func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) Connect(ctx context.Context, ctxOtel context.Context) (err error) {
|
||||
|
||||
_, span := tracer.Start(ctxOtel, "Connect")
|
||||
defer span.End()
|
||||
|
||||
transport := gortsplib.TransportTCP
|
||||
g.Client = gortsplib.Client{
|
||||
@@ -107,8 +220,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Iniatlise the mutex.
|
||||
// Initialize the mutex and FPS calculation.
|
||||
g.VideoDecoderMutex = &sync.Mutex{}
|
||||
g.initFPSCalculation()
|
||||
|
||||
// find the H264 media and format
|
||||
var formaH264 *format.H264
|
||||
@@ -124,43 +238,53 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(H264): " + err.Error())
|
||||
} else {
|
||||
// Get SPS from the SDP
|
||||
// Get SPS and PPS from the SDP
|
||||
// Calculate the width and height of the video
|
||||
var sps h264.SPS
|
||||
err = sps.Unmarshal(formaH264.SPS)
|
||||
if err != nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(H264): " + err.Error())
|
||||
return
|
||||
errSPS := sps.Unmarshal(formaH264.SPS)
|
||||
// It might be that the SPS is not available yet, so we'll proceed,
|
||||
// but try to fetch it later on.
|
||||
if errSPS != nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(H264): " + errSPS.Error())
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
SPS: []byte{},
|
||||
PPS: []byte{},
|
||||
Width: 0,
|
||||
Height: 0,
|
||||
FPS: 0,
|
||||
IsBackChannel: false,
|
||||
})
|
||||
} else {
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
SPS: formaH264.SPS,
|
||||
PPS: formaH264.PPS,
|
||||
Width: sps.Width(),
|
||||
Height: sps.Height(),
|
||||
FPS: sps.FPS(),
|
||||
IsBackChannel: false,
|
||||
})
|
||||
}
|
||||
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
SPS: formaH264.SPS,
|
||||
PPS: formaH264.PPS,
|
||||
Width: sps.Width(),
|
||||
Height: sps.Height(),
|
||||
FPS: sps.FPS(),
|
||||
IsBackChannel: false,
|
||||
})
|
||||
|
||||
// Set the index for the video
|
||||
g.VideoH264Index = int8(len(g.Streams)) - 1
|
||||
|
||||
// setup RTP/H264 -> H264 decoder
|
||||
rtpDec, err := formaH264.CreateDecoder()
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(H264): " + err.Error())
|
||||
}
|
||||
g.VideoH264Decoder = rtpDec
|
||||
|
||||
// setup H264 -> raw frames decoder
|
||||
frameDec, err := newDecoder("H264")
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
}
|
||||
g.VideoH264FrameDecoder = frameDec
|
||||
g.VideoH264FrameDecoder = H264FrameDecoder
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,8 +310,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Info("capture.golibrtsp.Connect(H265): " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH265.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
@@ -206,16 +331,11 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// setup RTP/H265 -> H265 decoder
|
||||
rtpDec, err := formaH265.CreateDecoder()
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(H265): " + err.Error())
|
||||
}
|
||||
g.VideoH265Decoder = rtpDec
|
||||
|
||||
// setup H265 -> raw frames decoder
|
||||
frameDec, err := newDecoder("H265")
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
}
|
||||
g.VideoH265FrameDecoder = frameDec
|
||||
g.VideoH265FrameDecoder = H265FrameDecoder
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,8 +360,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Error("capture.golibrtsp.Connect(G711): " + err.Error())
|
||||
} else {
|
||||
g.AudioG711Decoder = audiortpDec
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "PCM_MULAW",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -254,6 +375,42 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Look for audio stream.
|
||||
// find the Opus media and format
|
||||
audioFormaOpus, audioMediOpus := FindOPUS(desc, false)
|
||||
g.AudioOpusMedia = audioMediOpus
|
||||
g.AudioOpusForma = audioFormaOpus
|
||||
if audioMediOpus == nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(Opus): " + "audio media not found")
|
||||
} else {
|
||||
// setup a audio media
|
||||
_, err = g.Client.Setup(desc.BaseURL, audioMediOpus, 0, 0)
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(Opus): " + err.Error())
|
||||
} else {
|
||||
// create decoder
|
||||
audiortpDec, err := audioFormaOpus.CreateDecoder()
|
||||
if err != nil {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(Opus): " + err.Error())
|
||||
} else {
|
||||
g.AudioOpusDecoder = audiortpDec
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "OPUS",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
IsBackChannel: false,
|
||||
})
|
||||
|
||||
// Set the index for the audio
|
||||
g.AudioOpusIndex = int8(len(g.Streams)) - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Look for audio stream.
|
||||
// find the AAC media and format
|
||||
audioFormaMPEG4, audioMediMPEG4 := FindMPEG4Audio(desc, false)
|
||||
@@ -268,11 +425,15 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(MPEG4): " + err.Error())
|
||||
} else {
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "AAC",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
IsBackChannel: false,
|
||||
SampleRate: audioFormaMPEG4.Config.SampleRate,
|
||||
Channels: audioFormaMPEG4.Config.ChannelCount,
|
||||
})
|
||||
|
||||
// Set the index for the audio
|
||||
@@ -292,7 +453,11 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context, ctxRunAgent context.Context) (err error) {
|
||||
|
||||
_, span := tracer.Start(ctxRunAgent, "ConnectBackChannel")
|
||||
defer span.End()
|
||||
|
||||
// Transport TCP
|
||||
transport := gortsplib.TransportTCP
|
||||
g.Client = gortsplib.Client{
|
||||
@@ -337,7 +502,9 @@ func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
g.HasBackChannel = false
|
||||
} else {
|
||||
g.HasBackChannel = true
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "PCM_MULAW",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -351,16 +518,17 @@ func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
}
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) (err error) {
|
||||
func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) (err error) {
|
||||
log.Log.Debug("capture.golibrtsp.Start(): started")
|
||||
|
||||
// called when a MULAW audio RTP packet arrives
|
||||
if g.AudioG711Media != nil && g.AudioG711Forma != nil {
|
||||
g.Client.OnPacketRTP(g.AudioG711Media, g.AudioG711Forma, func(rtppkt *rtp.Packet) {
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.AudioG711Media, rtppkt)
|
||||
// decode timestamp
|
||||
pts2, ok := g.Client.PacketPTS2(g.AudioG711Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Warning("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -375,8 +543,10 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
IsKeyFrame: false,
|
||||
Packet: rtppkt,
|
||||
Data: op,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CompositionTime: pts2,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
Idx: g.AudioG711Index,
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -391,6 +561,7 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
g.Client.OnPacketRTP(g.AudioMPEG4Media, g.AudioMPEG4Forma, func(rtppkt *rtp.Packet) {
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.AudioMPEG4Media, rtppkt)
|
||||
pts2, ok := g.Client.PacketPTS2(g.AudioMPEG4Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Error("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
return
|
||||
@@ -414,8 +585,10 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
IsKeyFrame: false,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CompositionTime: pts2,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
Idx: g.AudioG711Index,
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -428,6 +601,9 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
// called when a video RTP packet arrives for H264
|
||||
var filteredAU [][]byte
|
||||
if g.VideoH264Media != nil && g.VideoH264Forma != nil {
|
||||
|
||||
//dtsExtractor := h264.NewDTSExtractor2()
|
||||
|
||||
g.Client.OnPacketRTP(g.VideoH264Media, g.VideoH264Forma, func(rtppkt *rtp.Packet) {
|
||||
|
||||
// This will check if we need to stop the thread,
|
||||
@@ -440,17 +616,17 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
|
||||
if len(rtppkt.Payload) > 0 {
|
||||
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.VideoH264Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Warning("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
// decode timestamps — validate each call separately
|
||||
pts, okPTS := g.Client.PacketPTS(g.VideoH264Media, rtppkt)
|
||||
pts2, okPTS2 := g.Client.PacketPTS2(g.VideoH264Media, rtppkt)
|
||||
if !okPTS2 {
|
||||
log.Log.Debug("capture.golibrtsp.Start(): unable to get PTS2 from PacketPTS2")
|
||||
return
|
||||
}
|
||||
|
||||
// Extract access units from RTP packets
|
||||
// We need to do this, because the decoder expects a full
|
||||
// access unit. Once we have a full access unit, we can
|
||||
// decode it, and know if it's a keyframe or not.
|
||||
// Extract access units from RTP packets.
|
||||
// We need a complete access unit to determine whether
|
||||
// this is a keyframe.
|
||||
au, errDecode := g.VideoH264Decoder.Decode(rtppkt)
|
||||
if errDecode != nil {
|
||||
if errDecode != rtph264.ErrNonStartingPacketAndNoPrevious && errDecode != rtph264.ErrMorePacketsNeeded {
|
||||
@@ -459,6 +635,18 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
return
|
||||
}
|
||||
|
||||
// Frame is complete — update per-stream FPS from PTS.
|
||||
if okPTS {
|
||||
ft := g.fpsTrackers[g.VideoH264Index]
|
||||
if ft == nil {
|
||||
ft = newFPSTracker(30)
|
||||
g.fpsTrackers[g.VideoH264Index] = ft
|
||||
}
|
||||
if ptsFPS := ft.update(pts); ptsFPS > 0 && ptsFPS <= 120 {
|
||||
g.Streams[g.VideoH264Index].FPS = ptsFPS
|
||||
}
|
||||
}
|
||||
|
||||
// We'll need to read out a few things.
|
||||
// prepend an AUD. This is required by some players
|
||||
filteredAU = [][]byte{
|
||||
@@ -468,8 +656,11 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
// Check if we have a keyframe.
|
||||
nonIDRPresent := false
|
||||
idrPresent := false
|
||||
|
||||
var naluTypes []string
|
||||
for _, nalu := range au {
|
||||
typ := h264.NALUType(nalu[0] & 0x1F)
|
||||
naluTypes = append(naluTypes, fmt.Sprintf("%s(%d,sz=%d)", typ.String(), int(typ), len(nalu)))
|
||||
switch typ {
|
||||
case h264.NALUTypeAccessUnitDelimiter:
|
||||
continue
|
||||
@@ -477,14 +668,73 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
idrPresent = true
|
||||
case h264.NALUTypeNonIDR:
|
||||
nonIDRPresent = true
|
||||
case h264.NALUTypeSPS:
|
||||
// Read out sps
|
||||
var sps h264.SPS
|
||||
errSPS := sps.Unmarshal(nalu)
|
||||
if errSPS == nil {
|
||||
// Debug SPS information
|
||||
g.debugSPSInfo(&sps, streamType)
|
||||
|
||||
// Get width
|
||||
g.Streams[g.VideoH264Index].Width = sps.Width()
|
||||
if streamType == "main" {
|
||||
configuration.Config.Capture.IPCamera.Width = sps.Width()
|
||||
} else if streamType == "sub" {
|
||||
configuration.Config.Capture.IPCamera.SubWidth = sps.Width()
|
||||
}
|
||||
// Get height
|
||||
g.Streams[g.VideoH264Index].Height = sps.Height()
|
||||
if streamType == "main" {
|
||||
configuration.Config.Capture.IPCamera.Height = sps.Height()
|
||||
} else if streamType == "sub" {
|
||||
configuration.Config.Capture.IPCamera.SubHeight = sps.Height()
|
||||
}
|
||||
// Get FPS using enhanced method
|
||||
fps := g.getEnhancedFPS(&sps, g.VideoH264Index)
|
||||
g.Streams[g.VideoH264Index].FPS = fps
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.Start(%s): Final FPS=%.2f", streamType, fps))
|
||||
g.VideoH264Forma.SPS = nalu
|
||||
if streamType == "main" && len(nalu) > 0 {
|
||||
// Fallback: store SPS from in-band NALUs when SDP was missing it.
|
||||
configuration.Config.Capture.IPCamera.SPSNALUs = [][]byte{nalu}
|
||||
}
|
||||
|
||||
}
|
||||
case h264.NALUTypePPS:
|
||||
g.VideoH264Forma.PPS = nalu
|
||||
if streamType == "main" && len(nalu) > 0 {
|
||||
// Fallback: store PPS from in-band NALUs when SDP was missing it.
|
||||
configuration.Config.Capture.IPCamera.PPSNALUs = [][]byte{nalu}
|
||||
}
|
||||
}
|
||||
filteredAU = append(filteredAU, nalu)
|
||||
}
|
||||
|
||||
if idrPresent && streamType == "main" {
|
||||
// Ensure config has parameter sets before recordings start.
|
||||
if len(configuration.Config.Capture.IPCamera.SPSNALUs) == 0 && len(g.VideoH264Forma.SPS) > 0 {
|
||||
configuration.Config.Capture.IPCamera.SPSNALUs = [][]byte{g.VideoH264Forma.SPS}
|
||||
log.Log.Warning("capture.golibrtsp.Start(main): fallback SPS set from keyframe")
|
||||
}
|
||||
if len(configuration.Config.Capture.IPCamera.PPSNALUs) == 0 && len(g.VideoH264Forma.PPS) > 0 {
|
||||
configuration.Config.Capture.IPCamera.PPSNALUs = [][]byte{g.VideoH264Forma.PPS}
|
||||
log.Log.Warning("capture.golibrtsp.Start(main): fallback PPS set from keyframe")
|
||||
}
|
||||
if len(configuration.Config.Capture.IPCamera.SPSNALUs) == 0 || len(configuration.Config.Capture.IPCamera.PPSNALUs) == 0 {
|
||||
log.Log.Warning("capture.golibrtsp.Start(main): SPS/PPS still missing after IDR keyframe")
|
||||
}
|
||||
}
|
||||
|
||||
if len(filteredAU) <= 1 || (!nonIDRPresent && !idrPresent) {
|
||||
return
|
||||
}
|
||||
|
||||
if idrPresent {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.Start(%s): IDR frame NALUs: [%s]",
|
||||
streamType, fmt.Sprintf("%v", naluTypes)))
|
||||
}
|
||||
|
||||
// Convert to packet.
|
||||
enc, err := h264.AnnexBMarshal(filteredAU)
|
||||
if err != nil {
|
||||
@@ -496,14 +746,35 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
IsKeyFrame: idrPresent,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
CompositionTime: pts2,
|
||||
Idx: g.VideoH264Index,
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
Codec: "H264",
|
||||
}
|
||||
|
||||
// Track keyframe intervals
|
||||
keyframeInterval := g.trackKeyframeInterval(idrPresent)
|
||||
if idrPresent && keyframeInterval > 0 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
fps := g.Streams[g.VideoH264Index].FPS
|
||||
if fps <= 0 {
|
||||
fps = 25.0 // Default fallback FPS
|
||||
}
|
||||
gopDuration := float64(keyframeInterval) / fps
|
||||
gopSize := int(avgInterval) // Store GOP size in a separate variable
|
||||
g.Streams[g.VideoH264Index].GopSize = gopSize
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.Start(%s): Keyframe interval=%d packets, Avg=%.1f, GOP=%.1fs, GOPSize=%d",
|
||||
streamType, keyframeInterval, avgInterval, gopDuration, gopSize))
|
||||
preRecording := configuration.Config.Capture.PreRecording
|
||||
if preRecording > 0 && int(gopDuration) > 0 {
|
||||
queue.SetMaxGopCount(int(preRecording)/int(gopDuration) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
pkt.Data = pkt.Data[4:]
|
||||
if pkt.IsKeyFrame {
|
||||
annexbNALUStartCode := func() []byte { return []byte{0x00, 0x00, 0x00, 0x01} }
|
||||
@@ -527,10 +798,17 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
if idrPresent {
|
||||
// Increment packets, so we know the device
|
||||
// is not blocking.
|
||||
r := communication.PackageCounter.Load().(int64)
|
||||
log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
|
||||
communication.PackageCounter.Store((r + 1) % 1000)
|
||||
communication.LastPacketTimer.Store(time.Now().Unix())
|
||||
if streamType == "main" {
|
||||
r := communication.PackageCounter.Load().(int64)
|
||||
log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
|
||||
communication.PackageCounter.Store((r + 1) % 1000)
|
||||
communication.LastPacketTimer.Store(time.Now().Unix())
|
||||
} else if streamType == "sub" {
|
||||
r := communication.PackageCounterSub.Load().(int64)
|
||||
log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
|
||||
communication.PackageCounterSub.Store((r + 1) % 1000)
|
||||
communication.LastPacketTimerSub.Store(time.Now().Unix())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -551,17 +829,17 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
|
||||
if len(rtppkt.Payload) > 0 {
|
||||
|
||||
// decode timestamp
|
||||
pts, ok := g.Client.PacketPTS(g.VideoH265Media, rtppkt)
|
||||
if !ok {
|
||||
log.Log.Warning("capture.golibrtsp.Start(): " + "unable to get PTS")
|
||||
// decode timestamps — validate each call separately
|
||||
pts, okPTS := g.Client.PacketPTS(g.VideoH265Media, rtppkt)
|
||||
pts2, okPTS2 := g.Client.PacketPTS2(g.VideoH265Media, rtppkt)
|
||||
if !okPTS2 {
|
||||
log.Log.Debug("capture.golibrtsp.Start(): unable to get PTS")
|
||||
return
|
||||
}
|
||||
|
||||
// Extract access units from RTP packets
|
||||
// We need to do this, because the decoder expects a full
|
||||
// access unit. Once we have a full access unit, we can
|
||||
// decode it, and know if it's a keyframe or not.
|
||||
// Extract access units from RTP packets.
|
||||
// We need a complete access unit to determine whether
|
||||
// this is a keyframe.
|
||||
au, errDecode := g.VideoH265Decoder.Decode(rtppkt)
|
||||
if errDecode != nil {
|
||||
if errDecode != rtph265.ErrNonStartingPacketAndNoPrevious && errDecode != rtph265.ErrMorePacketsNeeded {
|
||||
@@ -570,6 +848,18 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
return
|
||||
}
|
||||
|
||||
// Frame is complete — update per-stream FPS from PTS.
|
||||
if okPTS {
|
||||
ft := g.fpsTrackers[g.VideoH265Index]
|
||||
if ft == nil {
|
||||
ft = newFPSTracker(30)
|
||||
g.fpsTrackers[g.VideoH265Index] = ft
|
||||
}
|
||||
if ptsFPS := ft.update(pts); ptsFPS > 0 && ptsFPS <= 120 {
|
||||
g.Streams[g.VideoH265Index].FPS = ptsFPS
|
||||
}
|
||||
}
|
||||
|
||||
filteredAU = [][]byte{
|
||||
{byte(h265.NALUType_AUD_NUT) << 1, 1, 0x50},
|
||||
}
|
||||
@@ -616,14 +906,35 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
IsKeyFrame: isRandomAccess,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts,
|
||||
CompositionTime: pts,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
CompositionTime: pts2,
|
||||
Idx: g.VideoH265Index,
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
Codec: "H265",
|
||||
}
|
||||
|
||||
// Track keyframe intervals for H265
|
||||
keyframeInterval := g.trackKeyframeInterval(isRandomAccess)
|
||||
if isRandomAccess && keyframeInterval > 0 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
fps := g.Streams[g.VideoH265Index].FPS
|
||||
if fps <= 0 {
|
||||
fps = 25.0 // Default fallback FPS
|
||||
}
|
||||
gopDuration := float64(keyframeInterval) / fps
|
||||
gopSize := int(avgInterval) // Store GOP size in a separate variable
|
||||
g.Streams[g.VideoH265Index].GopSize = gopSize
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.Start(%s): Keyframe interval=%d packets, Avg=%.1f, GOP=%.1fs, GOPSize=%d",
|
||||
streamType, keyframeInterval, avgInterval, gopDuration, gopSize))
|
||||
preRecording := configuration.Config.Capture.PreRecording
|
||||
if preRecording > 0 && int(gopDuration) > 0 {
|
||||
queue.SetMaxGopCount(int(preRecording)/int(gopDuration) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
queue.WritePacket(pkt)
|
||||
|
||||
// This will check if we need to stop the thread,
|
||||
@@ -637,10 +948,17 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
if isRandomAccess {
|
||||
// Increment packets, so we know the device
|
||||
// is not blocking.
|
||||
r := communication.PackageCounter.Load().(int64)
|
||||
log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
|
||||
communication.PackageCounter.Store((r + 1) % 1000)
|
||||
communication.LastPacketTimer.Store(time.Now().Unix())
|
||||
if streamType == "main" {
|
||||
r := communication.PackageCounter.Load().(int64)
|
||||
log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
|
||||
communication.PackageCounter.Store((r + 1) % 1000)
|
||||
communication.LastPacketTimer.Store(time.Now().Unix())
|
||||
} else if streamType == "sub" {
|
||||
r := communication.PackageCounterSub.Load().(int64)
|
||||
log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
|
||||
communication.PackageCounterSub.Store((r + 1) % 1000)
|
||||
communication.LastPacketTimerSub.Store(time.Now().Unix())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -659,7 +977,7 @@ func (g *Golibrtsp) Start(ctx context.Context, queue *packets.Queue, configurati
|
||||
}
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
func (g *Golibrtsp) StartBackChannel(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) StartBackChannel(ctx context.Context, ctxRunAgent context.Context) (err error) {
|
||||
log.Log.Info("capture.golibrtsp.StartBackChannel(): started")
|
||||
// Wait for a second, so we can be sure the stream is playing.
|
||||
time.Sleep(1 * time.Second)
|
||||
@@ -741,8 +1059,8 @@ func (g *Golibrtsp) DecodePacketRaw(pkt packets.Packet) (image.Gray, error) {
|
||||
}
|
||||
|
||||
// Get a list of streams from the RTSP server.
|
||||
func (j *Golibrtsp) GetStreams() ([]packets.Stream, error) {
|
||||
return j.Streams, nil
|
||||
func (g *Golibrtsp) GetStreams() ([]packets.Stream, error) {
|
||||
return g.Streams, nil
|
||||
}
|
||||
|
||||
// Get a list of video streams from the RTSP server.
|
||||
@@ -768,15 +1086,22 @@ func (g *Golibrtsp) GetAudioStreams() ([]packets.Stream, error) {
|
||||
}
|
||||
|
||||
// Close the connection to the RTSP server.
|
||||
func (g *Golibrtsp) Close() error {
|
||||
func (g *Golibrtsp) Close(ctxOtel context.Context) error {
|
||||
|
||||
_, span := tracer.Start(ctxOtel, "Close")
|
||||
defer span.End()
|
||||
|
||||
// Close the demuxer.
|
||||
g.Client.Close()
|
||||
if g.VideoH264Decoder != nil {
|
||||
g.VideoH264FrameDecoder.Close()
|
||||
}
|
||||
if g.VideoH265FrameDecoder != nil {
|
||||
g.VideoH265FrameDecoder.Close()
|
||||
}
|
||||
|
||||
// We will have created the decoders globally, so we don't need to close them here.
|
||||
|
||||
//if g.VideoH264Decoder != nil {
|
||||
// g.VideoH264FrameDecoder.Close()
|
||||
//}
|
||||
//if g.VideoH265FrameDecoder != nil {
|
||||
// g.VideoH265FrameDecoder.Close()
|
||||
//}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -934,6 +1259,21 @@ func FindPCMU(desc *description.Session, isBackChannel bool) (*format.G711, *des
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func FindOPUS(desc *description.Session, isBackChannel bool) (*format.Opus, *description.Media) {
|
||||
for _, media := range desc.Medias {
|
||||
if media.IsBackChannel == isBackChannel {
|
||||
for _, forma := range media.Formats {
|
||||
if opus, ok := forma.(*format.Opus); ok {
|
||||
if opus.ChannelCount > 0 {
|
||||
return opus, media
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func FindMPEG4Audio(desc *description.Session, isBackChannel bool) (*format.MPEG4Audio, *description.Media) {
|
||||
for _, media := range desc.Medias {
|
||||
if media.IsBackChannel == isBackChannel {
|
||||
@@ -952,7 +1292,7 @@ func WriteMPEG4Audio(forma *format.MPEG4Audio, aus [][]byte) ([]byte, error) {
|
||||
pkts := make(mpeg4audio.ADTSPackets, len(aus))
|
||||
for i, au := range aus {
|
||||
pkts[i] = &mpeg4audio.ADTSPacket{
|
||||
Type: forma.Config.Type,
|
||||
Type: mpeg4audio.ObjectType(forma.Config.Type),
|
||||
SampleRate: forma.Config.SampleRate,
|
||||
ChannelCount: forma.Config.ChannelCount,
|
||||
AU: au,
|
||||
@@ -964,3 +1304,149 @@ func WriteMPEG4Audio(forma *format.MPEG4Audio, aus [][]byte) ([]byte, error) {
|
||||
}
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
// Initialize FPS calculation buffers
|
||||
func (g *Golibrtsp) initFPSCalculation() {
|
||||
// Ensure the per-stream FPS trackers map exists. Individual trackers
|
||||
// can be created lazily when a given stream index is first used.
|
||||
if g.fpsTrackers == nil {
|
||||
g.fpsTrackers = make(map[int8]*fpsTracker)
|
||||
}
|
||||
|
||||
// Initialize I-frame interval tracking
|
||||
g.keyframeBufferSize = 10 // Store last 10 keyframe intervals
|
||||
g.keyframeIntervals = make([]int, g.keyframeBufferSize)
|
||||
g.keyframeBufferIndex = 0
|
||||
g.packetsSinceLastKeyframe = 0
|
||||
g.lastKeyframePacketCount = 0
|
||||
}
|
||||
|
||||
// Get enhanced FPS information from SPS with fallback to PTS-based calculation.
|
||||
// The PTS-based FPS is computed per completed frame via fpsTracker.update(),
|
||||
// so by the time this is called we already have a good estimate.
|
||||
func (g *Golibrtsp) getEnhancedFPS(sps *h264.SPS, streamIndex int8) float64 {
|
||||
// First try to get FPS from SPS VUI parameters
|
||||
spsFPS := sps.FPS()
|
||||
|
||||
// Check if SPS FPS is reasonable (between 1 and 120 fps)
|
||||
if spsFPS > 0 && spsFPS <= 120 {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.getEnhancedFPS(): SPS FPS: %.2f", spsFPS))
|
||||
return spsFPS
|
||||
}
|
||||
|
||||
// Fallback to PTS-based FPS (already calculated per-frame)
|
||||
if ft := g.fpsTrackers[streamIndex]; ft != nil {
|
||||
ptsFPS := ft.fps()
|
||||
if ptsFPS > 0 && ptsFPS <= 120 {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.getEnhancedFPS(): PTS FPS: %.2f", ptsFPS))
|
||||
return ptsFPS
|
||||
}
|
||||
}
|
||||
|
||||
// Return SPS FPS even if it seems unreasonable, or default
|
||||
if spsFPS > 0 {
|
||||
return spsFPS
|
||||
}
|
||||
|
||||
return 25.0 // Default fallback FPS
|
||||
}
|
||||
|
||||
// Track I-frame intervals by counting packets between keyframes
|
||||
func (g *Golibrtsp) trackKeyframeInterval(isKeyframe bool) int {
|
||||
g.keyframeMutex.Lock()
|
||||
defer g.keyframeMutex.Unlock()
|
||||
|
||||
g.packetsSinceLastKeyframe++
|
||||
|
||||
if isKeyframe {
|
||||
// Store the interval since the last keyframe
|
||||
if g.lastKeyframePacketCount > 0 {
|
||||
interval := g.packetsSinceLastKeyframe
|
||||
g.keyframeIntervals[g.keyframeBufferIndex] = interval
|
||||
g.keyframeBufferIndex = (g.keyframeBufferIndex + 1) % g.keyframeBufferSize
|
||||
}
|
||||
|
||||
// Reset counter for next interval
|
||||
g.lastKeyframePacketCount = g.packetsSinceLastKeyframe
|
||||
g.packetsSinceLastKeyframe = 0
|
||||
|
||||
return g.lastKeyframePacketCount
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Get average keyframe interval (GOP size)
|
||||
func (g *Golibrtsp) getAverageKeyframeInterval() float64 {
|
||||
g.keyframeMutex.Lock()
|
||||
defer g.keyframeMutex.Unlock()
|
||||
|
||||
var totalInterval int
|
||||
validSamples := 0
|
||||
|
||||
for _, interval := range g.keyframeIntervals {
|
||||
if interval > 0 {
|
||||
totalInterval += interval
|
||||
validSamples++
|
||||
}
|
||||
}
|
||||
|
||||
if validSamples == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return float64(totalInterval) / float64(validSamples)
|
||||
}
|
||||
|
||||
// Calculate GOP size in seconds based on FPS and keyframe interval
|
||||
func (g *Golibrtsp) getGOPDuration(fps float64) float64 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
if avgInterval > 0 && fps > 0 {
|
||||
return avgInterval / fps
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Get detailed SPS timing information
|
||||
func (g *Golibrtsp) getSPSTimingInfo(sps *h264.SPS) (hasVUI bool, timeScale uint32, numUnitsInTick uint32, fps float64) {
|
||||
// Try to get FPS from SPS
|
||||
fps = sps.FPS()
|
||||
|
||||
// Note: The gortsplib SPS struct may not expose VUI parameters directly
|
||||
// but we can still work with the calculated FPS
|
||||
if fps > 0 {
|
||||
hasVUI = true
|
||||
// These are estimated values based on common patterns
|
||||
if fps == 25.0 {
|
||||
timeScale = 50
|
||||
numUnitsInTick = 1
|
||||
} else if fps == 30.0 {
|
||||
timeScale = 60
|
||||
numUnitsInTick = 1
|
||||
} else if fps == 24.0 {
|
||||
timeScale = 48
|
||||
numUnitsInTick = 1
|
||||
} else {
|
||||
// Generic calculation
|
||||
timeScale = uint32(fps * 2)
|
||||
numUnitsInTick = 1
|
||||
}
|
||||
}
|
||||
|
||||
return hasVUI, timeScale, numUnitsInTick, fps
|
||||
}
|
||||
|
||||
// Debug SPS information
|
||||
func (g *Golibrtsp) debugSPSInfo(sps *h264.SPS, streamType string) {
|
||||
hasVUI, timeScale, numUnitsInTick, fps := g.getSPSTimingInfo(sps)
|
||||
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): Width=%d, Height=%d",
|
||||
streamType, sps.Width(), sps.Height()))
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): HasVUI=%t, FPS=%.2f",
|
||||
streamType, hasVUI, fps))
|
||||
|
||||
if hasVUI {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): TimeScale=%d, NumUnitsInTick=%d",
|
||||
streamType, timeScale, numUnitsInTick))
|
||||
}
|
||||
}
|
||||
@@ -16,7 +16,8 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
"github.com/yapingcat/gomedia/go-mp4"
|
||||
"github.com/kerberos-io/agent/machinery/src/video"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
func CleanupRecordingDirectory(configDirectory string, configuration *models.Configuration) {
|
||||
@@ -63,22 +64,46 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
} else {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(): started")
|
||||
|
||||
recordingPeriod := config.Capture.PostRecording // number of seconds to record.
|
||||
maxRecordingPeriod := config.Capture.MaxLengthRecording // maximum number of seconds to record.
|
||||
preRecording := config.Capture.PreRecording * 1000
|
||||
postRecording := config.Capture.PostRecording * 1000 // number of seconds to record.
|
||||
maxRecordingPeriod := config.Capture.MaxLengthRecording * 1000 // maximum number of seconds to record.
|
||||
|
||||
// Synchronise the last synced time
|
||||
now := time.Now().Unix()
|
||||
startRecording := now
|
||||
timestamp := now
|
||||
// We will calculate the maxRecordingPeriod based on the preRecording and postRecording values.
|
||||
if maxRecordingPeriod == 0 {
|
||||
// If maxRecordingPeriod is not set, we will use the preRecording and postRecording values
|
||||
maxRecordingPeriod = preRecording + postRecording
|
||||
}
|
||||
|
||||
// For continuous and motion based recording we will use a single file.
|
||||
var file *os.File
|
||||
if maxRecordingPeriod < preRecording+postRecording {
|
||||
log.Log.Error("capture.main.HandleRecordStream(): maxRecordingPeriod is less than preRecording + postRecording, this is not allowed. Setting maxRecordingPeriod to preRecording + postRecording.")
|
||||
maxRecordingPeriod = preRecording + postRecording
|
||||
}
|
||||
|
||||
if config.FriendlyName != "" {
|
||||
config.Name = config.FriendlyName
|
||||
}
|
||||
|
||||
// Get the audio and video codec from the camera.
|
||||
// We only expect one audio and one video codec.
|
||||
// If there are multiple audio or video streams, we will use the first one.
|
||||
audioCodec := ""
|
||||
videoCodec := ""
|
||||
audioStreams, _ := rtspClient.GetAudioStreams()
|
||||
videoStreams, _ := rtspClient.GetVideoStreams()
|
||||
if len(audioStreams) > 0 {
|
||||
audioCodec = audioStreams[0].Name
|
||||
config.Capture.IPCamera.SampleRate = audioStreams[0].SampleRate
|
||||
config.Capture.IPCamera.Channels = audioStreams[0].Channels
|
||||
}
|
||||
if len(videoStreams) > 0 {
|
||||
videoCodec = videoStreams[0].Name
|
||||
}
|
||||
|
||||
// Check if continuous recording.
|
||||
if config.Capture.Continuous == "true" {
|
||||
|
||||
//var cws *cacheWriterSeeker
|
||||
var myMuxer *mp4.Movmuxer
|
||||
var mp4Video *video.MP4
|
||||
var videoTrack uint32
|
||||
var audioTrack uint32
|
||||
var name string
|
||||
@@ -86,15 +111,15 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// Do not do anything!
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): start recording")
|
||||
|
||||
now = time.Now().Unix()
|
||||
timestamp = now
|
||||
start := false
|
||||
|
||||
// If continuous record the full length
|
||||
recordingPeriod = maxRecordingPeriod
|
||||
postRecording = maxRecordingPeriod
|
||||
// Recording file name
|
||||
fullName := ""
|
||||
|
||||
var startRecording int64 = 0 // start recording timestamp in milliseconds
|
||||
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
@@ -110,20 +135,21 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
|
||||
nextPkt, cursorError = recordingCursor.ReadPacket()
|
||||
|
||||
now := time.Now().Unix()
|
||||
now := time.Now().UnixMilli()
|
||||
|
||||
if start && // If already recording and current frame is a keyframe and we should stop recording
|
||||
nextPkt.IsKeyFrame && (timestamp+recordingPeriod-now <= 0 || now-startRecording >= maxRecordingPeriod) {
|
||||
nextPkt.IsKeyFrame && (startRecording+postRecording-now <= 0 || now-startRecording > maxRecordingPeriod-500) {
|
||||
|
||||
// Write the last packet
|
||||
ttime := convertPTS(pkt.Time)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
// Write the last packet
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
// Write the last packet
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
@@ -132,21 +158,57 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
}
|
||||
|
||||
// This will write the trailer a well.
|
||||
if err := myMuxer.WriteTrailer(); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
// Close mp4
|
||||
if len(mp4Video.SPSNALUs) == 0 && len(configuration.Config.Capture.IPCamera.SPSNALUs) > 0 {
|
||||
mp4Video.SPSNALUs = configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
}
|
||||
|
||||
if len(mp4Video.PPSNALUs) == 0 && len(configuration.Config.Capture.IPCamera.PPSNALUs) > 0 {
|
||||
mp4Video.PPSNALUs = configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
}
|
||||
if len(mp4Video.VPSNALUs) == 0 && len(configuration.Config.Capture.IPCamera.VPSNALUs) > 0 {
|
||||
mp4Video.VPSNALUs = configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
}
|
||||
if (videoCodec == "H264" && (len(mp4Video.SPSNALUs) == 0 || len(mp4Video.PPSNALUs) == 0)) ||
|
||||
(videoCodec == "H265" && (len(mp4Video.VPSNALUs) == 0 || len(mp4Video.SPSNALUs) == 0 || len(mp4Video.PPSNALUs) == 0)) {
|
||||
log.Log.Warning("capture.main.HandleRecordStream(continuous): closing MP4 without full parameter sets, moov may be incomplete")
|
||||
}
|
||||
mp4Video.Close(&config)
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): recording finished: file save: " + name)
|
||||
|
||||
// Cleanup muxer
|
||||
start = false
|
||||
file.Close()
|
||||
file = nil
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
// Update the name with the duration in milliseconds.
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
"0-0-0-0" + "_" + // region coordinates, we
|
||||
"-1" + "_" + // token
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -193,7 +255,6 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
|
||||
start = true
|
||||
timestamp = now
|
||||
|
||||
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
|
||||
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
|
||||
@@ -204,13 +265,17 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// - Number of changes
|
||||
// - Token
|
||||
|
||||
startRecording = time.Now().Unix() // we mark the current time when the record started.ss
|
||||
s := strconv.FormatInt(startRecording, 10) + "_" +
|
||||
"6" + "-" +
|
||||
"967003" + "_" +
|
||||
config.Name + "_" +
|
||||
"200-200-400-400" + "_0_" +
|
||||
"769"
|
||||
startRecording = pkt.CurrentTime
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" + // start timestamp in seconds
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" + // length of milliseconds
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" + // milliseconds
|
||||
config.Name + "_" + // device name
|
||||
"0-0-0-0" + "_" + // region coordinates, we will not use this for continuous recording
|
||||
"0" + "_" + // token
|
||||
"0" + "_" //+ // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
@@ -218,49 +283,64 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// Running...
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): recording started")
|
||||
|
||||
file, err = os.Create(fullName)
|
||||
if err == nil {
|
||||
//cws = newCacheWriterSeeker(4096)
|
||||
myMuxer, _ = mp4.CreateMp4Muxer(file)
|
||||
// We choose between H264 and H265
|
||||
if pkt.Codec == "H264" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264)
|
||||
} else if pkt.Codec == "H265" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265)
|
||||
}
|
||||
// For an MP4 container, AAC is the only audio codec supported.
|
||||
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
|
||||
} else {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
// Get width and height from the camera.
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
|
||||
// Get SPS and PPS NALUs from the camera.
|
||||
spsNALUS := configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
ppsNALUS := configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
vpsNALUS := configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
|
||||
if len(spsNALUS) == 0 || len(ppsNALUS) == 0 {
|
||||
log.Log.Warning("capture.main.HandleRecordStream(continuous): missing SPS/PPS at recording start")
|
||||
}
|
||||
// Create a video file, and set the dimensions.
|
||||
mp4Video = video.NewMP4(fullName, spsNALUS, ppsNALUS, vpsNALUS, configuration.Config.Capture.MaxLengthRecording)
|
||||
mp4Video.SetWidth(width)
|
||||
mp4Video.SetHeight(height)
|
||||
|
||||
if videoCodec == "H264" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H264")
|
||||
} else if videoCodec == "H265" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H265")
|
||||
}
|
||||
if audioCodec == "AAC" {
|
||||
audioTrack = mp4Video.AddAudioTrack("AAC")
|
||||
} else if audioCodec == "PCM_MULAW" {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
|
||||
ttime := convertPTS(pkt.Time)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
// TODO: transcode to AAC, some work to do..
|
||||
// We might need to use ffmpeg to transcode the audio to AAC.
|
||||
// For now we will skip the audio track.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
}
|
||||
|
||||
recordingStatus = "started"
|
||||
|
||||
} else if start {
|
||||
ttime := convertPTS(pkt.Time)
|
||||
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
// New method using new mp4 library
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
@@ -269,7 +349,6 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pkt = nextPkt
|
||||
}
|
||||
|
||||
@@ -277,21 +356,43 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// If this happens we need to check to properly close the recording.
|
||||
if cursorError != nil {
|
||||
if recordingStatus == "started" {
|
||||
// This will write the trailer a well.
|
||||
if err := myMuxer.WriteTrailer(); err != nil {
|
||||
log.Log.Error(err.Error())
|
||||
}
|
||||
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): Recording finished: file save: " + name)
|
||||
|
||||
// Cleanup muxer
|
||||
start = false
|
||||
file.Close()
|
||||
file = nil
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
// Update the name with the duration in milliseconds.
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
"0-0-0-0" + "_" + // region coordinates, we
|
||||
"-1" + "_" + // token
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -329,33 +430,44 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Start motion based recording ")
|
||||
|
||||
var lastDuration time.Duration
|
||||
var lastRecordingTime int64
|
||||
var lastRecordingTime int64 = 0 // last recording timestamp in milliseconds
|
||||
var displayTime int64 = 0 // display time in milliseconds
|
||||
|
||||
//var cws *cacheWriterSeeker
|
||||
var myMuxer *mp4.Movmuxer
|
||||
var videoTrack uint32
|
||||
var audioTrack uint32
|
||||
|
||||
for motion := range communication.HandleMotion {
|
||||
|
||||
timestamp = time.Now().Unix()
|
||||
startRecording = time.Now().Unix() // we mark the current time when the record started.
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var nextPkt packets.Packet
|
||||
recordingCursor := queue.Oldest() // Start from the latest packet in the queue)
|
||||
|
||||
// If we have prerecording we will substract the number of seconds.
|
||||
// Taking into account FPS = GOP size (Keyfram interval)
|
||||
if config.Capture.PreRecording > 0 {
|
||||
now := time.Now().UnixMilli()
|
||||
motionTimestamp := now
|
||||
|
||||
// Might be that recordings are coming short after each other.
|
||||
// Therefore we do some math with the current time and the last recording time.
|
||||
start := false
|
||||
|
||||
timeBetweenNowAndLastRecording := startRecording - lastRecordingTime
|
||||
if timeBetweenNowAndLastRecording > int64(config.Capture.PreRecording) {
|
||||
startRecording = startRecording - int64(config.Capture.PreRecording) + 1
|
||||
} else {
|
||||
startRecording = startRecording - timeBetweenNowAndLastRecording
|
||||
}
|
||||
if cursorError == nil {
|
||||
pkt, cursorError = recordingCursor.ReadPacket()
|
||||
}
|
||||
|
||||
displayTime = pkt.CurrentTime
|
||||
startRecording := pkt.CurrentTime
|
||||
|
||||
// We have more packets in the queue (which might still be older than where we close the previous recording).
|
||||
// In that case we will use the last recording time to determine the start time of the recording, otherwise
|
||||
// we will have duplicate frames in the recording.
|
||||
if startRecording < lastRecordingTime {
|
||||
displayTime = lastRecordingTime
|
||||
startRecording = lastRecordingTime
|
||||
}
|
||||
|
||||
// If startRecording is 0, we will continue as it might be we are in a state of restarting the agent.
|
||||
if startRecording == 0 {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): startRecording is 0, we will continue as it might be we are in a state of restarting the agent.")
|
||||
continue
|
||||
}
|
||||
|
||||
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
|
||||
@@ -367,43 +479,59 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// - Number of changes
|
||||
// - Token
|
||||
|
||||
s := strconv.FormatInt(startRecording, 10) + "_" +
|
||||
"6" + "-" +
|
||||
"967003" + "_" +
|
||||
config.Name + "_" +
|
||||
"200-200-400-400" + "_" +
|
||||
strconv.Itoa(numberOfChanges) + "_" +
|
||||
"769"
|
||||
displayTimeSeconds := displayTime / 1000 // convert to seconds
|
||||
displayTimeMilliseconds := displayTime % 1000 // convert to milliseconds
|
||||
motionRectangleString := "0-0-0-0"
|
||||
if motion.Rectangle.X != 0 || motion.Rectangle.Y != 0 ||
|
||||
motion.Rectangle.Width != 0 || motion.Rectangle.Height != 0 {
|
||||
motionRectangleString = strconv.Itoa(motion.Rectangle.X) + "-" + strconv.Itoa(motion.Rectangle.Y) + "-" +
|
||||
strconv.Itoa(motion.Rectangle.Width) + "-" + strconv.Itoa(motion.Rectangle.Height)
|
||||
}
|
||||
|
||||
// Get the number of changes from the motion detection.
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
|
||||
s := strconv.FormatInt(displayTimeSeconds, 10) + "_" + // start timestamp in seconds
|
||||
strconv.Itoa(len(strconv.FormatInt(displayTimeMilliseconds, 10))) + "-" + // length of milliseconds
|
||||
strconv.FormatInt(displayTimeMilliseconds, 10) + "_" + // milliseconds
|
||||
config.Name + "_" + // device name
|
||||
motionRectangleString + "_" + // region coordinates, we will not use this for continuous recording
|
||||
strconv.Itoa(numberOfChanges) + "_" + // number of changes
|
||||
"0" // + "_" + // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
name := s + ".mp4"
|
||||
fullName := configDirectory + "/data/recordings/" + name
|
||||
|
||||
// Running...
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started")
|
||||
file, _ = os.Create(fullName)
|
||||
myMuxer, _ = mp4.CreateMp4Muxer(file)
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started (" + name + ")" + " at " + strconv.FormatInt(displayTimeSeconds, 10) + " unix")
|
||||
|
||||
// Check which video codec we need to use.
|
||||
videoSteams, _ := rtspClient.GetVideoStreams()
|
||||
for _, stream := range videoSteams {
|
||||
if stream.Name == "H264" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264)
|
||||
} else if stream.Name == "H265" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265)
|
||||
}
|
||||
// Get width and height from the camera.
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
|
||||
// Get SPS and PPS NALUs from the camera.
|
||||
spsNALUS := configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
ppsNALUS := configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
vpsNALUS := configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
|
||||
if len(spsNALUS) == 0 || len(ppsNALUS) == 0 {
|
||||
log.Log.Warning("capture.main.HandleRecordStream(motiondetection): missing SPS/PPS at recording start")
|
||||
}
|
||||
// For an MP4 container, AAC is the only audio codec supported.
|
||||
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
|
||||
start := false
|
||||
// Create a video file, and set the dimensions.
|
||||
mp4Video := video.NewMP4(fullName, spsNALUS, ppsNALUS, vpsNALUS, configuration.Config.Capture.MaxLengthRecording)
|
||||
mp4Video.SetWidth(width)
|
||||
mp4Video.SetHeight(height)
|
||||
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var nextPkt packets.Packet
|
||||
recordingCursor := queue.DelayedGopCount(int(config.Capture.PreRecording + 1))
|
||||
|
||||
if cursorError == nil {
|
||||
pkt, cursorError = recordingCursor.ReadPacket()
|
||||
if videoCodec == "H264" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H264")
|
||||
} else if videoCodec == "H265" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H265")
|
||||
}
|
||||
if audioCodec == "AAC" {
|
||||
audioTrack = mp4Video.AddAudioTrack("AAC")
|
||||
} else if audioCodec == "PCM_MULAW" {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
|
||||
for cursorError == nil {
|
||||
@@ -413,69 +541,104 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + cursorError.Error())
|
||||
}
|
||||
|
||||
now := time.Now().Unix()
|
||||
now = time.Now().UnixMilli()
|
||||
select {
|
||||
case motion := <-communication.HandleMotion:
|
||||
timestamp = now
|
||||
motionTimestamp = now
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): motion detected while recording. Expanding recording.")
|
||||
numberOfChanges = motion.NumberOfChanges
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Received message with recording data, detected changes to save: " + strconv.Itoa(numberOfChanges))
|
||||
default:
|
||||
}
|
||||
|
||||
if (timestamp+recordingPeriod-now < 0 || now-startRecording > maxRecordingPeriod) && nextPkt.IsKeyFrame {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(timestamp, 10) + ", recordingPeriod: " + strconv.FormatInt(recordingPeriod, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
|
||||
if (motionTimestamp+postRecording-now < 0 || now-startRecording > maxRecordingPeriod-500) && nextPkt.IsKeyFrame {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): timestamp+postRecording-now < 0 - " + strconv.FormatInt(motionTimestamp+postRecording-now, 10) + " < 0")
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): now-startRecording > maxRecordingPeriod-500 - " + strconv.FormatInt(now-startRecording, 10) + " > " + strconv.FormatInt(maxRecordingPeriod-500, 10))
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(motionTimestamp, 10) + ", postRecording: " + strconv.FormatInt(postRecording, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
|
||||
break
|
||||
}
|
||||
if pkt.IsKeyFrame && !start && pkt.Time >= lastDuration {
|
||||
if pkt.IsKeyFrame && !start && pkt.CurrentTime >= startRecording {
|
||||
// We start the recording if we have a keyframe and the last duration is 0 or less than the current packet time.
|
||||
// It could be start we start from the beginning of the recording.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): write frames")
|
||||
start = true
|
||||
}
|
||||
if start {
|
||||
|
||||
ttime := convertPTS(pkt.Time)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): add video sample")
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): add audio sample")
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
// TODO: transcode to AAC, some work to do..
|
||||
// We might need to use ffmpeg to transcode the audio to AAC.
|
||||
// For now we will skip the audio track.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
}
|
||||
|
||||
// We will sync to file every keyframe.
|
||||
if pkt.IsKeyFrame {
|
||||
err := file.Sync()
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
} else {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): synced file " + name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pkt = nextPkt
|
||||
}
|
||||
|
||||
// This will write the trailer a well.
|
||||
myMuxer.WriteTrailer()
|
||||
// Update the last duration and last recording time.
|
||||
// This is used to determine if we need to start a new recording.
|
||||
lastRecordingTime = pkt.CurrentTime
|
||||
|
||||
// This will close the recording and write the last packet.
|
||||
if len(mp4Video.SPSNALUs) == 0 && len(configuration.Config.Capture.IPCamera.SPSNALUs) > 0 {
|
||||
mp4Video.SPSNALUs = configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
}
|
||||
if len(mp4Video.PPSNALUs) == 0 && len(configuration.Config.Capture.IPCamera.PPSNALUs) > 0 {
|
||||
mp4Video.PPSNALUs = configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
}
|
||||
if len(mp4Video.VPSNALUs) == 0 && len(configuration.Config.Capture.IPCamera.VPSNALUs) > 0 {
|
||||
mp4Video.VPSNALUs = configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
}
|
||||
if (videoCodec == "H264" && (len(mp4Video.SPSNALUs) == 0 || len(mp4Video.PPSNALUs) == 0)) ||
|
||||
(videoCodec == "H265" && (len(mp4Video.VPSNALUs) == 0 || len(mp4Video.SPSNALUs) == 0 || len(mp4Video.PPSNALUs) == 0)) {
|
||||
log.Log.Warning("capture.main.HandleRecordStream(motiondetection): closing MP4 without full parameter sets, moov may be incomplete")
|
||||
}
|
||||
mp4Video.Close(&config)
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): file save: " + name)
|
||||
|
||||
lastDuration = pkt.Time
|
||||
lastRecordingTime = time.Now().Unix()
|
||||
file.Close()
|
||||
file = nil
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name with the duration in milliseconds.
|
||||
s := strconv.FormatInt(displayTimeSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(displayTimeMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(displayTimeMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
motionRectangleString + "_" +
|
||||
strconv.Itoa(numberOfChanges) + "_" + // number of changes
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -523,6 +686,10 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func VerifyCamera(c *gin.Context) {
|
||||
|
||||
// Start OpenTelemetry tracing
|
||||
ctxVerifyCamera, span := tracer.Start(context.Background(), "VerifyCamera", trace.WithSpanKind(trace.SpanKindServer))
|
||||
defer span.End()
|
||||
|
||||
var cameraStreams models.CameraStreams
|
||||
err := c.BindJSON(&cameraStreams)
|
||||
|
||||
@@ -548,12 +715,11 @@ func VerifyCamera(c *gin.Context) {
|
||||
Url: rtspUrl,
|
||||
}
|
||||
|
||||
err := rtspClient.Connect(ctx)
|
||||
err := rtspClient.Connect(ctx, ctxVerifyCamera)
|
||||
if err == nil {
|
||||
|
||||
// Get the streams from the rtsp client.
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
|
||||
videoIdx := -1
|
||||
audioIdx := -1
|
||||
for i, stream := range streams {
|
||||
@@ -564,17 +730,23 @@ func VerifyCamera(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
if videoIdx > -1 {
|
||||
c.JSON(200, models.APIResponse{
|
||||
Message: "All good, detected a H264 codec.",
|
||||
Data: streams,
|
||||
})
|
||||
err := rtspClient.Close(ctxVerifyCamera)
|
||||
if err == nil {
|
||||
if videoIdx > -1 {
|
||||
c.JSON(200, models.APIResponse{
|
||||
Message: "All good, detected a H264 codec.",
|
||||
Data: streams,
|
||||
})
|
||||
} else {
|
||||
c.JSON(400, models.APIResponse{
|
||||
Message: "Stream doesn't have a H264 codec, we only support H264 so far.",
|
||||
})
|
||||
}
|
||||
} else {
|
||||
c.JSON(400, models.APIResponse{
|
||||
Message: "Stream doesn't have a H264 codec, we only support H264 so far.",
|
||||
Message: "Something went wrong while closing the connection " + err.Error(),
|
||||
})
|
||||
}
|
||||
|
||||
} else {
|
||||
c.JSON(400, models.APIResponse{
|
||||
Message: err.Error(),
|
||||
@@ -587,7 +759,7 @@ func VerifyCamera(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func Base64Image(captureDevice *Capture, communication *models.Communication) string {
|
||||
func Base64Image(captureDevice *Capture, communication *models.Communication, configuration *models.Configuration) string {
|
||||
// We'll try to get a snapshot from the camera.
|
||||
var queue *packets.Queue
|
||||
var cursor *packets.QueueCursor
|
||||
@@ -605,7 +777,9 @@ func Base64Image(captureDevice *Capture, communication *models.Communication) st
|
||||
|
||||
// We'll try to have a keyframe, if not we'll return an empty string.
|
||||
var encodedImage string
|
||||
for {
|
||||
// Try for 3 times in a row.
|
||||
count := 0
|
||||
for count < 3 {
|
||||
if queue != nil && cursor != nil && rtspClient != nil {
|
||||
pkt, err := cursor.ReadPacket()
|
||||
if err == nil {
|
||||
@@ -615,11 +789,14 @@ func Base64Image(captureDevice *Capture, communication *models.Communication) st
|
||||
var img image.YCbCr
|
||||
img, err = (*rtspClient).DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
imageResized, _ := utils.ResizeImage(&img, uint(configuration.Config.Capture.IPCamera.BaseWidth), uint(configuration.Config.Capture.IPCamera.BaseHeight))
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
encodedImage = base64.StdEncoding.EncodeToString(bytes)
|
||||
break
|
||||
} else {
|
||||
count++
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
} else {
|
||||
break
|
||||
@@ -646,15 +823,22 @@ func JpegImage(captureDevice *Capture, communication *models.Communication) imag
|
||||
|
||||
// We'll try to have a keyframe, if not we'll return an empty string.
|
||||
var image image.YCbCr
|
||||
for {
|
||||
// Try for 3 times in a row.
|
||||
count := 0
|
||||
for count < 3 {
|
||||
if queue != nil && cursor != nil && rtspClient != nil {
|
||||
pkt, err := cursor.ReadPacket()
|
||||
if err == nil {
|
||||
if !pkt.IsKeyFrame {
|
||||
continue
|
||||
}
|
||||
image, _ = (*rtspClient).DecodePacket(pkt)
|
||||
break
|
||||
image, err = (*rtspClient).DecodePacket(pkt)
|
||||
if err != nil {
|
||||
count++
|
||||
continue
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
break
|
||||
@@ -666,3 +850,7 @@ func JpegImage(captureDevice *Capture, communication *models.Communication) imag
|
||||
func convertPTS(v time.Duration) uint64 {
|
||||
return uint64(v.Milliseconds())
|
||||
}
|
||||
|
||||
/*func convertPTS2(v int64) uint64 {
|
||||
return uint64(v) / 100
|
||||
}*/
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/dromara/carbon/v2"
|
||||
"github.com/elastic/go-sysinfo"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/golang-module/carbon/v2"
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
|
||||
@@ -131,7 +131,7 @@ func HandleUpload(configDirectory string, configuration *models.Configuration, c
|
||||
log.Log.Error("HandleUpload: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
delay = 20 * time.Second // slow down
|
||||
delay = 5 * time.Second // slow down
|
||||
if err != nil {
|
||||
log.Log.Error("HandleUpload: " + err.Error())
|
||||
}
|
||||
@@ -229,15 +229,17 @@ func HandleHeartBeat(configuration *models.Configuration, communication *models.
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
config := configuration.Config
|
||||
|
||||
// Get a pull point address
|
||||
var pullPointAddress string
|
||||
if config.Capture.IPCamera.ONVIFXAddr != "" {
|
||||
kerberosAgentVersion := utils.VERSION
|
||||
|
||||
// Create a loop pull point address, which we will use to retrieve async events
|
||||
// As you'll read below camera manufactures are having different implementations of events.
|
||||
var pullPointAddressLoopState string
|
||||
if configuration.Config.Capture.IPCamera.ONVIFXAddr != "" {
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
if err == nil {
|
||||
pullPointAddress, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
pullPointAddressLoopState, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
@@ -251,7 +253,6 @@ loop:
|
||||
|
||||
// We'll check ONVIF capabilitites anyhow.. Verify if we have PTZ, presets and inputs/outputs.
|
||||
// For the inputs we will keep track of a the inputs and outputs state.
|
||||
|
||||
onvifEnabled := "false"
|
||||
onvifZoom := "false"
|
||||
onvifPanTilt := "false"
|
||||
@@ -262,6 +263,7 @@ loop:
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
if err == nil {
|
||||
// We will try to retrieve the PTZ configurations from the device.
|
||||
onvifEnabled = "true"
|
||||
configurations, err := onvif.GetPTZConfigurationsFromDevice(device)
|
||||
if err == nil {
|
||||
@@ -290,14 +292,28 @@ loop:
|
||||
onvifPresetsList = []byte("[]")
|
||||
}
|
||||
} else {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while getting PTZ configurations: " + err.Error())
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): error while getting PTZ configurations: " + err.Error())
|
||||
onvifPresetsList = []byte("[]")
|
||||
}
|
||||
|
||||
// We will also fetch some events, to know the status of the inputs and outputs.
|
||||
// More event types might be added.
|
||||
if pullPointAddress != "" {
|
||||
events, err := onvif.GetEventMessages(device, pullPointAddress)
|
||||
// -- We have two differen pull point subscriptions, one for the initials events and one for the loop.
|
||||
// -- Some cameras do send recurrent events, others don't.
|
||||
// a. For some older Hikvision models, events are send repeatedly (if input is high) with the strong state (set to false).
|
||||
// - In this scenarion we are using a polling mechanism and set a timestamp to understand if the input is still active.
|
||||
// b. For some newer Hikvision models, Avigilon, events are send only once (if state is set active).
|
||||
// - In this scenario we are creating a new subscription to retrieve the initial (current) state of the inputs and outputs.
|
||||
|
||||
// Get a new pull point address, to get the initiatal state of the inputs and outputs.
|
||||
pullPointAddressInitialState, err := onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
if pullPointAddressInitialState != "" {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Fetching events from pullPointAddressInitialState")
|
||||
events, err := onvif.GetEventMessages(device, pullPointAddressInitialState)
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Completed fetching events from pullPointAddressInitialState")
|
||||
if err == nil && len(events) > 0 {
|
||||
onvifEventsList, err = json.Marshal(events)
|
||||
if err != nil {
|
||||
@@ -307,9 +323,28 @@ loop:
|
||||
} else if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while getting events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
// Try to unsubscribe and subscribe again.
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddress)
|
||||
pullPointAddress, err = onvif.CreatePullPointSubscription(device)
|
||||
} else if len(events) == 0 {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): no events found.")
|
||||
onvifEventsList = []byte("[]")
|
||||
}
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddressInitialState)
|
||||
}
|
||||
|
||||
// We do a second run an a long-living subscription to get the events asynchronously.
|
||||
if pullPointAddressLoopState != "" {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Fetching events from pullPointAddressLoopState")
|
||||
events, err := onvif.GetEventMessages(device, pullPointAddressLoopState)
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): Completed fetching events from pullPointAddressLoopState")
|
||||
if err == nil && len(events) > 0 {
|
||||
onvifEventsList, err = json.Marshal(events)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while marshalling events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while getting events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
pullPointAddressLoopState, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
@@ -319,9 +354,55 @@ loop:
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): no pull point address found.")
|
||||
onvifEventsList = []byte("[]")
|
||||
pullPointAddressLoopState, err = onvif.CreatePullPointSubscription(device)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while creating pull point subscription: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// It also might be that events are not supported by the camera, in that case we will try to get the digital inputs and outputs.
|
||||
// Through the `device` API, the `GetDigitalInputs` and `GetDigitalOutputs` functions are called.
|
||||
// The disadvantage of this approach is that we don't have the state of the inputs and outputs (which is crazy..)
|
||||
|
||||
if pullPointAddressInitialState == "" && pullPointAddressLoopState == "" {
|
||||
var events []onvif.ONVIFEvents
|
||||
outputs, err := onvif.GetRelayOutputs(device)
|
||||
if err != nil {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): error while getting relay outputs: " + err.Error())
|
||||
} else {
|
||||
for _, output := range outputs.RelayOutputs {
|
||||
event := onvif.ONVIFEvents{
|
||||
Key: string(output.Token),
|
||||
Value: "false",
|
||||
Type: "output",
|
||||
Timestamp: time.Now().Unix(),
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
}
|
||||
|
||||
inputs, err := onvif.GetDigitalInputs(device)
|
||||
if err != nil {
|
||||
log.Log.Debug("cloud.HandleHeartBeat(): error while getting digital inputs: " + err.Error())
|
||||
} else {
|
||||
for _, input := range inputs.DigitalInputs {
|
||||
event := onvif.ONVIFEvents{
|
||||
Key: string(input.Token),
|
||||
Value: "false",
|
||||
Type: "input",
|
||||
Timestamp: time.Now().Unix(),
|
||||
}
|
||||
events = append(events, event)
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal the events
|
||||
onvifEventsList, err = json.Marshal(events)
|
||||
if err != nil {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while marshalling events: " + err.Error())
|
||||
onvifEventsList = []byte("[]")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Error("cloud.HandleHeartBeat(): error while connecting to ONVIF device: " + err.Error())
|
||||
onvifPresetsList = []byte("[]")
|
||||
@@ -389,6 +470,16 @@ loop:
|
||||
hasBackChannel = "true"
|
||||
}
|
||||
|
||||
hub_encryption := "false"
|
||||
if config.HubEncryption == "true" {
|
||||
hub_encryption = "true"
|
||||
}
|
||||
|
||||
e2e_encryption := "false"
|
||||
if config.Encryption != nil && config.Encryption.Enabled == "true" {
|
||||
e2e_encryption = "true"
|
||||
}
|
||||
|
||||
// We will formated the uptime to a human readable format
|
||||
// this will be used on Kerberos Hub: Uptime -> 1 day and 2 hours.
|
||||
uptimeFormatted := uptimeStart.Format("2006-01-02 15:04:05")
|
||||
@@ -405,7 +496,9 @@ loop:
|
||||
|
||||
var object = fmt.Sprintf(`{
|
||||
"key" : "%s",
|
||||
"version" : "3.0.0",
|
||||
"version" : "%s",
|
||||
"hub_encryption": "%s",
|
||||
"e2e_encryption": "%s",
|
||||
"release" : "%s",
|
||||
"cpuid" : "%s",
|
||||
"clouduser" : "%s",
|
||||
@@ -441,11 +534,11 @@ loop:
|
||||
"docker" : true,
|
||||
"kios" : false,
|
||||
"raspberrypi" : false
|
||||
}`, config.Key, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, onvifEventsList, cameraConnected, hasBackChannel)
|
||||
}`, config.Key, kerberosAgentVersion, hub_encryption, e2e_encryption, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, onvifEventsList, cameraConnected, hasBackChannel)
|
||||
|
||||
// Get the private key to encrypt the data using symmetric encryption: AES.
|
||||
privateKey := config.HubPrivateKey
|
||||
if privateKey != "" {
|
||||
if hub_encryption == "true" && privateKey != "" {
|
||||
// Encrypt the data using AES.
|
||||
encrypted, err := encryption.AesEncrypt([]byte(object), privateKey)
|
||||
if err != nil {
|
||||
@@ -486,11 +579,13 @@ loop:
|
||||
// If we have a Kerberos Vault connected, we will also send some analytics
|
||||
// to that service.
|
||||
vaultURI = config.KStorage.URI
|
||||
if vaultURI != "" {
|
||||
accessKey := config.KStorage.AccessKey
|
||||
secretAccessKey := config.KStorage.SecretAccessKey
|
||||
if vaultURI != "" && accessKey != "" && secretAccessKey != "" {
|
||||
|
||||
var object = fmt.Sprintf(`{
|
||||
"key" : "%s",
|
||||
"version" : "3.0.0",
|
||||
"version" : "%s",
|
||||
"release" : "%s",
|
||||
"cpuid" : "%s",
|
||||
"clouduser" : "%s",
|
||||
@@ -524,7 +619,7 @@ loop:
|
||||
"docker" : true,
|
||||
"kios" : false,
|
||||
"raspberrypi" : false
|
||||
}`, config.Key, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, cameraConnected)
|
||||
}`, config.Key, kerberosAgentVersion, system.Version, system.CPUId, username, key, name, isEnterprise, system.Hostname, system.Architecture, system.TotalMemory, system.UsedMemory, system.FreeMemory, system.ProcessUsedMemory, macs, ips, "0", "0", "0", uptimeString, boottimeString, config.HubSite, onvifEnabled, onvifZoom, onvifPanTilt, onvifPresets, onvifPresetsList, cameraConnected)
|
||||
|
||||
var jsonStr = []byte(object)
|
||||
buffy := bytes.NewBuffer(jsonStr)
|
||||
@@ -552,11 +647,11 @@ loop:
|
||||
}
|
||||
}
|
||||
|
||||
if pullPointAddress != "" {
|
||||
if pullPointAddressLoopState != "" {
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
if err == nil {
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddress)
|
||||
if err != nil {
|
||||
onvif.UnsubscribePullPoint(device, pullPointAddressLoopState)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -577,6 +672,7 @@ func HandleLiveStreamSD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
// Check if we need to enable the live stream
|
||||
if config.Capture.Liveview != "false" {
|
||||
|
||||
deviceId := config.Key
|
||||
hubKey := ""
|
||||
if config.Cloud == "s3" && config.S3 != nil && config.S3.Publickey != "" {
|
||||
hubKey = config.S3.Publickey
|
||||
@@ -610,25 +706,79 @@ func HandleLiveStreamSD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): Sending base64 encoded images to MQTT.")
|
||||
img, err := rtspClient.DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
encoded := base64.StdEncoding.EncodeToString(bytes)
|
||||
imageResized, _ := utils.ResizeImage(&img, uint(config.Capture.IPCamera.BaseWidth), uint(config.Capture.IPCamera.BaseHeight))
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = encoded
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
chunking := config.Capture.LiveviewChunking
|
||||
|
||||
if chunking == "true" {
|
||||
|
||||
// Split encoded image into chunks of 2kb
|
||||
// This is to prevent the MQTT message to be too large.
|
||||
// By default, bytes are not encoded to base64 here; you are splitting the raw JPEG/PNG bytes.
|
||||
// However, in MQTT and web contexts, binary data may not be handled well, so base64 is often used.
|
||||
// To avoid base64 encoding, just send the raw []byte chunks as you do here.
|
||||
// If you want to avoid base64, make sure the receiver can handle binary payloads.
|
||||
|
||||
chunkSize := 25 * 1024 // 25KB chunks
|
||||
var chunks [][]byte
|
||||
for i := 0; i < len(bytes); i += chunkSize {
|
||||
end := i + chunkSize
|
||||
if end > len(bytes) {
|
||||
end = len(bytes)
|
||||
}
|
||||
chunk := bytes[i:end]
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
log.Log.Infof("cloud.HandleLiveStreamSD(): Sending %d chunks of size %d bytes.", len(chunks), chunkSize)
|
||||
|
||||
timestamp := time.Now().Unix()
|
||||
for i, chunk := range chunks {
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["id"] = timestamp
|
||||
valueMap["chunk"] = chunk
|
||||
valueMap["chunkIndex"] = i
|
||||
valueMap["chunkSize"] = chunkSize
|
||||
valueMap["chunkCount"] = len(chunks)
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Version: "v1.0.0",
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: deviceId,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey+"/"+deviceId, 1, false, payload)
|
||||
log.Log.Infof("cloud.HandleLiveStreamSD(): sent chunk %d/%d to MQTT topic kerberos/hub/%s/%s", i+1, len(chunks), hubKey, deviceId)
|
||||
time.Sleep(33 * time.Millisecond) // Sleep to avoid flooding the MQTT broker with messages
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = bytes
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
time.Sleep(1000 * time.Millisecond) // Sleep to avoid flooding the MQTT broker with messages
|
||||
}
|
||||
|
||||
} else {
|
||||
@@ -654,6 +804,12 @@ func HandleLiveStreamHD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
videoTrack := webrtc.NewVideoTrack(streams)
|
||||
audioTrack := webrtc.NewAudioTrack(streams)
|
||||
|
||||
if videoTrack == nil && audioTrack == nil {
|
||||
log.Log.Error("cloud.HandleLiveStreamHD(): failed to create both video and audio tracks")
|
||||
return
|
||||
}
|
||||
|
||||
go webrtc.WriteToTrack(livestreamCursor, configuration, communication, mqttClient, videoTrack, audioTrack, rtspClient)
|
||||
|
||||
if config.Capture.ForwardWebRTC == "true" {
|
||||
@@ -672,6 +828,79 @@ func HandleLiveStreamHD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
}
|
||||
}
|
||||
|
||||
func HandleRealtimeProcessing(processingCursor *packets.QueueCursor, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, rtspClient capture.RTSPClient) {
|
||||
|
||||
log.Log.Debug("cloud.RealtimeProcessing(): started")
|
||||
|
||||
config := configuration.Config
|
||||
|
||||
// If offline made is enabled, we will stop the thread.
|
||||
if config.Offline == "true" {
|
||||
log.Log.Debug("cloud.RealtimeProcessing(): stopping as Offline is enabled.")
|
||||
} else {
|
||||
|
||||
// Check if we need to enable the realtime processing
|
||||
if config.RealtimeProcessing == "true" {
|
||||
|
||||
hubKey := ""
|
||||
if config.Cloud == "s3" && config.S3 != nil && config.S3.Publickey != "" {
|
||||
hubKey = config.S3.Publickey
|
||||
} else if config.Cloud == "kstorage" && config.KStorage != nil && config.KStorage.CloudKey != "" {
|
||||
hubKey = config.KStorage.CloudKey
|
||||
}
|
||||
// This is the new way ;)
|
||||
if config.HubKey != "" {
|
||||
hubKey = config.HubKey
|
||||
}
|
||||
|
||||
// We will publish the keyframes to the MQTT topic.
|
||||
realtimeProcessingTopic := "kerberos/keyframes/" + hubKey
|
||||
if config.RealtimeProcessingTopic != "" {
|
||||
realtimeProcessingTopic = config.RealtimeProcessingTopic
|
||||
}
|
||||
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
|
||||
for cursorError == nil {
|
||||
pkt, cursorError = processingCursor.ReadPacket()
|
||||
if len(pkt.Data) == 0 || !pkt.IsKeyFrame {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Log.Info("cloud.RealtimeProcessing(): Sending base64 encoded images to MQTT.")
|
||||
img, err := rtspClient.DecodePacket(pkt)
|
||||
if err == nil {
|
||||
imageResized, _ := utils.ResizeImage(&img, uint(config.Capture.IPCamera.BaseWidth), uint(config.Capture.IPCamera.BaseHeight))
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
encoded := base64.StdEncoding.EncodeToString(bytes)
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = encoded
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-keyframe",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish(realtimeProcessingTopic, 0, false, payload)
|
||||
} else {
|
||||
log.Log.Info("cloud.RealtimeProcessing(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
log.Log.Debug("cloud.RealtimeProcessing(): stopping as Liveview is disabled.")
|
||||
}
|
||||
}
|
||||
|
||||
log.Log.Debug("cloud.HandleLiveStreamSD(): finished")
|
||||
}
|
||||
|
||||
// VerifyHub godoc
|
||||
// @Router /api/hub/verify [post]
|
||||
// @ID verify-hub
|
||||
@@ -995,3 +1224,184 @@ func VerifyPersistence(c *gin.Context, configDirectory string) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// VerifySecondaryPersistence godoc
|
||||
// @Router /api/persistence/secondary/verify [post]
|
||||
// @ID verify-persistence
|
||||
// @Security Bearer
|
||||
// @securityDefinitions.apikey Bearer
|
||||
// @in header
|
||||
// @name Authorization
|
||||
// @Tags persistence
|
||||
// @Param config body models.Config true "Config"
|
||||
// @Summary Will verify the secondary persistence.
|
||||
// @Description Will verify the secondary persistence.
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func VerifySecondaryPersistence(c *gin.Context, configDirectory string) {
|
||||
|
||||
var config models.Config
|
||||
err := c.BindJSON(&config)
|
||||
if err != nil || config.Cloud != "" {
|
||||
|
||||
if config.Cloud == "kstorage" || config.Cloud == "kerberosvault" {
|
||||
|
||||
if config.KStorageSecondary == nil {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): please fill-in the required Kerberos Vault credentials."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
|
||||
} else {
|
||||
|
||||
uri := config.KStorageSecondary.URI
|
||||
accessKey := config.KStorageSecondary.AccessKey
|
||||
secretAccessKey := config.KStorageSecondary.SecretAccessKey
|
||||
directory := config.KStorageSecondary.Directory
|
||||
provider := config.KStorageSecondary.Provider
|
||||
|
||||
if err == nil && uri != "" && accessKey != "" && secretAccessKey != "" {
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", uri+"/ping", nil)
|
||||
if err == nil {
|
||||
req.Header.Add("X-Kerberos-Storage-AccessKey", accessKey)
|
||||
req.Header.Add("X-Kerberos-Storage-SecretAccessKey", secretAccessKey)
|
||||
resp, err := client.Do(req)
|
||||
|
||||
if err == nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
|
||||
if provider != "" || directory != "" {
|
||||
|
||||
// Generate a random name.
|
||||
timestamp := time.Now().Unix()
|
||||
fileName := strconv.FormatInt(timestamp, 10) +
|
||||
"_6-967003_" + config.Name + "_200-200-400-400_24_769.mp4"
|
||||
|
||||
// Open test-480p.mp4
|
||||
file, err := os.Open(configDirectory + "/data/test-480p.mp4")
|
||||
if err != nil {
|
||||
msg := "cloud.VerifyPersistence(kerberosvault): error reading test-480p.mp4: " + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
req, err := http.NewRequest("POST", uri+"/storage", file)
|
||||
if err == nil {
|
||||
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", config.HubKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", accessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", secretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", directory)
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
defer resp.Body.Close()
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Upload allowed using the credentials provided (" + accessKey + ", " + secretAccessKey + ")"
|
||||
log.Log.Info(msg)
|
||||
c.JSON(200, models.APIResponse{
|
||||
Data: body,
|
||||
})
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying your persistence settings. Make sure your provider is the same as the storage provider in your Kerberos Vault, and the relevant storage provider is configured properly."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Upload of fake recording failed: " + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while creating /storage POST request." + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Provider and/or directory is missing from the request."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying storage credentials: " + string(body)
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying storage credentials:" + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): Something went wrong while verifying storage credentials:" + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(kerberosvault): please fill-in the required Kerberos Vault credentials."
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
msg := "cloud.VerifySecondaryPersistence(): No persistence was specified, so do not know what to verify:" + err.Error()
|
||||
log.Log.Error(msg)
|
||||
c.JSON(400, models.APIResponse{
|
||||
Data: msg,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,14 +3,20 @@ package cloud
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
)
|
||||
|
||||
// We will count the number of retries we have done.
|
||||
// If we have done more than "kstorageRetryPolicy" retries, we will stop, and start sending to the secondary storage.
|
||||
var kstorageRetryCount = 0
|
||||
var kstorageRetryTimeout = time.Now().Unix()
|
||||
|
||||
func UploadKerberosVault(configuration *models.Configuration, fileName string) (bool, bool, error) {
|
||||
|
||||
config := configuration.Config
|
||||
@@ -19,7 +25,7 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
config.KStorage.SecretAccessKey == "" ||
|
||||
config.KStorage.Directory == "" ||
|
||||
config.KStorage.URI == "" {
|
||||
err := "UploadKerberosVault: Kerberos Vault not properly configured."
|
||||
err := "UploadKerberosVault: Kerberos Vault not properly configured"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
@@ -42,64 +48,147 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
defer file.Close()
|
||||
}
|
||||
if err != nil {
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore."
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
|
||||
publicKey := config.KStorage.CloudKey
|
||||
// This is the new way ;)
|
||||
if config.HubKey != "" {
|
||||
publicKey = config.HubKey
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorage.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorage.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorage.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
|
||||
// We need to check if we are in a retry timeout.
|
||||
if kstorageRetryTimeout <= time.Now().Unix() {
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorage.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorage.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorage.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
|
||||
return false, true, nil
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
kstorageRetryCount = 0
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
// We increase the retry count, and set the timeout.
|
||||
// If we have reached the retry policy, we set the timeout.
|
||||
// This means we will not retry for the next 5 minutes.
|
||||
if kstorageRetryCount < config.KStorage.MaxRetries {
|
||||
kstorageRetryCount = (kstorageRetryCount + 1)
|
||||
}
|
||||
if kstorageRetryCount == config.KStorage.MaxRetries {
|
||||
kstorageRetryTimeout = time.Now().Add(time.Duration(config.KStorage.Timeout) * time.Second).Unix()
|
||||
}
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// We might need to check if we can upload to our secondary storage.
|
||||
if config.KStorageSecondary.AccessKey == "" ||
|
||||
config.KStorageSecondary.SecretAccessKey == "" ||
|
||||
config.KStorageSecondary.Directory == "" ||
|
||||
config.KStorageSecondary.URI == "" {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Secondary Kerberos Vault not properly configured.")
|
||||
} else {
|
||||
|
||||
if kstorageRetryCount < config.KStorage.MaxRetries {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Do not upload to secondary storage, we are still in retry policy.")
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Uploading to Secondary Kerberos Vault (" + config.KStorageSecondary.URI + ")")
|
||||
|
||||
file, err = os.OpenFile(fullname, os.O_RDWR, 0755)
|
||||
if file != nil {
|
||||
defer file.Close()
|
||||
}
|
||||
if err != nil {
|
||||
err := "UploadKerberosVault (Secondary): Upload Failed, file doesn't exists anymore"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.KStorageSecondary.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault (Secondary): error reading request, " + config.KStorageSecondary.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorageSecondary.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorageSecondary.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorageSecondary.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorageSecondary.Directory)
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Upload Finished to secondary, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Upload Failed to secondary, " + resp.Status + ", " + string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
errorMessage := "UploadKerberosVault: Upload Failed, " + err.Error()
|
||||
log.Log.Info(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
"github.com/gin-gonic/gin"
|
||||
"go.opentelemetry.io/otel"
|
||||
|
||||
"github.com/kerberos-io/agent/machinery/src/capture"
|
||||
"github.com/kerberos-io/agent/machinery/src/cloud"
|
||||
@@ -23,9 +24,15 @@ import (
|
||||
"github.com/tevino/abool"
|
||||
)
|
||||
|
||||
func Bootstrap(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
var tracer = otel.Tracer("github.com/kerberos-io/agent/machinery/src/components")
|
||||
|
||||
func Bootstrap(ctx context.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
|
||||
log.Log.Debug("components.Kerberos.Bootstrap(): bootstrapping the kerberos agent.")
|
||||
|
||||
bootstrapContext := context.Background()
|
||||
_, span := tracer.Start(bootstrapContext, "Bootstrap")
|
||||
|
||||
// We will keep track of the Kerberos Agent up time
|
||||
// This is send to Kerberos Hub in a heartbeat.
|
||||
uptimeStart := time.Now()
|
||||
@@ -36,12 +43,20 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
packageCounter.Store(int64(0))
|
||||
communication.PackageCounter = &packageCounter
|
||||
|
||||
var packageCounterSub atomic.Value
|
||||
packageCounterSub.Store(int64(0))
|
||||
communication.PackageCounterSub = &packageCounterSub
|
||||
|
||||
// This is used when the last packet was received (timestamp),
|
||||
// this metric is used to determine if the camera is still online/connected.
|
||||
var lastPacketTimer atomic.Value
|
||||
packageCounter.Store(int64(0))
|
||||
communication.LastPacketTimer = &lastPacketTimer
|
||||
|
||||
var lastPacketTimerSub atomic.Value
|
||||
packageCounterSub.Store(int64(0))
|
||||
communication.LastPacketTimerSub = &lastPacketTimerSub
|
||||
|
||||
// This is used to understand if we have a working Kerberos Hub connection
|
||||
// cloudTimestamp will be updated when successfully sending heartbeats.
|
||||
var cloudTimestamp atomic.Value
|
||||
@@ -70,6 +85,8 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
// Configure a MQTT client which helps for a bi-directional communication
|
||||
mqttClient := routers.ConfigureMQTT(configDirectory, configuration, communication)
|
||||
|
||||
span.End()
|
||||
|
||||
// Run the agent and fire up all the other
|
||||
// goroutines which do image capture, motion detection, onvif, etc.
|
||||
for {
|
||||
@@ -106,6 +123,9 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
|
||||
func RunAgent(configDirectory string, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, uptimeStart time.Time, cameraSettings *models.Camera, captureDevice *capture.Capture) string {
|
||||
|
||||
ctx := context.Background()
|
||||
ctxRunAgent, span := tracer.Start(ctx, "RunAgent")
|
||||
|
||||
log.Log.Info("components.Kerberos.RunAgent(): Creating camera and processing threads.")
|
||||
config := configuration.Config
|
||||
|
||||
@@ -116,10 +136,10 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
rtspUrl := config.Capture.IPCamera.RTSP
|
||||
rtspClient := captureDevice.SetMainClient(rtspUrl)
|
||||
if rtspUrl != "" {
|
||||
err := rtspClient.Connect(context.Background())
|
||||
err := rtspClient.Connect(ctx, ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP stream: " + err.Error())
|
||||
rtspClient.Close()
|
||||
rtspClient.Close(ctxRunAgent)
|
||||
rtspClient = nil
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
@@ -137,7 +157,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
videoStreams, err := rtspClient.GetVideoStreams()
|
||||
if err != nil || len(videoStreams) == 0 {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): no video stream found, might be the wrong codec (we only support H264 for the moment)")
|
||||
rtspClient.Close()
|
||||
rtspClient.Close(ctxRunAgent)
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
@@ -153,6 +173,27 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
configuration.Config.Capture.IPCamera.Width = width
|
||||
configuration.Config.Capture.IPCamera.Height = height
|
||||
|
||||
// Set the liveview width and height, this is used for the liveview and motion regions (drawing on the hub).
|
||||
baseWidth := config.Capture.IPCamera.BaseWidth
|
||||
baseHeight := config.Capture.IPCamera.BaseHeight
|
||||
// If the liveview height is not set, we will calculate it based on the width and aspect ratio of the camera.
|
||||
if baseWidth > 0 && baseHeight == 0 {
|
||||
widthAspectRatio := float64(baseWidth) / float64(width)
|
||||
configuration.Config.Capture.IPCamera.BaseHeight = int(float64(height) * widthAspectRatio)
|
||||
} else if baseHeight > 0 && baseWidth > 0 {
|
||||
configuration.Config.Capture.IPCamera.BaseHeight = baseHeight
|
||||
configuration.Config.Capture.IPCamera.BaseWidth = baseWidth
|
||||
} else {
|
||||
configuration.Config.Capture.IPCamera.BaseHeight = height
|
||||
configuration.Config.Capture.IPCamera.BaseWidth = width
|
||||
}
|
||||
|
||||
// Set the SPS and PPS values in the configuration.
|
||||
configuration.Config.Capture.IPCamera.SPSNALUs = [][]byte{videoStream.SPS}
|
||||
configuration.Config.Capture.IPCamera.PPSNALUs = [][]byte{videoStream.PPS}
|
||||
configuration.Config.Capture.IPCamera.VPSNALUs = [][]byte{videoStream.VPS}
|
||||
|
||||
// Define queues for the main and sub stream.
|
||||
var queue *packets.Queue
|
||||
var subQueue *packets.Queue
|
||||
|
||||
@@ -174,19 +215,19 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
rtspSubClient := captureDevice.SetSubClient(subRtspUrl)
|
||||
captureDevice.RTSPSubClient = rtspSubClient
|
||||
|
||||
err := rtspSubClient.Connect(context.Background())
|
||||
err := rtspSubClient.Connect(ctx, ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP sub stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP sub stream: " + rtspUrl)
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP sub stream: " + subRtspUrl)
|
||||
|
||||
// Get the video streams from the RTSP server.
|
||||
videoSubStreams, err = rtspSubClient.GetVideoStreams()
|
||||
if err != nil || len(videoSubStreams) == 0 {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): no video sub stream found, might be the wrong codec (we only support H264 for the moment)")
|
||||
rtspSubClient.Close()
|
||||
rtspSubClient.Close(ctxRunAgent)
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
@@ -198,42 +239,24 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
height := videoSubStream.Height
|
||||
|
||||
// Set config values as well
|
||||
configuration.Config.Capture.IPCamera.Width = width
|
||||
configuration.Config.Capture.IPCamera.Height = height
|
||||
}
|
||||
configuration.Config.Capture.IPCamera.SubWidth = width
|
||||
configuration.Config.Capture.IPCamera.SubHeight = height
|
||||
|
||||
if cameraSettings.RTSP != rtspUrl ||
|
||||
cameraSettings.SubRTSP != subRtspUrl ||
|
||||
cameraSettings.Width != width ||
|
||||
cameraSettings.Height != height {
|
||||
|
||||
// TODO: this condition is used to reset the decoder when the camera settings change.
|
||||
// The main idea is that you only set the decoder once, and then reuse it on each restart (no new memory allocation).
|
||||
// However the stream settings of the camera might have been changed, and so the decoder might need to be reloaded.
|
||||
// .... Not used for the moment ....
|
||||
|
||||
if cameraSettings.RTSP != "" && cameraSettings.SubRTSP != "" && cameraSettings.Initialized {
|
||||
//decoder.Close()
|
||||
//if subStreamEnabled {
|
||||
// subDecoder.Close()
|
||||
//}
|
||||
// If we have a substream, we need to set the width and height of the substream. (so we will override above information)
|
||||
// Set the liveview width and height, this is used for the liveview and motion regions (drawing on the hub).
|
||||
baseWidth := config.Capture.IPCamera.BaseWidth
|
||||
baseHeight := config.Capture.IPCamera.BaseHeight
|
||||
// If the liveview height is not set, we will calculate it based on the width and aspect ratio of the camera.
|
||||
if baseWidth > 0 && baseHeight == 0 {
|
||||
widthAspectRatio := float64(baseWidth) / float64(width)
|
||||
configuration.Config.Capture.IPCamera.BaseHeight = int(float64(height) * widthAspectRatio)
|
||||
} else if baseHeight > 0 && baseWidth > 0 {
|
||||
configuration.Config.Capture.IPCamera.BaseHeight = baseHeight
|
||||
configuration.Config.Capture.IPCamera.BaseWidth = baseWidth
|
||||
} else {
|
||||
configuration.Config.Capture.IPCamera.BaseHeight = height
|
||||
configuration.Config.Capture.IPCamera.BaseWidth = width
|
||||
}
|
||||
|
||||
// At some routines we will need to decode the image.
|
||||
// Make sure its properly locked as we only have a single decoder.
|
||||
log.Log.Info("components.Kerberos.RunAgent(): camera settings changed, reloading decoder")
|
||||
//capture.GetVideoDecoder(decoder, streams)
|
||||
//if subStreamEnabled {
|
||||
// capture.GetVideoDecoder(subDecoder, subStreams)
|
||||
//}
|
||||
|
||||
cameraSettings.RTSP = rtspUrl
|
||||
cameraSettings.SubRTSP = subRtspUrl
|
||||
cameraSettings.Width = width
|
||||
cameraSettings.Height = height
|
||||
cameraSettings.Initialized = true
|
||||
} else {
|
||||
log.Log.Info("components.Kerberos.RunAgent(): camera settings did not change, keeping decoder")
|
||||
}
|
||||
|
||||
// We are creating a queue to store the RTSP frames in, these frames will be
|
||||
@@ -243,25 +266,31 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
// Set the maximum GOP count, this is used to determine the pre-recording time.
|
||||
log.Log.Info("components.Kerberos.RunAgent(): SetMaxGopCount was set with: " + strconv.Itoa(int(config.Capture.PreRecording)+1))
|
||||
queue.SetMaxGopCount(int(config.Capture.PreRecording) + 1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
|
||||
queue.SetMaxGopCount(1) // We will adjust this later on, when we have the GOP size.
|
||||
queue.WriteHeader(videoStreams)
|
||||
go rtspClient.Start(context.Background(), queue, configuration, communication)
|
||||
go rtspClient.Start(ctx, "main", queue, configuration, communication)
|
||||
|
||||
// Main stream is connected and ready to go.
|
||||
communication.MainStreamConnected = true
|
||||
|
||||
// Try to create backchannel
|
||||
rtspBackChannelClient := captureDevice.SetBackChannelClient(rtspUrl)
|
||||
err = rtspBackChannelClient.ConnectBackChannel(context.Background())
|
||||
err = rtspBackChannelClient.ConnectBackChannel(ctx, ctxRunAgent)
|
||||
if err == nil {
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP backchannel stream: " + rtspUrl)
|
||||
go rtspBackChannelClient.StartBackChannel(context.Background())
|
||||
go rtspBackChannelClient.StartBackChannel(ctx, ctxRunAgent)
|
||||
}
|
||||
|
||||
rtspSubClient := captureDevice.RTSPSubClient
|
||||
if subStreamEnabled && rtspSubClient != nil {
|
||||
subQueue = packets.NewQueue()
|
||||
communication.SubQueue = subQueue
|
||||
subQueue.SetMaxGopCount(1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
|
||||
subQueue.SetMaxGopCount(1) // GOP time frame is set to 1 for motion detection and livestreaming.
|
||||
subQueue.WriteHeader(videoSubStreams)
|
||||
go rtspSubClient.Start(context.Background(), subQueue, configuration, communication)
|
||||
go rtspSubClient.Start(ctx, "sub", subQueue, configuration, communication)
|
||||
|
||||
// Sub stream is connected and ready to go.
|
||||
communication.SubStreamConnected = true
|
||||
}
|
||||
|
||||
// Handle livestream SD (low resolution over MQTT)
|
||||
@@ -274,7 +303,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
}
|
||||
|
||||
// Handle livestream HD (high resolution over WEBRTC)
|
||||
communication.HandleLiveHDHandshake = make(chan models.RequestHDStreamPayload, 1)
|
||||
communication.HandleLiveHDHandshake = make(chan models.RequestHDStreamPayload, 10)
|
||||
if subStreamEnabled {
|
||||
livestreamHDCursor := subQueue.Latest()
|
||||
go cloud.HandleLiveStreamHD(livestreamHDCursor, configuration, communication, mqttClient, rtspSubClient)
|
||||
@@ -287,7 +316,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
go capture.HandleRecordStream(queue, configDirectory, configuration, communication, rtspClient)
|
||||
|
||||
// Handle processing of motion
|
||||
communication.HandleMotion = make(chan models.MotionDataPartial, 1)
|
||||
communication.HandleMotion = make(chan models.MotionDataPartial, 10)
|
||||
if subStreamEnabled {
|
||||
motionCursor := subQueue.Latest()
|
||||
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, rtspSubClient)
|
||||
@@ -296,14 +325,23 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, rtspClient)
|
||||
}
|
||||
|
||||
// Handle realtime processing if enabled.
|
||||
if subStreamEnabled {
|
||||
realtimeProcessingCursor := subQueue.Latest()
|
||||
go cloud.HandleRealtimeProcessing(realtimeProcessingCursor, configuration, communication, mqttClient, rtspClient)
|
||||
} else {
|
||||
realtimeProcessingCursor := queue.Latest()
|
||||
go cloud.HandleRealtimeProcessing(realtimeProcessingCursor, configuration, communication, mqttClient, rtspClient)
|
||||
}
|
||||
|
||||
// Handle Upload to cloud provider (Kerberos Hub, Kerberos Vault and others)
|
||||
go cloud.HandleUpload(configDirectory, configuration, communication)
|
||||
|
||||
// Handle ONVIF actions
|
||||
communication.HandleONVIF = make(chan models.OnvifAction, 1)
|
||||
communication.HandleONVIF = make(chan models.OnvifAction, 10)
|
||||
go onvif.HandleONVIFActions(configuration, communication)
|
||||
|
||||
communication.HandleAudio = make(chan models.AudioDataPartial, 1)
|
||||
communication.HandleAudio = make(chan models.AudioDataPartial, 10)
|
||||
if rtspBackChannelClient.HasBackChannel {
|
||||
communication.HasBackChannel = true
|
||||
go WriteAudioToBackchannel(communication, rtspBackChannelClient)
|
||||
@@ -312,6 +350,9 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
// If we reach this point, we have a working RTSP connection.
|
||||
communication.CameraConnected = true
|
||||
|
||||
// Otel end span
|
||||
span.End()
|
||||
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
// This will go into a blocking state, once this channel is triggered
|
||||
// the agent will cleanup and restart.
|
||||
@@ -320,6 +361,8 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
// If we reach this point, we are stopping the stream.
|
||||
communication.CameraConnected = false
|
||||
communication.MainStreamConnected = false
|
||||
communication.SubStreamConnected = false
|
||||
|
||||
// Cancel the main context, this will stop all the other goroutines.
|
||||
(*communication.CancelContext)()
|
||||
@@ -332,9 +375,20 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
// Here we are cleaning up everything!
|
||||
if configuration.Config.Offline != "true" {
|
||||
communication.HandleUpload <- "stop"
|
||||
select {
|
||||
case communication.HandleUpload <- "stop":
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping upload")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping upload timed out")
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case communication.HandleStream <- "stop":
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping stream")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.RunAgent(): stopping stream timed out")
|
||||
}
|
||||
communication.HandleStream <- "stop"
|
||||
// We use the steam channel to stop both main and sub stream.
|
||||
//if subStreamEnabled {
|
||||
// communication.HandleSubStream <- "stop"
|
||||
@@ -342,7 +396,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
err = rtspClient.Close()
|
||||
err = rtspClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -354,7 +408,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
communication.Queue = nil
|
||||
|
||||
if subStreamEnabled {
|
||||
err = rtspSubClient.Close()
|
||||
err = rtspSubClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP sub stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -365,7 +419,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
communication.SubQueue = nil
|
||||
}
|
||||
|
||||
err = rtspBackChannelClient.Close()
|
||||
err = rtspBackChannelClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP backchannel stream: " + err.Error())
|
||||
}
|
||||
@@ -397,14 +451,19 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
func ControlAgent(communication *models.Communication) {
|
||||
log.Log.Debug("components.Kerberos.ControlAgent(): started")
|
||||
packageCounter := communication.PackageCounter
|
||||
packageSubCounter := communication.PackageCounterSub
|
||||
go func() {
|
||||
// A channel to check the camera activity
|
||||
var previousPacket int64 = 0
|
||||
var previousPacketSub int64 = 0
|
||||
var occurence = 0
|
||||
var occurenceSub = 0
|
||||
for {
|
||||
|
||||
// If camera is connected, we'll check if we are still receiving packets.
|
||||
if communication.CameraConnected {
|
||||
|
||||
// First we'll check the main stream.
|
||||
packetsR := packageCounter.Load().(int64)
|
||||
if packetsR == previousPacket {
|
||||
// If we are already reconfiguring,
|
||||
@@ -416,16 +475,49 @@ func ControlAgent(communication *models.Communication) {
|
||||
occurence = 0
|
||||
}
|
||||
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Number of packets read " + strconv.FormatInt(packetsR, 10))
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Number of packets read from mainstream: " + strconv.FormatInt(packetsR, 10))
|
||||
|
||||
// After 15 seconds without activity this is thrown..
|
||||
if occurence == 3 {
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery.")
|
||||
communication.HandleBootstrap <- "restart"
|
||||
time.Sleep(2 * time.Second)
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking mainstream.")
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream timed out")
|
||||
}
|
||||
occurence = 0
|
||||
}
|
||||
|
||||
// Now we'll check the sub stream.
|
||||
packetsSubR := packageSubCounter.Load().(int64)
|
||||
if communication.SubStreamConnected {
|
||||
if packetsSubR == previousPacketSub {
|
||||
// If we are already reconfiguring,
|
||||
// we dont need to check if the stream is blocking.
|
||||
if !communication.IsConfiguring.IsSet() {
|
||||
occurenceSub = occurenceSub + 1
|
||||
}
|
||||
} else {
|
||||
occurenceSub = 0
|
||||
}
|
||||
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Number of packets read from substream: " + strconv.FormatInt(packetsSubR, 10))
|
||||
|
||||
// After 15 seconds without activity this is thrown..
|
||||
if occurenceSub == 3 {
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream timed out")
|
||||
}
|
||||
occurenceSub = 0
|
||||
}
|
||||
}
|
||||
|
||||
previousPacket = packageCounter.Load().(int64)
|
||||
previousPacketSub = packageSubCounter.Load().(int64)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
@@ -556,7 +648,12 @@ func GetDays(c *gin.Context, configDirectory string, configuration *models.Confi
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func StopAgent(c *gin.Context, communication *models.Communication) {
|
||||
log.Log.Info("components.Kerberos.StopAgent(): sending signal to stop agent, this will os.Exit(0).")
|
||||
communication.HandleBootstrap <- "stop"
|
||||
select {
|
||||
case communication.HandleBootstrap <- "stop":
|
||||
log.Log.Info("components.Kerberos.StopAgent(): Stopping machinery.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.StopAgent(): Stopping machinery timed out")
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"stopped": true,
|
||||
})
|
||||
@@ -571,7 +668,12 @@ func StopAgent(c *gin.Context, communication *models.Communication) {
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func RestartAgent(c *gin.Context, communication *models.Communication) {
|
||||
log.Log.Info("components.Kerberos.RestartAgent(): sending signal to restart agent.")
|
||||
communication.HandleBootstrap <- "restart"
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
log.Log.Info("components.Kerberos.RestartAgent(): Restarting machinery.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("components.Kerberos.RestartAgent(): Restarting machinery timed out")
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"restarted": true,
|
||||
})
|
||||
@@ -605,7 +707,7 @@ func MakeRecording(c *gin.Context, communication *models.Communication) {
|
||||
// @Success 200
|
||||
func GetSnapshotBase64(c *gin.Context, captureDevice *capture.Capture, configuration *models.Configuration, communication *models.Communication) {
|
||||
// We'll try to get a snapshot from the camera.
|
||||
base64Image := capture.Base64Image(captureDevice, communication)
|
||||
base64Image := capture.Base64Image(captureDevice, communication, configuration)
|
||||
if base64Image != "" {
|
||||
communication.Image = base64Image
|
||||
}
|
||||
@@ -627,7 +729,8 @@ func GetSnapshotRaw(c *gin.Context, captureDevice *capture.Capture, configuratio
|
||||
image := capture.JpegImage(captureDevice, communication)
|
||||
|
||||
// encode image to jpeg
|
||||
bytes, _ := utils.ImageToBytes(&image)
|
||||
imageResized, _ := utils.ResizeImage(&image, uint(configuration.Config.Capture.IPCamera.BaseWidth), uint(configuration.Config.Capture.IPCamera.BaseHeight))
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
|
||||
// Return image/jpeg
|
||||
c.Data(200, "image/jpeg", bytes)
|
||||
@@ -642,7 +745,7 @@ func GetSnapshotRaw(c *gin.Context, captureDevice *capture.Capture, configuratio
|
||||
// @Success 200
|
||||
func GetConfig(c *gin.Context, captureDevice *capture.Capture, configuration *models.Configuration, communication *models.Communication) {
|
||||
// We'll try to get a snapshot from the camera.
|
||||
base64Image := capture.Base64Image(captureDevice, communication)
|
||||
base64Image := capture.Base64Image(captureDevice, communication, configuration)
|
||||
if base64Image != "" {
|
||||
communication.Image = base64Image
|
||||
}
|
||||
|
||||
@@ -87,7 +87,6 @@ func WriteFileToBackChannel(infile av.DemuxCloser) {
|
||||
break
|
||||
}
|
||||
// Send to backchannel
|
||||
fmt.Println(buffer)
|
||||
infile.Write(buffer, 2, uint32(count))
|
||||
|
||||
count = count + 1024
|
||||
|
||||
@@ -21,6 +21,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
|
||||
var isPixelChangeThresholdReached = false
|
||||
var changesToReturn = 0
|
||||
var motionRectangle models.MotionRectangle
|
||||
|
||||
pixelThreshold := config.Capture.PixelChangeThreshold
|
||||
// Might not be set in the config file, so set it to 150
|
||||
@@ -62,16 +63,34 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
}
|
||||
}
|
||||
|
||||
// A user might have set the base width and height for the IPCamera.
|
||||
// This means also the polygon coordinates are set to a specific width and height (which might be different than the actual packets
|
||||
// received from the IPCamera). So we will resize the polygon coordinates to the base width and height.
|
||||
baseWidthRatio := 1.0
|
||||
baseHeightRatio := 1.0
|
||||
baseWidth := config.Capture.IPCamera.BaseWidth
|
||||
baseHeight := config.Capture.IPCamera.BaseHeight
|
||||
if baseWidth > 0 && baseHeight > 0 {
|
||||
// We'll get the first image to calculate the ratio
|
||||
img := imageArray[0]
|
||||
if img != nil {
|
||||
bounds := img.Bounds()
|
||||
rows := bounds.Dy()
|
||||
cols := bounds.Dx()
|
||||
baseWidthRatio = float64(cols) / float64(baseWidth)
|
||||
baseHeightRatio = float64(rows) / float64(baseHeight)
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate mask
|
||||
var polyObjects []geo.Polygon
|
||||
|
||||
if config.Region != nil {
|
||||
for _, polygon := range config.Region.Polygon {
|
||||
coords := polygon.Coordinates
|
||||
poly := geo.Polygon{}
|
||||
for _, c := range coords {
|
||||
x := c.X
|
||||
y := c.Y
|
||||
x := c.X * baseWidthRatio
|
||||
y := c.Y * baseHeightRatio
|
||||
p := geo.NewPoint(x, y)
|
||||
if !poly.Contains(p) {
|
||||
poly.Add(p)
|
||||
@@ -132,7 +151,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
if detectMotion {
|
||||
|
||||
// Remember additional information about the result of findmotion
|
||||
isPixelChangeThresholdReached, changesToReturn = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
|
||||
isPixelChangeThresholdReached, changesToReturn, motionRectangle = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
|
||||
if isPixelChangeThresholdReached {
|
||||
|
||||
// If offline mode is disabled, send a message to the hub
|
||||
@@ -150,7 +169,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("computervision.main.ProcessMotion(): failed to package MQTT message: " + err.Error())
|
||||
}
|
||||
@@ -164,6 +183,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
dataToPass := models.MotionDataPartial{
|
||||
Timestamp: time.Now().Unix(),
|
||||
NumberOfChanges: changesToReturn,
|
||||
Rectangle: motionRectangle,
|
||||
}
|
||||
communication.HandleMotion <- dataToPass //Save data to the channel
|
||||
}
|
||||
@@ -185,24 +205,58 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
log.Log.Debug("computervision.main.ProcessMotion(): stop the motion detection.")
|
||||
}
|
||||
|
||||
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int) {
|
||||
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int, motionRectangle models.MotionRectangle) {
|
||||
image1 := imageArray[0]
|
||||
image2 := imageArray[1]
|
||||
image3 := imageArray[2]
|
||||
threshold := 60
|
||||
changes := AbsDiffBitwiseAndThreshold(image1, image2, image3, threshold, coordinatesToCheck)
|
||||
return changes > pixelChangeThreshold, changes
|
||||
changes, motionRectangle := AbsDiffBitwiseAndThreshold(image1, image2, image3, threshold, coordinatesToCheck)
|
||||
return changes > pixelChangeThreshold, changes, motionRectangle
|
||||
}
|
||||
|
||||
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) int {
|
||||
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) (int, models.MotionRectangle) {
|
||||
changes := 0
|
||||
var pixelList [][]int
|
||||
for i := 0; i < len(coordinatesToCheck); i++ {
|
||||
pixel := coordinatesToCheck[i]
|
||||
diff := int(img3.Pix[pixel]) - int(img1.Pix[pixel])
|
||||
diff2 := int(img3.Pix[pixel]) - int(img2.Pix[pixel])
|
||||
if (diff > threshold || diff < -threshold) && (diff2 > threshold || diff2 < -threshold) {
|
||||
changes++
|
||||
// Store the pixel coordinates where the change is detected
|
||||
pixelList = append(pixelList, []int{pixel % img1.Bounds().Dx(), pixel / img1.Bounds().Dx()})
|
||||
}
|
||||
}
|
||||
return changes
|
||||
|
||||
// Calculate rectangle of pixelList (startX, startY, endX, endY)
|
||||
var motionRectangle models.MotionRectangle
|
||||
if len(pixelList) > 0 {
|
||||
startX := pixelList[0][0]
|
||||
startY := pixelList[0][1]
|
||||
endX := startX
|
||||
endY := startY
|
||||
for _, pixel := range pixelList {
|
||||
if pixel[0] < startX {
|
||||
startX = pixel[0]
|
||||
}
|
||||
if pixel[1] < startY {
|
||||
startY = pixel[1]
|
||||
}
|
||||
if pixel[0] > endX {
|
||||
endX = pixel[0]
|
||||
}
|
||||
if pixel[1] > endY {
|
||||
endY = pixel[1]
|
||||
}
|
||||
}
|
||||
log.Log.Debugf("Rectangle of changes detected: startX: %d, startY: %d, endX: %d, endY: %d", startX, startY, endX, endY)
|
||||
motionRectangle = models.MotionRectangle{
|
||||
X: startX,
|
||||
Y: startY,
|
||||
Width: endX - startX,
|
||||
Height: endY - startY,
|
||||
}
|
||||
log.Log.Debugf("Motion rectangle: %+v", motionRectangle)
|
||||
}
|
||||
return changes, motionRectangle
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ func OpenConfig(configDirectory string, configuration *models.Configuration) {
|
||||
// Write to mongodb
|
||||
client := database.New()
|
||||
|
||||
db := client.Database(database.DatabaseName)
|
||||
db := client.Client.Database(database.DatabaseName)
|
||||
collection := db.Collection("configuration")
|
||||
|
||||
var globalConfig models.Config
|
||||
@@ -135,6 +135,12 @@ func OpenConfig(configDirectory string, configuration *models.Configuration) {
|
||||
conjungo.Merge(&kerberosvault, configuration.CustomConfig.KStorage, opts)
|
||||
configuration.Config.KStorage = &kerberosvault
|
||||
|
||||
// Merge Secondary Kerberos Vault settings
|
||||
var kerberosvaultSecondary models.KStorage
|
||||
conjungo.Merge(&kerberosvaultSecondary, configuration.GlobalConfig.KStorageSecondary, opts)
|
||||
conjungo.Merge(&kerberosvaultSecondary, configuration.CustomConfig.KStorageSecondary, opts)
|
||||
configuration.Config.KStorageSecondary = &kerberosvaultSecondary
|
||||
|
||||
// Merge Kerberos S3 settings
|
||||
var s3 models.S3
|
||||
conjungo.Merge(&s3, configuration.GlobalConfig.S3, opts)
|
||||
@@ -183,15 +189,19 @@ func OpenConfig(configDirectory string, configuration *models.Configuration) {
|
||||
}
|
||||
jsonFile.Close()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// This function will override the configuration with environment variables.
|
||||
func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
environmentVariables := os.Environ()
|
||||
|
||||
// Initialize the configuration for some new fields.
|
||||
if configuration.Config.KStorageSecondary == nil {
|
||||
configuration.Config.KStorageSecondary = &models.KStorage{}
|
||||
}
|
||||
|
||||
for _, env := range environmentVariables {
|
||||
if strings.Contains(env, "AGENT_") {
|
||||
key := strings.Split(env, "=")[0]
|
||||
@@ -229,7 +239,15 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.Capture.IPCamera.SubRTSP = value
|
||||
break
|
||||
|
||||
/* ONVIF connnection settings */
|
||||
/* Base width and height for the liveview and motion regions */
|
||||
case "AGENT_CAPTURE_IPCAMERA_BASE_WIDTH":
|
||||
configuration.Config.Capture.IPCamera.BaseWidth, _ = strconv.Atoi(value)
|
||||
break
|
||||
case "AGENT_CAPTURE_IPCAMERA_BASE_HEIGHT":
|
||||
configuration.Config.Capture.IPCamera.BaseHeight, _ = strconv.Atoi(value)
|
||||
break
|
||||
|
||||
/* ONVIF connnection settings */
|
||||
case "AGENT_CAPTURE_IPCAMERA_ONVIF":
|
||||
configuration.Config.Capture.IPCamera.ONVIF = value
|
||||
break
|
||||
@@ -382,10 +400,26 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.MQTTPassword = value
|
||||
break
|
||||
|
||||
/* MQTT chunking of low-resolution images into multiple messages */
|
||||
case "AGENT_CAPTURE_LIVEVIEW_CHUNKING":
|
||||
configuration.Config.Capture.LiveviewChunking = value
|
||||
break
|
||||
|
||||
/* Real-time streaming of keyframes to a MQTT topic */
|
||||
case "AGENT_REALTIME_PROCESSING":
|
||||
configuration.Config.RealtimeProcessing = value
|
||||
break
|
||||
case "AGENT_REALTIME_PROCESSING_TOPIC":
|
||||
configuration.Config.RealtimeProcessingTopic = value
|
||||
break
|
||||
|
||||
/* WebRTC settings for live-streaming (remote) */
|
||||
case "AGENT_STUN_URI":
|
||||
configuration.Config.STUNURI = value
|
||||
break
|
||||
case "AGENT_FORCE_TURN":
|
||||
configuration.Config.ForceTurn = value
|
||||
break
|
||||
case "AGENT_TURN_URI":
|
||||
configuration.Config.TURNURI = value
|
||||
break
|
||||
@@ -406,6 +440,9 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
break
|
||||
|
||||
/* When connected and storing in Kerberos Hub (SAAS) */
|
||||
case "AGENT_HUB_ENCRYPTION":
|
||||
configuration.Config.HubEncryption = value
|
||||
break
|
||||
case "AGENT_HUB_URI":
|
||||
configuration.Config.HubURI = value
|
||||
break
|
||||
@@ -422,7 +459,7 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.S3.Region = value
|
||||
break
|
||||
|
||||
/* When storing in a Kerberos Vault */
|
||||
/* When storing in a Vault */
|
||||
case "AGENT_KERBEROSVAULT_URI":
|
||||
configuration.Config.KStorage.URI = value
|
||||
break
|
||||
@@ -439,6 +476,37 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.KStorage.Directory = value
|
||||
break
|
||||
|
||||
/* Retry policy and timeout */
|
||||
case "AGENT_KERBEROSVAULT_MAX_RETRIES":
|
||||
maxRetries, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
configuration.Config.KStorage.MaxRetries = maxRetries
|
||||
}
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_TIMEOUT":
|
||||
timeout, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
configuration.Config.KStorage.Timeout = timeout
|
||||
}
|
||||
break
|
||||
|
||||
/* When storing in a secondary Vault */
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_URI":
|
||||
configuration.Config.KStorageSecondary.URI = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY":
|
||||
configuration.Config.KStorageSecondary.AccessKey = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY":
|
||||
configuration.Config.KStorageSecondary.SecretAccessKey = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_PROVIDER":
|
||||
configuration.Config.KStorageSecondary.Provider = value
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY":
|
||||
configuration.Config.KStorageSecondary.Directory = value
|
||||
break
|
||||
|
||||
/* When storing in dropbox */
|
||||
case "AGENT_DROPBOX_ACCESS_TOKEN":
|
||||
configuration.Config.Dropbox.AccessToken = value
|
||||
@@ -464,9 +532,26 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
case "AGENT_ENCRYPTION_SYMMETRIC_KEY":
|
||||
configuration.Config.Encryption.SymmetricKey = value
|
||||
break
|
||||
|
||||
/* When signing is enabled */
|
||||
case "AGENT_SIGNING":
|
||||
configuration.Config.Signing.Enabled = value
|
||||
break
|
||||
case "AGENT_SIGNING_PRIVATE_KEY":
|
||||
signingPrivateKey := strings.ReplaceAll(value, "\\n", "\n")
|
||||
configuration.Config.Signing.PrivateKey = signingPrivateKey
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Signing is a new feature, so if empty we set default values.
|
||||
if configuration.Config.Signing == nil || configuration.Config.Signing.PrivateKey == "" {
|
||||
configuration.Config.Signing = &models.Signing{
|
||||
Enabled: "true",
|
||||
PrivateKey: "-----BEGIN PRIVATE KEY-----\nMIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDoSxjyw08lRxF4Yoqmcaewjq3XjB55dMy4tlN5MGLdr8aAPuNR9Mwh3jlh1bDpwQXNgZkHDV/q9bpdPGGi7SQo2xw+rDuo5Y1f3wdzz+iuCTPbzoGFalE+1PZlU5TEtUtlbt7MRc4pxTaLP3u0P3EtW3KnzcUarcJWZJYxzv7gqVNCA/47BN+1ptqjwz3LAlah5yaftEvVjkaANOsafUswbS4VT44XfSlbKgebORCKDuNgQiyhuV5gU+J0TOaqRWwwMAWV0UoScyJLfhHRBCrUwrCUTwqH9jfkB7pgRFsYoZJd4MKMeHJjFSum+QXCBqInSnwu8c2kJChiLMWqJ+mhpTdfUAmSkeUSStfbbcavIPbDABvMgzOcmYMIVXXe57twU0xdu3AqWLtc9kw1BkUgZblM9pSSpYrIDheEyMs2/hiLgXsIaM0nVQtqwrA7rbeEGuPblzA6hvHgwN9K6HaBqdlGSlpYZ0v3SWIMwmxRB+kIojlyuggm8Qa4mqL97GFDGl6gOBGlNUFTBUVEa3EaJ7NJpGobRGsh/9dXzcW4aYmT9WxlzTlIKksI1ro6KdRfuVWfEs4AnG8bVEJmofK8EUrueB9IdXlcJZB49xolnOZPFohtMe/0U7evQOQP3sZnX+KotCsE7OXJvL09oF58JKoqmK9lPp0+pFBU4g6NjQIDAQABAoICAA+RSWph1t+q5R3nxUxFTYMrhv5IjQe2mDxJpF3B409zolC9OHxgGUisobTY3pBqs0DtKbxUeH2A0ehUH/axEosWHcz3cmIbgxHE9kdlJ9B3Lmss6j/uw+PWutu1sgm5phaIFIvuNNRWhPB6yXUwU4sLRat1+Z9vTmIQiKdtLIrtJz/n2VDvrJxn1N+yAsE20fnrksFKyZuxVsJaZPiX/t5Yv1/z0LjFjVoL7GUA5/Si7csN4ftqEhUrkNr2BvcZlTyffrF4lZCXrtl76RNUaxhqIu3H0gFbV2UfBpuckkfAhNRpXJ4iFSxm4nQbk4ojV8+l21RFOBeDN2Z7Ocu6auP5MnzpopR66vmDCmPoid498VGgDzFQEVkOar8WAa4v9h85QgLKrth6FunmaWJUT6OggQD3yY58GSwp5+ARMETMBP2x6Eld+PGgqoJvPT1+l/e9gOw7/SJ+Wz6hRXZAm/eiXMppHtB7sfea5rscNanPjJkK9NvPM0MX9cq/iA6QjXuETkMbubjo+Cxk3ydZiIQmWQDAx/OgxTyHbeRCVhLPcAphX0clykCuHZpI9Mvvj643/LoE0mjTByWJXf/WuGJA8ElHkjSdokVJ7jumz8OZZHfq0+V7+la2opsObeQANHW5MLWrnHlRVzTGV0IRZDXh7h1ptUJ4ubdvw/GJ2NeTAoIBAQD0lXXdjYKWC4uZ4YlgydP8b1CGda9cBV5RcPt7q9Ya1R2E4ieYyohmzltopvdaOXdsTZzhtdzOzKF+2qNcbBKhBTleYZ8GN5RKbo7HwXWpzfCTjseKHOD/QPwvBKXzLVWNtXn1NrLR79Rv0wbkYF6DtoqpEPf5kMs4bx79yW+mz8FUgdEeMjKphx6Jd5RYlTUxS64K6bnK7gjHNCF2cwdxsh4B6EB649GKeNz4JXi+oQBmOcX5ncXnkJrbju+IjtCkQ40HINVNdX7XeEaaw6KGaImVjw61toPUuDaioYUojufayoyXaUJnDbHQ2tNekEpq5iwnenZCbUKWmSeRe7dLAoIBAQDzIscYujsrmPxiTj2prhG0v36NRNP99mShnnJGowiIs+UBS0EMdOmBFa2sC9uFs/VnreQNYPDJdfr7O5VK9kfbH/PSiiKJ+wVebfdAlWkJYH27JN2Kl2l/OsvRVelNvF3BWIYF46qzGxIM0axaz3T2ZAJ9SrUgeAYhak6uyM4fbexEWX
xDgPGu6C0jB6IAzmHJnnh+j5+4ZXqjVyUxBYtUsWXF/TXomVcT9jxj7aUmS2/Us0XTVOVNpALqqYcekrzsX/wX0OEi5HkivYXHcNaDHx3NuUf6KdYof5DwPUM76qe+5/kWlSIHP3M6rIFK3pYFUnkHn2E8jNWcO97Aio+HAoIBAA+bcff/TbPxbKkXIUMR3fsfx02tONFwbkJYKVQM9Q6lRsrx+4Dee7HDvUWCUgpp3FsG4NnuVvbDTBLiNMZzBwVLZgvFwvYMmePeBjJs/+sj/xQLamQ/z4O6S91cOJK589mlGPEy2lpXKYExQCFWnPFetp5vPMOqH62sOZgMQJmubDHOTt/UaDM1Mhenj8nPS6OnpqV/oKF4awr7Ip+CW5k/unZ4sZSl8PsbF06mZXwUngfn6+Av1y8dpSQZjONz6ZBx1w/7YmEc/EkXnbnGfhqBlTX7+P5TdTofvyzFjc+2vsjRYANRbjFRSGWBcTd5kaYcpfim8eDvQ+6EO2gnMt0CggEAH2ln1Y8B5AEQ4lZ/avOdP//ZhsDUrqPtnl/NHckkahzrwj4JumVEYbP+SxMBGoYEd4+kvgG/OhfvBBRPlm65G9tF8fZ8vdzbdba5UfO7rUV1GP+LS8OCErjy6imySaPDbR5Vul8Oh7NAor1YCidxUf/bvnovanF3QUvtvHEfCDp4YuA4yLPZBaLjaforePUw9w5tPNSravRZYs74dBvmQ1vj7S9ojpN5B5AxfyuNwaPPX+iFZec69MvywISEe3Ozysof1Kfc3lgsOkvIA9tVK32SqSh93xkWnQbWH+OaUxxe7bAko0FDMzKEXZk53wVg1nEwR8bUljEPy+6EOdXs8wKCAQEAsEOWYMY5m7HkeG2XTTvX7ECmmdGl/c4ZDVwzB4IPxqUG7XfLmtsON8YoKOEUpJoc4ANafLXzmU+esUGbH4Ph22IWgP9jzws7jxaN/Zoku64qrSjgEZFTRIpKyhFk/ImWbS9laBW4l+m0tqTTRqoE0QEJf/2uv/04q65zrA70X9z2+KTrAtqOiRQPWl/IxRe9U4OEeGL+oD+YlXKCDsnJ3rwUIOZgJx0HWZg7K35DKwqs1nVi56FBdljiTRKAjVLRedjgDCSfGS1yUZ3krHzpaPt1qgnT3rdtYcIdbYDr66V2/gEEaz6XMGHuTk/ewjzUJxq9UTVeXOCbkRPXgVJg1w==\n-----END PRIVATE KEY-----",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SaveConfig(configDirectory string, config models.Config, configuration *models.Configuration, communication *models.Communication) error {
|
||||
@@ -482,7 +567,9 @@ func SaveConfig(configDirectory string, config models.Config, configuration *mod
|
||||
if communication.CameraConnected {
|
||||
select {
|
||||
case communication.HandleBootstrap <- "restart":
|
||||
default:
|
||||
log.Log.Info("config.main.SaveConfig(): update config, restart agent.")
|
||||
case <-time.After(1 * time.Second):
|
||||
log.Log.Info("config.main.SaveConfig(): update config, restart agent.")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -504,12 +591,16 @@ func StoreConfig(configDirectory string, config models.Config) error {
|
||||
config.Encryption.PrivateKey = encryptionPrivateKey
|
||||
}
|
||||
|
||||
// Reset the basewidth and baseheight
|
||||
config.Capture.IPCamera.BaseWidth = 0
|
||||
config.Capture.IPCamera.BaseHeight = 0
|
||||
|
||||
// Save into database
|
||||
if os.Getenv("DEPLOYMENT") == "factory" || os.Getenv("MACHINERY_ENVIRONMENT") == "kubernetes" {
|
||||
// Write to mongodb
|
||||
client := database.New()
|
||||
|
||||
db := client.Database(database.DatabaseName)
|
||||
db := client.Client.Database(database.DatabaseName)
|
||||
collection := db.Collection("configuration")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
@@ -15,12 +15,19 @@ type DB struct {
|
||||
Client *mongo.Client
|
||||
}
|
||||
|
||||
var TIMEOUT = 10 * time.Second
|
||||
var _init_ctx sync.Once
|
||||
var _instance *DB
|
||||
var DatabaseName = "KerberosFactory"
|
||||
|
||||
func New() *mongo.Client {
|
||||
var DatabaseName = os.Getenv("MONGODB_DATABASE_FACTORY")
|
||||
|
||||
func New() *DB {
|
||||
|
||||
if DatabaseName == "" {
|
||||
DatabaseName = "KerberosFactory"
|
||||
}
|
||||
|
||||
mongodbURI := os.Getenv("MONGODB_URI")
|
||||
host := os.Getenv("MONGODB_HOST")
|
||||
databaseCredentials := os.Getenv("MONGODB_DATABASE_CREDENTIALS")
|
||||
replicaset := os.Getenv("MONGODB_REPLICASET")
|
||||
@@ -28,28 +35,46 @@ func New() *mongo.Client {
|
||||
password := os.Getenv("MONGODB_PASSWORD")
|
||||
authentication := "SCRAM-SHA-256"
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), TIMEOUT)
|
||||
defer cancel()
|
||||
|
||||
_init_ctx.Do(func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
_instance = new(DB)
|
||||
mongodbURI := fmt.Sprintf("mongodb://%s:%s@%s", username, password, host)
|
||||
if replicaset != "" {
|
||||
mongodbURI = fmt.Sprintf("%s/?replicaSet=%s", mongodbURI, replicaset)
|
||||
}
|
||||
|
||||
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongodbURI).SetAuth(options.Credential{
|
||||
AuthMechanism: authentication,
|
||||
AuthSource: databaseCredentials,
|
||||
Username: username,
|
||||
Password: password,
|
||||
}))
|
||||
if err != nil {
|
||||
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
|
||||
os.Exit(1)
|
||||
// We can also apply the complete URI
|
||||
// e.g. "mongodb+srv://<username>:<password>@kerberos-hub.shhng.mongodb.net/?retryWrites=true&w=majority&appName=kerberos-hub"
|
||||
if mongodbURI != "" {
|
||||
serverAPI := options.ServerAPI(options.ServerAPIVersion1)
|
||||
opts := options.Client().ApplyURI(mongodbURI).SetServerAPIOptions(serverAPI)
|
||||
|
||||
// Create a new client and connect to the server
|
||||
client, err := mongo.Connect(ctx, opts)
|
||||
if err != nil {
|
||||
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
_instance.Client = client
|
||||
|
||||
} else {
|
||||
|
||||
// New MongoDB driver
|
||||
mongodbURI := fmt.Sprintf("mongodb://%s:%s@%s", username, password, host)
|
||||
if replicaset != "" {
|
||||
mongodbURI = fmt.Sprintf("%s/?replicaSet=%s", mongodbURI, replicaset)
|
||||
}
|
||||
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongodbURI).SetAuth(options.Credential{
|
||||
AuthMechanism: authentication,
|
||||
AuthSource: databaseCredentials,
|
||||
Username: username,
|
||||
Password: password,
|
||||
}))
|
||||
if err != nil {
|
||||
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
_instance.Client = client
|
||||
}
|
||||
_instance.Client = client
|
||||
})
|
||||
|
||||
return _instance.Client
|
||||
return _instance
|
||||
}
|
||||
|
||||
@@ -76,7 +76,6 @@ func ConfigureLogrus(level string, output string, timezone *time.Location) {
|
||||
logLevel = logrus.ErrorLevel
|
||||
} else if level == "debug" {
|
||||
logLevel = logrus.DebugLevel
|
||||
logrus.SetReportCaller(true)
|
||||
} else if level == "fatal" {
|
||||
logLevel = logrus.FatalLevel
|
||||
} else if level == "warning" {
|
||||
@@ -119,6 +118,16 @@ func (self *Logging) Info(sentence string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Infof(format string, args ...interface{}) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
gologging.Infof(format, args...)
|
||||
case "logrus":
|
||||
logrus.Infof(format, args...)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Warning(sentence string) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
@@ -139,6 +148,16 @@ func (self *Logging) Debug(sentence string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Debugf(format string, args ...interface{}) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
gologging.Debugf(format, args...)
|
||||
case "logrus":
|
||||
logrus.Debugf(format, args...)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Error(sentence string) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
|
||||
@@ -15,6 +15,8 @@ type Communication struct {
|
||||
CancelContext *context.CancelFunc
|
||||
PackageCounter *atomic.Value
|
||||
LastPacketTimer *atomic.Value
|
||||
PackageCounterSub *atomic.Value
|
||||
LastPacketTimerSub *atomic.Value
|
||||
CloudTimestamp *atomic.Value
|
||||
HandleBootstrap chan string
|
||||
HandleStream chan string
|
||||
@@ -33,5 +35,7 @@ type Communication struct {
|
||||
SubQueue *packets.Queue
|
||||
Image string
|
||||
CameraConnected bool
|
||||
MainStreamConnected bool
|
||||
SubStreamConnected bool
|
||||
HasBackChannel bool
|
||||
}
|
||||
|
||||
@@ -12,37 +12,43 @@ type Configuration struct {
|
||||
// Config is the highlevel struct which contains all the configuration of
|
||||
// your Kerberos Open Source instance.
|
||||
type Config struct {
|
||||
Type string `json:"type"`
|
||||
Key string `json:"key"`
|
||||
Name string `json:"name"`
|
||||
FriendlyName string `json:"friendly_name"`
|
||||
Time string `json:"time" bson:"time"`
|
||||
Offline string `json:"offline"`
|
||||
AutoClean string `json:"auto_clean"`
|
||||
RemoveAfterUpload string `json:"remove_after_upload"`
|
||||
MaxDirectorySize int64 `json:"max_directory_size"`
|
||||
Timezone string `json:"timezone"`
|
||||
Capture Capture `json:"capture"`
|
||||
Timetable []*Timetable `json:"timetable"`
|
||||
Region *Region `json:"region"`
|
||||
Cloud string `json:"cloud" bson:"cloud"`
|
||||
S3 *S3 `json:"s3,omitempty" bson:"s3,omitempty"`
|
||||
KStorage *KStorage `json:"kstorage,omitempty" bson:"kstorage,omitempty"`
|
||||
Dropbox *Dropbox `json:"dropbox,omitempty" bson:"dropbox,omitempty"`
|
||||
MQTTURI string `json:"mqtturi" bson:"mqtturi,omitempty"`
|
||||
MQTTUsername string `json:"mqtt_username" bson:"mqtt_username"`
|
||||
MQTTPassword string `json:"mqtt_password" bson:"mqtt_password"`
|
||||
STUNURI string `json:"stunuri" bson:"stunuri"`
|
||||
TURNURI string `json:"turnuri" bson:"turnuri"`
|
||||
TURNUsername string `json:"turn_username" bson:"turn_username"`
|
||||
TURNPassword string `json:"turn_password" bson:"turn_password"`
|
||||
HeartbeatURI string `json:"heartbeaturi" bson:"heartbeaturi"` /*obsolete*/
|
||||
HubURI string `json:"hub_uri" bson:"hub_uri"`
|
||||
HubKey string `json:"hub_key" bson:"hub_key"`
|
||||
HubPrivateKey string `json:"hub_private_key" bson:"hub_private_key"`
|
||||
HubSite string `json:"hub_site" bson:"hub_site"`
|
||||
ConditionURI string `json:"condition_uri" bson:"condition_uri"`
|
||||
Encryption *Encryption `json:"encryption,omitempty" bson:"encryption,omitempty"`
|
||||
Type string `json:"type"`
|
||||
Key string `json:"key"`
|
||||
Name string `json:"name"`
|
||||
FriendlyName string `json:"friendly_name"`
|
||||
Time string `json:"time" bson:"time"`
|
||||
Offline string `json:"offline"`
|
||||
AutoClean string `json:"auto_clean"`
|
||||
RemoveAfterUpload string `json:"remove_after_upload"`
|
||||
MaxDirectorySize int64 `json:"max_directory_size"`
|
||||
Timezone string `json:"timezone"`
|
||||
Capture Capture `json:"capture"`
|
||||
Timetable []*Timetable `json:"timetable"`
|
||||
Region *Region `json:"region"`
|
||||
Cloud string `json:"cloud" bson:"cloud"`
|
||||
S3 *S3 `json:"s3,omitempty" bson:"s3,omitempty"`
|
||||
KStorage *KStorage `json:"kstorage,omitempty" bson:"kstorage,omitempty"`
|
||||
KStorageSecondary *KStorage `json:"kstorage_secondary,omitempty" bson:"kstorage_secondary,omitempty"`
|
||||
Dropbox *Dropbox `json:"dropbox,omitempty" bson:"dropbox,omitempty"`
|
||||
MQTTURI string `json:"mqtturi" bson:"mqtturi,omitempty"`
|
||||
MQTTUsername string `json:"mqtt_username" bson:"mqtt_username"`
|
||||
MQTTPassword string `json:"mqtt_password" bson:"mqtt_password"`
|
||||
STUNURI string `json:"stunuri" bson:"stunuri"`
|
||||
ForceTurn string `json:"turn_force" bson:"turn_force"`
|
||||
TURNURI string `json:"turnuri" bson:"turnuri"`
|
||||
TURNUsername string `json:"turn_username" bson:"turn_username"`
|
||||
TURNPassword string `json:"turn_password" bson:"turn_password"`
|
||||
HeartbeatURI string `json:"heartbeaturi" bson:"heartbeaturi"` /*obsolete*/
|
||||
HubEncryption string `json:"hub_encryption" bson:"hub_encryption"`
|
||||
HubURI string `json:"hub_uri" bson:"hub_uri"`
|
||||
HubKey string `json:"hub_key" bson:"hub_key"`
|
||||
HubPrivateKey string `json:"hub_private_key" bson:"hub_private_key"`
|
||||
HubSite string `json:"hub_site" bson:"hub_site"`
|
||||
ConditionURI string `json:"condition_uri" bson:"condition_uri"`
|
||||
Encryption *Encryption `json:"encryption,omitempty" bson:"encryption,omitempty"`
|
||||
Signing *Signing `json:"signing,omitempty" bson:"signing,omitempty"`
|
||||
RealtimeProcessing string `json:"realtimeprocessing,omitempty" bson:"realtimeprocessing,omitempty"`
|
||||
RealtimeProcessingTopic string `json:"realtimeprocessing_topic" bson:"realtimeprocessing_topic"`
|
||||
}
|
||||
|
||||
// Capture defines which camera type (Id) you are using (IP, USB or Raspberry Pi camera),
|
||||
@@ -56,9 +62,11 @@ type Capture struct {
|
||||
Snapshots string `json:"snapshots,omitempty"`
|
||||
Motion string `json:"motion,omitempty"`
|
||||
Liveview string `json:"liveview,omitempty"`
|
||||
LiveviewChunking string `json:"liveview_chunking,omitempty" bson:"liveview_chunking,omitempty"`
|
||||
Continuous string `json:"continuous,omitempty"`
|
||||
PostRecording int64 `json:"postrecording"`
|
||||
PreRecording int64 `json:"prerecording"`
|
||||
GopSize int `json:"gopsize,omitempty" bson:"gopsize,omitempty"` // GOP size in seconds, used for pre-recording
|
||||
MaxLengthRecording int64 `json:"maxlengthrecording"`
|
||||
TranscodingWebRTC string `json:"transcodingwebrtc"`
|
||||
TranscodingResolution int64 `json:"transcodingresolution"`
|
||||
@@ -71,15 +79,28 @@ type Capture struct {
|
||||
// IPCamera configuration, such as the RTSP url of the IPCamera and the FPS.
|
||||
// Also includes ONVIF integration
|
||||
type IPCamera struct {
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
FPS string `json:"fps"`
|
||||
RTSP string `json:"rtsp"`
|
||||
SubRTSP string `json:"sub_rtsp"`
|
||||
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
|
||||
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
|
||||
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
|
||||
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
|
||||
RTSP string `json:"rtsp"`
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
FPS string `json:"fps"`
|
||||
|
||||
SubRTSP string `json:"sub_rtsp"`
|
||||
SubWidth int `json:"sub_width"`
|
||||
SubHeight int `json:"sub_height"`
|
||||
|
||||
BaseWidth int `json:"base_width"`
|
||||
BaseHeight int `json:"base_height"`
|
||||
|
||||
SubFPS string `json:"sub_fps"`
|
||||
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
|
||||
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
|
||||
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
|
||||
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
|
||||
SPSNALUs [][]byte `json:"sps_nalus,omitempty" bson:"sps_nalus,omitempty"`
|
||||
PPSNALUs [][]byte `json:"pps_nalus,omitempty" bson:"pps_nalus,omitempty"`
|
||||
VPSNALUs [][]byte `json:"vps_nalus,omitempty" bson:"vps_nalus,omitempty"`
|
||||
SampleRate int `json:"sample_rate,omitempty" bson:"sample_rate,omitempty"`
|
||||
Channels int `json:"channels,omitempty" bson:"channels,omitempty"`
|
||||
}
|
||||
|
||||
// USBCamera configuration, such as the device path (/dev/video*)
|
||||
@@ -151,6 +172,8 @@ type KStorage struct {
|
||||
SecretAccessKey string `json:"secret_access_key,omitempty" bson:"secret_access_key,omitempty"`
|
||||
Provider string `json:"provider,omitempty" bson:"provider,omitempty"`
|
||||
Directory string `json:"directory,omitempty" bson:"directory,omitempty"`
|
||||
MaxRetries int `json:"max_retries,omitempty" bson:"max_retries,omitempty"`
|
||||
Timeout int `json:"timeout,omitempty" bson:"timeout,omitempty"`
|
||||
}
|
||||
|
||||
// Dropbox integration
|
||||
@@ -167,3 +190,9 @@ type Encryption struct {
|
||||
PrivateKey string `json:"private_key" bson:"private_key"`
|
||||
SymmetricKey string `json:"symmetric_key" bson:"symmetric_key"`
|
||||
}
|
||||
|
||||
// Signing
|
||||
type Signing struct {
|
||||
Enabled string `json:"enabled" bson:"enabled"`
|
||||
PrivateKey string `json:"private_key" bson:"private_key"`
|
||||
}
|
||||
|
||||
@@ -27,31 +27,13 @@ func PackageMQTTMessage(configuration *Configuration, msg Message) ([]byte, erro
|
||||
msg.DeviceId = msg.Payload.DeviceId
|
||||
msg.Timestamp = time.Now().Unix()
|
||||
|
||||
// We'll hide the message (by default in latest version)
|
||||
// We will encrypt using the Kerberos Hub private key if set.
|
||||
/*msg.Hidden = false
|
||||
if configuration.Config.HubPrivateKey != "" {
|
||||
msg.Hidden = true
|
||||
pload := msg.Payload
|
||||
// Pload to base64
|
||||
data, err := json.Marshal(pload)
|
||||
if err != nil {
|
||||
msg.Hidden = false
|
||||
} else {
|
||||
k := configuration.Config.Encryption.SymmetricKey
|
||||
encryptedValue, err := encryption.AesEncrypt(data, k)
|
||||
if err == nil {
|
||||
data := base64.StdEncoding.EncodeToString(encryptedValue)
|
||||
msg.Payload.HiddenValue = data
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
}*/
|
||||
// Configuration
|
||||
config := configuration.Config
|
||||
|
||||
// Next to hiding the message, we can also encrypt it using your own private key.
|
||||
// Which is not stored in a remote environment (hence you are the only one owning it).
|
||||
msg.Encrypted = false
|
||||
if configuration.Config.Encryption != nil && configuration.Config.Encryption.Enabled == "true" {
|
||||
if config.Encryption != nil && config.Encryption.Enabled == "true" {
|
||||
msg.Encrypted = true
|
||||
}
|
||||
msg.PublicKey = ""
|
||||
@@ -85,19 +67,47 @@ func PackageMQTTMessage(configuration *Configuration, msg Message) ([]byte, erro
|
||||
rsaKey, _ := key.(*rsa.PrivateKey)
|
||||
|
||||
// Create a 16bit key random
|
||||
k := configuration.Config.Encryption.SymmetricKey
|
||||
if config.Encryption != nil && config.Encryption.SymmetricKey != "" {
|
||||
k := config.Encryption.SymmetricKey
|
||||
encryptedValue, err := encryption.AesEncrypt(data, k)
|
||||
if err == nil {
|
||||
|
||||
data := base64.StdEncoding.EncodeToString(encryptedValue)
|
||||
// Sign the encrypted value
|
||||
signature, err := encryption.SignWithPrivateKey([]byte(data), rsaKey)
|
||||
if err == nil {
|
||||
base64Signature := base64.StdEncoding.EncodeToString(signature)
|
||||
msg.Payload.EncryptedValue = data
|
||||
msg.Payload.Signature = base64Signature
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We'll hide the message (by default in latest version)
|
||||
// We will encrypt using the Kerberos Hub private key if set.
|
||||
msg.Hidden = false
|
||||
if config.HubEncryption == "true" && config.HubPrivateKey != "" {
|
||||
msg.Hidden = true
|
||||
}
|
||||
|
||||
if msg.Hidden {
|
||||
pload := msg.Payload
|
||||
// Pload to base64
|
||||
data, err := json.Marshal(pload)
|
||||
if err != nil {
|
||||
msg.Hidden = false
|
||||
} else {
|
||||
k := config.HubPrivateKey
|
||||
encryptedValue, err := encryption.AesEncrypt(data, k)
|
||||
if err == nil {
|
||||
|
||||
data := base64.StdEncoding.EncodeToString(encryptedValue)
|
||||
// Sign the encrypted value
|
||||
signature, err := encryption.SignWithPrivateKey([]byte(data), rsaKey)
|
||||
if err == nil {
|
||||
base64Signature := base64.StdEncoding.EncodeToString(signature)
|
||||
msg.Payload.EncryptedValue = data
|
||||
msg.Payload.Signature = base64Signature
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
msg.Payload.HiddenValue = data
|
||||
msg.Payload.EncryptedValue = ""
|
||||
msg.Payload.Signature = ""
|
||||
msg.Payload.Value = make(map[string]interface{})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -122,6 +132,7 @@ type Message struct {
|
||||
// The payload structure which is used to send over
|
||||
// and receive messages from the MQTT broker
|
||||
type Payload struct {
|
||||
Version string `json:"version"` // Version of the message, e.g. "1.0"
|
||||
Action string `json:"action"`
|
||||
DeviceId string `json:"device_id"`
|
||||
Signature string `json:"signature"`
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package models
|
||||
|
||||
type MotionDataPartial struct {
|
||||
Timestamp int64 `json:"timestamp" bson:"timestamp"`
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Timestamp int64 `json:"timestamp" bson:"timestamp"`
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Rectangle MotionRectangle `json:"rectangle" bson:"rectangle"`
|
||||
}
|
||||
|
||||
type MotionDataFull struct {
|
||||
@@ -14,3 +15,10 @@ type MotionDataFull struct {
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Token int `json:"token" bson:"token"`
|
||||
}
|
||||
|
||||
type MotionRectangle struct {
|
||||
X int `json:"x" bson:"x"`
|
||||
Y int `json:"y" bson:"y"`
|
||||
Width int `json:"width" bson:"width"`
|
||||
Height int `json:"height" bson:"height"`
|
||||
}
|
||||
|
||||
@@ -200,9 +200,19 @@ func ConnectToOnvifDevice(cameraConfiguration *models.IPCamera) (*onvif.Device,
|
||||
|
||||
var capabilities device.GetCapabilitiesResponse
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.ConnectToOnvifDevice(): " + err.Error())
|
||||
} else {
|
||||
// Try again with other authentication mode
|
||||
dev, err = onvif.NewDevice(onvif.DeviceParams{
|
||||
Xaddr: cameraConfiguration.ONVIFXAddr,
|
||||
Username: cameraConfiguration.ONVIFUsername,
|
||||
Password: cameraConfiguration.ONVIFPassword,
|
||||
AuthMode: "digest",
|
||||
})
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.ConnectToOnvifDevice(): " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
getCapabilities := device.GetCapabilities{Category: []xsdonvif.CapabilityCategory{"All"}}
|
||||
resp, err := dev.CallMethod(getCapabilities)
|
||||
if err != nil {
|
||||
@@ -212,10 +222,10 @@ func ConnectToOnvifDevice(cameraConfiguration *models.IPCamera) (*onvif.Device,
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.ConnectToOnvifDevice(): " + err.Error())
|
||||
}
|
||||
resp.Body.Close()
|
||||
}
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetCapabilitiesResponse")
|
||||
@@ -242,10 +252,10 @@ func GetTokenFromProfile(device *onvif.Device, profileId int) (xsdonvif.Referenc
|
||||
// Get Profiles
|
||||
resp, err := device.CallMethod(media.GetProfiles{})
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
b, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetProfilesResponse")
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.GetTokenFromProfile(): " + err.Error())
|
||||
@@ -278,21 +288,19 @@ func GetPTZConfigurationsFromDevice(device *onvif.Device) (ptz.GetConfigurations
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetConfigurationsResponse")
|
||||
if err != nil {
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetConfigurationsResponse")
|
||||
if err != nil {
|
||||
log.Log.Debug("onvif.GetPTZConfigurationsFromDevice(): " + err.Error())
|
||||
return configurations, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&configurations, et); err != nil {
|
||||
log.Log.Debug("onvif.GetPTZConfigurationsFromDevice(): " + err.Error())
|
||||
return configurations, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&configurations, et); err != nil {
|
||||
log.Log.Debug("onvif.GetPTZConfigurationsFromDevice(): " + err.Error())
|
||||
return configurations, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -312,12 +320,17 @@ func GetPositionFromDevice(configuration models.Configuration) (xsdonvif.PTZVect
|
||||
// Get the PTZ configurations from the device
|
||||
position, err := GetPosition(device, token)
|
||||
if err == nil {
|
||||
// float to string
|
||||
x := strconv.FormatFloat(position.PanTilt.X, 'f', 6, 64)
|
||||
y := strconv.FormatFloat(position.PanTilt.Y, 'f', 6, 64)
|
||||
z := strconv.FormatFloat(position.Zoom.X, 'f', 6, 64)
|
||||
log.Log.Info("onvif.GetPositionFromDevice(): successfully got position (" + x + ", " + y + ", " + z + ")")
|
||||
return position, err
|
||||
if position.PanTilt != nil && position.Zoom != nil {
|
||||
// float to string
|
||||
x := strconv.FormatFloat(position.PanTilt.X, 'f', 6, 64)
|
||||
y := strconv.FormatFloat(position.PanTilt.Y, 'f', 6, 64)
|
||||
z := strconv.FormatFloat(position.Zoom.X, 'f', 6, 64)
|
||||
log.Log.Info("onvif.GetPositionFromDevice(): successfully got position (" + x + ", " + y + ", " + z + ")")
|
||||
return position, err
|
||||
} else {
|
||||
log.Log.Debug("onvif.GetPositionFromDevice(): position is nil")
|
||||
return position, errors.New("position is nil")
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("onvif.GetPositionFromDevice(): " + err.Error())
|
||||
return position, err
|
||||
@@ -345,7 +358,7 @@ func GetPosition(device *onvif.Device, token xsdonvif.ReferenceToken) (xsdonvif.
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
@@ -779,7 +792,7 @@ func GetPresetsFromDevice(device *onvif.Device) ([]models.OnvifActionPreset, err
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
@@ -793,14 +806,16 @@ func GetPresetsFromDevice(device *onvif.Device) ([]models.OnvifActionPreset, err
|
||||
return presets, err
|
||||
}
|
||||
|
||||
presetsList := ""
|
||||
for _, preset := range presetsResponse.Preset {
|
||||
log.Log.Debug("onvif.main.GetPresetsFromDevice(): " + string(preset.Name) + " (" + string(preset.Token) + ")")
|
||||
p := models.OnvifActionPreset{
|
||||
Name: string(preset.Name),
|
||||
Token: string(preset.Token),
|
||||
}
|
||||
presetsList += string(preset.Name) + " (" + string(preset.Token) + "), "
|
||||
presets = append(presets, p)
|
||||
}
|
||||
log.Log.Debug("onvif.main.GetPresetsFromDevice(): " + presetsList)
|
||||
|
||||
return presets, err
|
||||
}
|
||||
@@ -828,7 +843,7 @@ func GoToPresetFromDevice(device *onvif.Device, presetName string) error {
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
@@ -893,7 +908,7 @@ func GetPTZFunctionsFromDevice(configurations ptz.GetConfigurationsResponse) ([]
|
||||
}
|
||||
|
||||
// VerifyOnvifConnection godoc
|
||||
// @Router /api/onvif/verify [post]
|
||||
// @Router /api/camera/onvif/verify [post]
|
||||
// @ID verify-onvif
|
||||
// @Security Bearer
|
||||
// @securityDefinitions.apikey Bearer
|
||||
@@ -969,25 +984,30 @@ func CreatePullPointSubscription(dev *onvif.Device) (string, error) {
|
||||
// For the time being we are just interested in the digital inputs and outputs, therefore
|
||||
// we have set the topic to the followin filter.
|
||||
terminate := xsd.String("PT60S")
|
||||
if dev == nil {
|
||||
return pullPointAdress, errors.New("dev is nil, ONVIF was not able to connect to the device")
|
||||
}
|
||||
|
||||
resp, err := dev.CallMethod(event.CreatePullPointSubscription{
|
||||
InitialTerminationTime: &terminate,
|
||||
|
||||
Filter: &event.FilterType{
|
||||
TopicExpression: &event.TopicExpressionType{
|
||||
Dialect: xsd.String("http://www.onvif.org/ver10/tev/topicExpression/ConcreteSet"),
|
||||
TopicKinds: "tns1:Device/Trigger//.",
|
||||
TopicKinds: "tns1:Device/Trigger//.", // -> This works for Avigilon, Hanwa, Hikvision
|
||||
// TopicKinds: "//.", -> This works for Axis, but throws other errors.
|
||||
},
|
||||
},
|
||||
})
|
||||
var b2 []byte
|
||||
if resp != nil {
|
||||
b2, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
if err == nil {
|
||||
stringBody := string(b2)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "CreatePullPointSubscriptionResponse")
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.CreatePullPointSubscription(): " + err.Error())
|
||||
log.Log.Debug("onvif.main.CreatePullPointSubscription(): " + err.Error())
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&createPullPointSubscriptionResponse, et); err != nil {
|
||||
log.Log.Error("onvif.main.CreatePullPointSubscription(): " + err.Error())
|
||||
@@ -1001,19 +1021,25 @@ func CreatePullPointSubscription(dev *onvif.Device) (string, error) {
|
||||
}
|
||||
|
||||
func UnsubscribePullPoint(dev *onvif.Device, pullPointAddress string) error {
|
||||
|
||||
// Unsubscribe from the device
|
||||
unsubscribe := event.Unsubscribe{}
|
||||
requestBody, err := xml.Marshal(unsubscribe)
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.UnsubscribePullPoint(): " + err.Error())
|
||||
}
|
||||
|
||||
res, err := dev.SendSoap(pullPointAddress, string(requestBody))
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.UnsubscribePullPoint(): " + err.Error())
|
||||
}
|
||||
if res != nil {
|
||||
_, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
b, err := io.ReadAll(res.Body)
|
||||
res.Body.Close() // Ensure the response body is closed
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
log.Log.Debug("onvif.main.UnsubscribePullPoint(): " + stringBody)
|
||||
}
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.UnsubscribePullPoint(): " + err.Error())
|
||||
}
|
||||
@@ -1032,14 +1058,10 @@ func GetInputOutputs() ([]ONVIFEvents, error) {
|
||||
// We have some odd behaviour for inputs: the logical state is set to false even if circuit is closed. However we do see repeated events (looks like heartbeats).
|
||||
// We are assuming that if we do not receive an event for 15 seconds the input is inactive, otherwise we set to active.
|
||||
for key, value := range inputOutputDeviceMap {
|
||||
if value.Type == "input" {
|
||||
if time.Now().Unix()-value.Timestamp > 15 {
|
||||
value.Value = "false"
|
||||
} else {
|
||||
value.Value = "true"
|
||||
}
|
||||
inputOutputDeviceMap[key] = value
|
||||
if time.Now().Unix()-value.Timestamp < 15 && value.Value == "false" {
|
||||
value.Value = "true"
|
||||
}
|
||||
inputOutputDeviceMap[key] = value
|
||||
eventsArray = append(eventsArray, *value)
|
||||
}
|
||||
for _, value := range eventsArray {
|
||||
@@ -1065,7 +1087,7 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
// Pull message
|
||||
pullMessage := event.PullMessages{
|
||||
Timeout: xsd.Duration("PT5S"),
|
||||
MessageLimit: 100,
|
||||
MessageLimit: 10,
|
||||
}
|
||||
requestBody, err := xml.Marshal(pullMessage)
|
||||
if err != nil {
|
||||
@@ -1081,7 +1103,7 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
var pullMessagesResponse event.PullMessagesResponse
|
||||
if res != nil {
|
||||
bs, err := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
res.Body.Close() // Ensure the response body is closed
|
||||
if err == nil {
|
||||
stringBody := string(bs)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "PullMessagesResponse")
|
||||
@@ -1099,13 +1121,18 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
|
||||
for _, message := range pullMessagesResponse.NotificationMessage {
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages): " + string(message.Topic.TopicKinds))
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages): " + string(message.Message.Message.Data.SimpleItem[0].Name) + " " + string(message.Message.Message.Data.SimpleItem[0].Value))
|
||||
if message.Topic.TopicKinds == "tns1:Device/Trigger/Relay" {
|
||||
//if len(message.Message.Message.Data.SimpleItem) > 0 {
|
||||
// log.Log.Debug("onvif.main.GetEventMessages(pullMessages): " + string(message.Message.Message.Data.SimpleItem[0].Name) + " " + string(message.Message.Message.Data.SimpleItem[0].Value))
|
||||
//}
|
||||
if message.Topic.TopicKinds == "tns1:Device/Trigger/Relay" ||
|
||||
message.Topic.TopicKinds == "tns1:Device/tns1:Trigger/tns1:Relay" { // This is for avigilon cameras
|
||||
if len(message.Message.Message.Data.SimpleItem) > 0 {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" ||
|
||||
message.Message.Message.Data.SimpleItem[0].Name == "RelayLogicalState" { // On avigilon it's called RelayLogicalState
|
||||
key := string(message.Message.Message.Source.SimpleItem[0].Value)
|
||||
value := string(message.Message.Message.Data.SimpleItem[0].Value)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) output: " + key + " " + value)
|
||||
propertyOperation := string(message.Message.Message.PropertyOperation)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) output: " + key + " " + value + " (" + propertyOperation + ")")
|
||||
|
||||
// Depending on the onvif library they might use different values for active and inactive.
|
||||
if value == "active" || value == "1" {
|
||||
@@ -1116,26 +1143,30 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
|
||||
// Check if key exists in map
|
||||
// If it does not exist we'll add it to the map otherwise we'll update the value.
|
||||
if _, ok := inputOutputDeviceMap[key]; !ok {
|
||||
inputOutputDeviceMap[key] = &ONVIFEvents{
|
||||
Key: key,
|
||||
if _, ok := inputOutputDeviceMap[key+"-output"]; !ok {
|
||||
inputOutputDeviceMap[key+"-output"] = &ONVIFEvents{
|
||||
Key: key + "-output",
|
||||
Type: "output",
|
||||
Value: value,
|
||||
Timestamp: 0,
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) output: " + key + " " + value)
|
||||
inputOutputDeviceMap[key].Value = value
|
||||
inputOutputDeviceMap[key].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Changed" {
|
||||
inputOutputDeviceMap[key+"-output"].Value = value
|
||||
inputOutputDeviceMap[key+"-output"].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Initialized" {
|
||||
inputOutputDeviceMap[key+"-output"].Value = value
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if message.Topic.TopicKinds == "tns1:Device/Trigger/DigitalInput" {
|
||||
} else if message.Topic.TopicKinds == "tns1:Device/Trigger/DigitalInput" ||
|
||||
message.Topic.TopicKinds == "tns1:Device/tns1:Trigger/tnssamsung:DigitalInput" { // This is for avigilon's camera
|
||||
if len(message.Message.Message.Data.SimpleItem) > 0 {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" {
|
||||
if message.Message.Message.Data.SimpleItem[0].Name == "LogicalState" ||
|
||||
message.Message.Message.Data.SimpleItem[0].Name == "Level" { // On avigilon it's called level
|
||||
key := string(message.Message.Message.Source.SimpleItem[0].Value)
|
||||
value := string(message.Message.Message.Data.SimpleItem[0].Value)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) input: " + key + " " + value)
|
||||
propertyOperation := string(message.Message.Message.PropertyOperation)
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) input: " + key + " " + value + " (" + propertyOperation + ")")
|
||||
|
||||
// Depending on the onvif library they might use different values for active and inactive.
|
||||
if value == "active" || value == "1" {
|
||||
@@ -1146,17 +1177,18 @@ func GetEventMessages(dev *onvif.Device, pullPointAddress string) ([]ONVIFEvents
|
||||
|
||||
// Check if key exists in map
|
||||
// If it does not exist we'll add it to the map otherwise we'll update the value.
|
||||
if _, ok := inputOutputDeviceMap[key]; !ok {
|
||||
inputOutputDeviceMap[key] = &ONVIFEvents{
|
||||
Key: key,
|
||||
if _, ok := inputOutputDeviceMap[key+"-input"]; !ok {
|
||||
inputOutputDeviceMap[key+"-input"] = &ONVIFEvents{
|
||||
Key: key + "-input",
|
||||
Type: "input",
|
||||
Value: value,
|
||||
Timestamp: 0,
|
||||
}
|
||||
} else {
|
||||
log.Log.Debug("onvif.main.GetEventMessages(pullMessages) input: " + key + " " + value)
|
||||
inputOutputDeviceMap[key].Value = value
|
||||
inputOutputDeviceMap[key].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Changed" {
|
||||
inputOutputDeviceMap[key+"-input"].Value = value
|
||||
inputOutputDeviceMap[key+"-input"].Timestamp = time.Now().Unix()
|
||||
} else if propertyOperation == "Initialized" {
|
||||
inputOutputDeviceMap[key+"-input"].Value = value
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1180,7 +1212,7 @@ func GetDigitalInputs(dev *onvif.Device) (device.GetDigitalInputsResponse, error
|
||||
resp, err := dev.CallMethod(deviceio.GetDigitalInputs{})
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
@@ -1212,21 +1244,19 @@ func GetRelayOutputs(dev *onvif.Device) (device.GetRelayOutputsResponse, error)
|
||||
var b []byte
|
||||
if resp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if err == nil {
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetRelayOutputsResponse")
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
stringBody := string(b)
|
||||
decodedXML, et, err := getXMLNode(stringBody, "GetRelayOutputsResponse")
|
||||
if err != nil {
|
||||
log.Log.Error("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
return relayoutputs, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&relayoutputs, et); err != nil {
|
||||
log.Log.Debug("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
return relayoutputs, err
|
||||
} else {
|
||||
if err := decodedXML.DecodeElement(&relayoutputs, et); err != nil {
|
||||
log.Log.Debug("onvif.main.GetRelayOutputs(): " + err.Error())
|
||||
return relayoutputs, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1244,8 +1274,8 @@ func TriggerRelayOutput(dev *onvif.Device, output string) (err error) {
|
||||
// However in theory there might be multiple outputs. We might need to change
|
||||
// this in the future "kerberos-io/onvif" library.
|
||||
if err == nil {
|
||||
token := relayoutputs.RelayOutputs.Token
|
||||
if output == string(token) {
|
||||
token := relayoutputs.RelayOutputs[0].Token
|
||||
if output == string(token+"-output") {
|
||||
outputState := device.SetRelayOutputState{
|
||||
RelayOutputToken: token,
|
||||
LogicalState: "active",
|
||||
@@ -1255,7 +1285,7 @@ func TriggerRelayOutput(dev *onvif.Device, output string) (err error) {
|
||||
var b []byte
|
||||
if errResp != nil {
|
||||
b, err = io.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
resp.Body.Close() // Ensure the response body is closed
|
||||
}
|
||||
stringBody := string(b)
|
||||
if err == nil && resp.StatusCode == 200 {
|
||||
|
||||
@@ -9,12 +9,15 @@ import (
|
||||
// Packet represents an RTP Packet
|
||||
type Packet struct {
|
||||
Packet *rtp.Packet
|
||||
IsAudio bool // packet is audio
|
||||
IsVideo bool // packet is video
|
||||
IsKeyFrame bool // video packet is key frame
|
||||
Idx int8 // stream index in container format
|
||||
Codec string // codec name
|
||||
CompositionTime time.Duration // packet presentation time minus decode time for H264 B-Frame
|
||||
Time time.Duration // packet decode time
|
||||
Data []byte // packet data
|
||||
IsAudio bool // packet is audio
|
||||
IsVideo bool // packet is video
|
||||
IsKeyFrame bool // video packet is key frame
|
||||
Idx int8 // stream index in container format
|
||||
Codec string // codec name
|
||||
CompositionTime int64 // packet presentation time minus decode time for H264 B-Frame
|
||||
Time int64 // packet decode time
|
||||
TimeLegacy time.Duration
|
||||
CurrentTime int64 // current time in milliseconds (UNIX timestamp)
|
||||
Data []byte // packet data
|
||||
Gopsize int // size of the GOP
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ package packets
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// time
|
||||
@@ -46,6 +45,11 @@ func (self *Queue) SetMaxGopCount(n int) {
|
||||
return
|
||||
}
|
||||
|
||||
func (self *Queue) GetMaxGopCount() int {
|
||||
n := self.maxgopcount
|
||||
return n
|
||||
}
|
||||
|
||||
func (self *Queue) WriteHeader(streams []Stream) error {
|
||||
self.lock.Lock()
|
||||
|
||||
@@ -145,7 +149,7 @@ func (self *Queue) Oldest() *QueueCursor {
|
||||
}
|
||||
|
||||
// Create cursor position at specific time in buffered packets.
|
||||
func (self *Queue) DelayedTime(dur time.Duration) *QueueCursor {
|
||||
func (self *Queue) DelayedTime(dur int64) *QueueCursor {
|
||||
cursor := self.newCursor()
|
||||
cursor.init = func(buf *Buf, videoidx int) BufPos {
|
||||
i := buf.Tail - 1
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package packets
|
||||
|
||||
type Stream struct {
|
||||
// The ID of the stream.
|
||||
Index int `json:"index" bson:"index"`
|
||||
|
||||
// The name of the stream.
|
||||
Name string
|
||||
|
||||
@@ -39,4 +42,13 @@ type Stream struct {
|
||||
|
||||
// IsBackChannel is true if this stream is a back channel.
|
||||
IsBackChannel bool
|
||||
|
||||
// SampleRate is the sample rate of the audio stream.
|
||||
SampleRate int
|
||||
|
||||
// Channels is the number of audio channels.
|
||||
Channels int
|
||||
|
||||
// GopSize is the size of the GOP (Group of Pictures).
|
||||
GopSize int
|
||||
}
|
||||
|
||||
@@ -41,10 +41,13 @@ import (
|
||||
|
||||
func StartServer(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
|
||||
// Set release mode
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
|
||||
// Initialize REST API
|
||||
r := gin.Default()
|
||||
|
||||
// Profileerggerg
|
||||
// Profiler
|
||||
pprof.Register(r)
|
||||
|
||||
// Setup CORS
|
||||
|
||||
@@ -395,7 +395,9 @@ func DoGetDigitalInputs(c *gin.Context) {
|
||||
}
|
||||
|
||||
cameraConfiguration := configuration.Config.Capture.IPCamera
|
||||
_, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
|
||||
|
||||
onvifInputs, _ := onvif.GetDigitalInputs(device)
|
||||
if err == nil {
|
||||
// Get the digital inputs and outputs from the device
|
||||
inputOutputs, err := onvif.GetInputOutputs()
|
||||
@@ -408,6 +410,24 @@ func DoGetDigitalInputs(c *gin.Context) {
|
||||
inputs = append(inputs, event)
|
||||
}
|
||||
}
|
||||
// Iterate over inputs from onvif and compare
|
||||
|
||||
for _, input := range onvifInputs.DigitalInputs {
|
||||
find := false
|
||||
for _, event := range inputs {
|
||||
key := string(input.Token)
|
||||
if key == event.Key {
|
||||
find = true
|
||||
}
|
||||
}
|
||||
if !find {
|
||||
key := string(input.Token)
|
||||
inputs = append(inputs, onvif.ONVIFEvents{
|
||||
Key: key,
|
||||
Type: "input",
|
||||
})
|
||||
}
|
||||
}
|
||||
c.JSON(200, gin.H{
|
||||
"data": inputs,
|
||||
})
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
func AddRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware, configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) *gin.RouterGroup {
|
||||
|
||||
r.GET("/ws", func(c *gin.Context) {
|
||||
websocket.WebsocketHandler(c, communication, captureDevice)
|
||||
websocket.WebsocketHandler(c, configuration, communication, captureDevice)
|
||||
})
|
||||
|
||||
// This is legacy should be removed in future! Now everything
|
||||
@@ -54,21 +54,21 @@ func AddRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware, configDirect
|
||||
components.UpdateConfig(c, configDirectory, configuration, communication)
|
||||
})
|
||||
|
||||
// Will verify the current onvif settings.
|
||||
api.POST("/onvif/verify", func(c *gin.Context) {
|
||||
onvif.VerifyOnvifConnection(c)
|
||||
})
|
||||
|
||||
// Will verify the current hub settings.
|
||||
// Will verify the hub settings.
|
||||
api.POST("/hub/verify", func(c *gin.Context) {
|
||||
cloud.VerifyHub(c)
|
||||
})
|
||||
|
||||
// Will verify the current persistence settings.
|
||||
// Will verify the persistence settings.
|
||||
api.POST("/persistence/verify", func(c *gin.Context) {
|
||||
cloud.VerifyPersistence(c, configDirectory)
|
||||
})
|
||||
|
||||
// Will verify the secondary persistence settings.
|
||||
api.POST("/persistence/secondary/verify", func(c *gin.Context) {
|
||||
cloud.VerifySecondaryPersistence(c, configDirectory)
|
||||
})
|
||||
|
||||
// Camera specific methods. Doesn't require any authorization.
|
||||
// These are available for anyone, but require the agent, to reach
|
||||
// the camera.
|
||||
@@ -94,7 +94,8 @@ func AddRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware, configDirect
|
||||
})
|
||||
|
||||
// Onvif specific methods. Doesn't require any authorization.
|
||||
|
||||
// Will verify the current onvif settings.
|
||||
api.POST("/camera/onvif/verify", onvif.VerifyOnvifConnection)
|
||||
api.POST("/camera/onvif/login", LoginToOnvif)
|
||||
api.POST("/camera/onvif/capabilities", GetOnvifCapabilities)
|
||||
api.POST("/camera/onvif/presets", GetOnvifPresets)
|
||||
|
||||
@@ -123,7 +123,6 @@ func ConfigureMQTT(configDirectory string, configuration *models.Configuration,
|
||||
opts.SetClientID(mqttClientID)
|
||||
log.Log.Info("routers.mqtt.main.ConfigureMQTT(): Set ClientID " + mqttClientID)
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
webrtc.CandidateArrays = make(map[string](chan string))
|
||||
|
||||
opts.OnConnect = func(c mqtt.Client) {
|
||||
// We managed to connect to the MQTT broker, hurray!
|
||||
@@ -166,9 +165,33 @@ func MQTTListenerHandler(mqttClient mqtt.Client, hubKey string, configDirectory
|
||||
|
||||
// We will receive all messages from our hub, so we'll need to filter to the relevant device.
|
||||
if message.Mid != "" && message.Timestamp != 0 && message.DeviceId == configuration.Config.Key {
|
||||
// Messages might be encrypted, if so we'll
|
||||
// need to decrypt them.
|
||||
var payload models.Payload
|
||||
|
||||
// Messages might be hidden, if so we'll need to decrypt them using the Kerberos Hub private key.
|
||||
if message.Hidden && configuration.Config.HubEncryption == "true" {
|
||||
hiddenValue := message.Payload.HiddenValue
|
||||
if len(hiddenValue) > 0 {
|
||||
privateKey := configuration.Config.HubPrivateKey
|
||||
if privateKey != "" {
|
||||
data, err := base64.StdEncoding.DecodeString(hiddenValue)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
visibleValue, err := encryption.AesDecrypt(data, privateKey)
|
||||
if err != nil {
|
||||
log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message: " + err.Error())
|
||||
return
|
||||
}
|
||||
json.Unmarshal(visibleValue, &payload)
|
||||
message.Payload = payload
|
||||
} else {
|
||||
log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message, no private key provided.")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Messages might be end-to-end encrypted, if so we'll need to decrypt them,
|
||||
// using our own keys.
|
||||
if message.Encrypted && configuration.Config.Encryption != nil && configuration.Config.Encryption.Enabled == "true" {
|
||||
encryptedValue := message.Payload.EncryptedValue
|
||||
if len(encryptedValue) > 0 {
|
||||
@@ -317,7 +340,7 @@ func HandleGetPTZPosition(mqttClient mqtt.Client, hubKey string, payload models.
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("routers.mqtt.main.HandlePTZPosition(): something went wrong while sending position to hub: " + string(payload))
|
||||
}
|
||||
@@ -352,10 +375,13 @@ func HandleRequestConfig(mqttClient mqtt.Client, hubKey string, payload models.P
|
||||
json.Unmarshal(jsonData, &configPayload)
|
||||
|
||||
if configPayload.Timestamp != 0 {
|
||||
// Get Config from the device
|
||||
|
||||
// Get Config from the device
|
||||
key := configuration.Config.Key
|
||||
name := configuration.Config.Name
|
||||
if configuration.Config.FriendlyName != "" {
|
||||
name = configuration.Config.FriendlyName
|
||||
}
|
||||
|
||||
if key != "" && name != "" {
|
||||
|
||||
@@ -378,7 +404,7 @@ func HandleRequestConfig(mqttClient mqtt.Client, hubKey string, payload models.P
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("routers.mqtt.main.HandleRequestConfig(): something went wrong while sending config to hub: " + string(payload))
|
||||
}
|
||||
@@ -417,7 +443,7 @@ func HandleUpdateConfig(mqttClient mqtt.Client, hubKey string, payload models.Pa
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 2, false, payload)
|
||||
} else {
|
||||
log.Log.Info("routers.mqtt.main.HandleUpdateConfig(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ var upgrader = websocket.Upgrader{
|
||||
},
|
||||
}
|
||||
|
||||
func WebsocketHandler(c *gin.Context, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
func WebsocketHandler(c *gin.Context, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
w := c.Writer
|
||||
r := c.Request
|
||||
conn, err := upgrader.Upgrade(w, r, nil)
|
||||
@@ -112,7 +112,7 @@ func WebsocketHandler(c *gin.Context, communication *models.Communication, captu
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
sockets[clientID].Cancels["stream-sd"] = cancel
|
||||
go ForwardSDStream(ctx, clientID, sockets[clientID], communication, captureDevice)
|
||||
go ForwardSDStream(ctx, clientID, sockets[clientID], configuration, communication, captureDevice)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -131,7 +131,7 @@ func WebsocketHandler(c *gin.Context, communication *models.Communication, captu
|
||||
}
|
||||
}
|
||||
|
||||
func ForwardSDStream(ctx context.Context, clientID string, connection *Connection, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
func ForwardSDStream(ctx context.Context, clientID string, connection *Connection, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
|
||||
var queue *packets.Queue
|
||||
var cursor *packets.QueueCursor
|
||||
@@ -159,8 +159,13 @@ logreader:
|
||||
var img image.YCbCr
|
||||
img, err = (*rtspClient).DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
config := configuration.Config
|
||||
// Resize the image to the base width and height
|
||||
imageResized, _ := utils.ResizeImage(&img, uint(config.Capture.IPCamera.BaseWidth), uint(config.Capture.IPCamera.BaseHeight))
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
encodedImage = base64.StdEncoding.EncodeToString(bytes)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
log.Log.Error("routers.websocket.main.ForwardSDStream():" + err.Error())
|
||||
|
||||
@@ -21,8 +21,15 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/encryption"
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
|
||||
"github.com/nfnt/resize"
|
||||
)
|
||||
|
||||
// VERSION is the agent version. It defaults to "0.0.0" for local dev builds
|
||||
// and is overridden at build time via:
|
||||
// go build -ldflags "-X github.com/kerberos-io/agent/machinery/src/utils.VERSION=v1.2.3"
|
||||
var VERSION = "0.0.0"
|
||||
|
||||
const letterBytes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
// MaxUint8 - maximum value which can be held in an uint8
|
||||
@@ -399,9 +406,31 @@ func Decrypt(directoryOrFile string, symmetricKey []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
func ImageToBytes(img image.Image) ([]byte, error) {
|
||||
func ImageToBytes(img *image.Image) ([]byte, error) {
|
||||
buffer := new(bytes.Buffer)
|
||||
w := bufio.NewWriter(buffer)
|
||||
err := jpeg.Encode(w, img, &jpeg.Options{Quality: 15})
|
||||
err := jpeg.Encode(w, *img, &jpeg.Options{Quality: 35})
|
||||
log.Log.Debug("ImageToBytes() - buffer size: " + strconv.Itoa(buffer.Len()))
|
||||
return buffer.Bytes(), err
|
||||
}
|
||||
|
||||
func ResizeImage(img image.Image, newWidth uint, newHeight uint) (*image.Image, error) {
|
||||
if img == nil {
|
||||
return nil, errors.New("image is nil")
|
||||
}
|
||||
|
||||
// resize to width 640 using Lanczos resampling
|
||||
// and preserve aspect ratio
|
||||
m := resize.Resize(newWidth, newHeight, img, resize.Lanczos3)
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
func ResizeHeightWithAspectRatio(newWidth int, width int, height int) (int, int) {
|
||||
if newWidth <= 0 || width <= 0 || height <= 0 {
|
||||
return width, height
|
||||
}
|
||||
// Calculate the new height based on the aspect ratio
|
||||
newHeight := (newWidth * height) / width
|
||||
// Return the new dimensions
|
||||
return newWidth, newHeight
|
||||
}
|
||||
|
||||
1379
machinery/src/video/mp4.go
Normal file
1379
machinery/src/video/mp4.go
Normal file
File diff suppressed because it is too large
Load Diff
176
machinery/src/video/mp4_duration_test.go
Normal file
176
machinery/src/video/mp4_duration_test.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package video
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
mp4ff "github.com/Eyevinn/mp4ff/mp4"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
)
|
||||
|
||||
// TestMP4Duration creates an MP4 file simulating a 5-second video recording
|
||||
// and verifies that the durations in all boxes match the sum of sample durations.
|
||||
func TestMP4Duration(t *testing.T) {
|
||||
tmpFile := "/tmp/test_duration.mp4"
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
// Minimal SPS for H.264 (baseline, 640x480) - proper Annex B format with start code
|
||||
sps := []byte{0x67, 0x42, 0xc0, 0x1e, 0xd9, 0x00, 0xa0, 0x47, 0xfe, 0xc8}
|
||||
pps := []byte{0x68, 0xce, 0x38, 0x80}
|
||||
|
||||
mp4Video := NewMP4(tmpFile, [][]byte{sps}, [][]byte{pps}, nil, 10)
|
||||
mp4Video.SetWidth(640)
|
||||
mp4Video.SetHeight(480)
|
||||
videoTrack := mp4Video.AddVideoTrack("H264")
|
||||
|
||||
// Simulate 5 seconds at 25fps (200 frames, keyframe every 50 frames = 2s)
|
||||
// PTS in milliseconds (timescale=1000)
|
||||
frameDuration := uint64(40) // 40ms per frame = 25fps
|
||||
numFrames := 150
|
||||
gopSize := 50
|
||||
|
||||
// Create a fake Annex B NAL unit (keyframe IDR = type 5, non-keyframe = type 1)
|
||||
makeFrame := func(isKey bool) []byte {
|
||||
nalType := byte(0x01) // non-IDR slice
|
||||
if isKey {
|
||||
nalType = 0x65 // IDR slice
|
||||
}
|
||||
// Start code (4 bytes) + NAL header + some data
|
||||
frame := []byte{0x00, 0x00, 0x00, 0x01, nalType}
|
||||
// Add some padding data
|
||||
for i := 0; i < 100; i++ {
|
||||
frame = append(frame, byte(i))
|
||||
}
|
||||
return frame
|
||||
}
|
||||
|
||||
var expectedDuration uint64
|
||||
for i := 0; i < numFrames; i++ {
|
||||
pts := uint64(i) * frameDuration
|
||||
isKeyframe := i%gopSize == 0
|
||||
err := mp4Video.AddSampleToTrack(videoTrack, isKeyframe, makeFrame(isKeyframe), pts)
|
||||
if err != nil {
|
||||
t.Fatalf("AddSampleToTrack failed at frame %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
expectedDuration = uint64(numFrames) * frameDuration // Should be 6000ms (150 * 40)
|
||||
|
||||
// Close with config that has signing key to avoid nil panics
|
||||
config := &models.Config{
|
||||
Signing: &models.Signing{
|
||||
PrivateKey: "",
|
||||
},
|
||||
}
|
||||
mp4Video.Close(config)
|
||||
|
||||
// Log what the code computed
|
||||
t.Logf("VideoTotalDuration: %d ms", mp4Video.VideoTotalDuration)
|
||||
t.Logf("Expected duration: %d ms", expectedDuration)
|
||||
t.Logf("Segments: %d", len(mp4Video.SegmentDurations))
|
||||
var sumSegDur uint64
|
||||
for i, d := range mp4Video.SegmentDurations {
|
||||
t.Logf(" Segment %d: duration=%d ms", i, d)
|
||||
sumSegDur += d
|
||||
}
|
||||
t.Logf("Sum of segment durations: %d ms", sumSegDur)
|
||||
|
||||
// Now read back the file and inspect the boxes
|
||||
f, err := os.Open(tmpFile)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to open output file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to stat output file: %v", err)
|
||||
}
|
||||
|
||||
parsedFile, err := mp4ff.DecodeFile(f)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode MP4: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("File size: %d bytes", fi.Size())
|
||||
|
||||
// Check moov box
|
||||
if parsedFile.Moov == nil {
|
||||
t.Fatal("No moov box found")
|
||||
}
|
||||
|
||||
// Check mvhd duration
|
||||
mvhd := parsedFile.Moov.Mvhd
|
||||
t.Logf("mvhd.Duration: %d (timescale=%d) = %.2f seconds", mvhd.Duration, mvhd.Timescale, float64(mvhd.Duration)/float64(mvhd.Timescale))
|
||||
t.Logf("mvhd.Rate: 0x%08x", mvhd.Rate)
|
||||
t.Logf("mvhd.Volume: 0x%04x", mvhd.Volume)
|
||||
|
||||
// Check each trak
|
||||
for i, trak := range parsedFile.Moov.Traks {
|
||||
t.Logf("Track %d:", i)
|
||||
t.Logf(" tkhd.Duration: %d", trak.Tkhd.Duration)
|
||||
t.Logf(" mdhd.Duration: %d (timescale=%d) = %.2f seconds", trak.Mdia.Mdhd.Duration, trak.Mdia.Mdhd.Timescale, float64(trak.Mdia.Mdhd.Duration)/float64(trak.Mdia.Mdhd.Timescale))
|
||||
}
|
||||
|
||||
// Check mvex/mehd
|
||||
if parsedFile.Moov.Mvex != nil && parsedFile.Moov.Mvex.Mehd != nil {
|
||||
t.Logf("mehd.FragmentDuration: %d", parsedFile.Moov.Mvex.Mehd.FragmentDuration)
|
||||
}
|
||||
|
||||
// Sum up actual sample durations from trun boxes in all segments
|
||||
var actualTrunDuration uint64
|
||||
var sampleCount int
|
||||
for _, seg := range parsedFile.Segments {
|
||||
for _, frag := range seg.Fragments {
|
||||
for _, traf := range frag.Moof.Trafs {
|
||||
// Only count video track (track 1)
|
||||
if traf.Tfhd.TrackID == 1 {
|
||||
for _, trun := range traf.Truns {
|
||||
for _, s := range trun.Samples {
|
||||
actualTrunDuration += uint64(s.Dur)
|
||||
sampleCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
t.Logf("Actual trun sample count: %d", sampleCount)
|
||||
t.Logf("Actual trun total duration: %d ms", actualTrunDuration)
|
||||
|
||||
// Check sidx
|
||||
if parsedFile.Sidx != nil {
|
||||
var sidxDuration uint64
|
||||
for _, ref := range parsedFile.Sidx.SidxRefs {
|
||||
sidxDuration += uint64(ref.SubSegmentDuration)
|
||||
}
|
||||
t.Logf("sidx total duration: %d ms", sidxDuration)
|
||||
}
|
||||
|
||||
// VERIFY: All duration values should be consistent
|
||||
// The expected duration for 150 frames at 40ms each:
|
||||
// - The sample-buffering pattern means the LAST sample uses LastVideoSampleDTS as duration
|
||||
// - So all 150 samples should produce 150 * 40ms = 6000ms total
|
||||
// But due to the pending sample pattern, the actual trun durations might differ
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("=== DURATION CONSISTENCY CHECK ===")
|
||||
fmt.Printf("Expected (150 * 40ms): %d ms\n", expectedDuration)
|
||||
fmt.Printf("mvhd.Duration: %d ms\n", mvhd.Duration)
|
||||
fmt.Printf("tkhd.Duration: %d ms\n", parsedFile.Moov.Traks[0].Tkhd.Duration)
|
||||
fmt.Printf("mdhd.Duration: %d ms\n", parsedFile.Moov.Traks[0].Mdia.Mdhd.Duration)
|
||||
fmt.Printf("Actual trun durations sum: %d ms\n", actualTrunDuration)
|
||||
fmt.Printf("VideoTotalDuration: %d ms\n", mp4Video.VideoTotalDuration)
|
||||
fmt.Printf("Sum of SegmentDurations: %d ms\n", sumSegDur)
|
||||
fmt.Println()
|
||||
|
||||
// The key assertion: header duration must equal trun sum
|
||||
if mvhd.Duration != actualTrunDuration {
|
||||
t.Errorf("MISMATCH: mvhd.Duration (%d) != actual trun sum (%d), diff = %d ms",
|
||||
mvhd.Duration, actualTrunDuration, int64(mvhd.Duration)-int64(actualTrunDuration))
|
||||
}
|
||||
if parsedFile.Moov.Traks[0].Mdia.Mdhd.Duration != 0 {
|
||||
t.Errorf("MISMATCH: mdhd.Duration should be 0 for fragmented MP4, got %d",
|
||||
parsedFile.Moov.Traks[0].Mdia.Mdhd.Duration)
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package webrtc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
@@ -9,24 +10,118 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
//"github.com/izern/go-fdkaac/fdkaac"
|
||||
"github.com/kerberos-io/agent/machinery/src/capture"
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
pionWebRTC "github.com/pion/webrtc/v3"
|
||||
pionMedia "github.com/pion/webrtc/v3/pkg/media"
|
||||
"github.com/pion/interceptor"
|
||||
"github.com/pion/interceptor/pkg/intervalpli"
|
||||
pionWebRTC "github.com/pion/webrtc/v4"
|
||||
pionMedia "github.com/pion/webrtc/v4/pkg/media"
|
||||
)
|
||||
|
||||
var (
|
||||
CandidatesMutex sync.Mutex
|
||||
CandidateArrays map[string](chan string)
|
||||
peerConnectionCount int64
|
||||
peerConnections map[string]*pionWebRTC.PeerConnection
|
||||
//encoder *ffmpeg.VideoEncoder
|
||||
const (
|
||||
// Channel buffer sizes
|
||||
candidateChannelBuffer = 100
|
||||
rtcpBufferSize = 1500
|
||||
|
||||
// Timeouts and intervals
|
||||
keepAliveTimeout = 15 * time.Second
|
||||
defaultTimeout = 10 * time.Second
|
||||
|
||||
// Track identifiers
|
||||
trackStreamID = "kerberos-stream"
|
||||
)
|
||||
|
||||
// ConnectionManager manages WebRTC peer connections in a thread-safe manner
|
||||
type ConnectionManager struct {
|
||||
mu sync.RWMutex
|
||||
candidateChannels map[string]chan string
|
||||
peerConnections map[string]*peerConnectionWrapper
|
||||
peerConnectionCount int64
|
||||
}
|
||||
|
||||
// peerConnectionWrapper wraps a peer connection with additional metadata
|
||||
type peerConnectionWrapper struct {
|
||||
conn *pionWebRTC.PeerConnection
|
||||
cancelCtx context.CancelFunc
|
||||
done chan struct{}
|
||||
closeOnce sync.Once
|
||||
}
|
||||
|
||||
var globalConnectionManager = NewConnectionManager()
|
||||
|
||||
// NewConnectionManager creates a new connection manager
|
||||
func NewConnectionManager() *ConnectionManager {
|
||||
return &ConnectionManager{
|
||||
candidateChannels: make(map[string]chan string),
|
||||
peerConnections: make(map[string]*peerConnectionWrapper),
|
||||
}
|
||||
}
|
||||
|
||||
// GetOrCreateCandidateChannel gets or creates a candidate channel for a session
|
||||
func (cm *ConnectionManager) GetOrCreateCandidateChannel(sessionKey string) chan string {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if ch, exists := cm.candidateChannels[sessionKey]; exists {
|
||||
return ch
|
||||
}
|
||||
|
||||
ch := make(chan string, candidateChannelBuffer)
|
||||
cm.candidateChannels[sessionKey] = ch
|
||||
return ch
|
||||
}
|
||||
|
||||
// CloseCandidateChannel safely closes and removes a candidate channel
|
||||
func (cm *ConnectionManager) CloseCandidateChannel(sessionKey string) {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if ch, exists := cm.candidateChannels[sessionKey]; exists {
|
||||
close(ch)
|
||||
delete(cm.candidateChannels, sessionKey)
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeerConnection adds a peer connection to the manager
|
||||
func (cm *ConnectionManager) AddPeerConnection(sessionID string, wrapper *peerConnectionWrapper) {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
cm.peerConnections[sessionID] = wrapper
|
||||
}
|
||||
|
||||
// RemovePeerConnection removes a peer connection from the manager
|
||||
func (cm *ConnectionManager) RemovePeerConnection(sessionID string) {
|
||||
cm.mu.Lock()
|
||||
defer cm.mu.Unlock()
|
||||
|
||||
if wrapper, exists := cm.peerConnections[sessionID]; exists {
|
||||
if wrapper.cancelCtx != nil {
|
||||
wrapper.cancelCtx()
|
||||
}
|
||||
delete(cm.peerConnections, sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
// GetPeerConnectionCount returns the current count of active peer connections
|
||||
func (cm *ConnectionManager) GetPeerConnectionCount() int64 {
|
||||
return atomic.LoadInt64(&cm.peerConnectionCount)
|
||||
}
|
||||
|
||||
// IncrementPeerCount atomically increments the peer connection count
|
||||
func (cm *ConnectionManager) IncrementPeerCount() int64 {
|
||||
return atomic.AddInt64(&cm.peerConnectionCount, 1)
|
||||
}
|
||||
|
||||
// DecrementPeerCount atomically decrements the peer connection count
|
||||
func (cm *ConnectionManager) DecrementPeerCount() int64 {
|
||||
return atomic.AddInt64(&cm.peerConnectionCount, -1)
|
||||
}
|
||||
|
||||
type WebRTC struct {
|
||||
Name string
|
||||
StunServers []string
|
||||
@@ -37,24 +132,6 @@ type WebRTC struct {
|
||||
PacketsCount chan int
|
||||
}
|
||||
|
||||
// No longer used, is for transcoding, might comeback on this!
|
||||
/*func init() {
|
||||
// Encoder is created for once and for all.
|
||||
var err error
|
||||
encoder, err = ffmpeg.NewVideoEncoderByCodecType(av.H264)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if encoder == nil {
|
||||
err = fmt.Errorf("Video encoder not found")
|
||||
return
|
||||
}
|
||||
encoder.SetFramerate(30, 1)
|
||||
encoder.SetPixelFormat(av.I420)
|
||||
encoder.SetBitrate(1000000) // 1MB
|
||||
encoder.SetGopSize(30 / 1) // 1s
|
||||
}*/
|
||||
|
||||
func CreateWebRTC(name string, stunServers []string, turnServers []string, turnServersUsername string, turnServersCredential string) *WebRTC {
|
||||
return &WebRTC{
|
||||
Name: name,
|
||||
@@ -62,8 +139,7 @@ func CreateWebRTC(name string, stunServers []string, turnServers []string, turnS
|
||||
TurnServers: turnServers,
|
||||
TurnServersUsername: turnServersUsername,
|
||||
TurnServersCredential: turnServersCredential,
|
||||
Timer: time.NewTimer(time.Second * 10),
|
||||
PacketsCount: make(chan int),
|
||||
Timer: time.NewTimer(defaultTimeout),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,19 +161,27 @@ func (w WebRTC) CreateOffer(sd []byte) pionWebRTC.SessionDescription {
|
||||
}
|
||||
|
||||
func RegisterCandidates(key string, candidate models.ReceiveHDCandidatesPayload) {
|
||||
// Set lock
|
||||
CandidatesMutex.Lock()
|
||||
_, ok := CandidateArrays[key]
|
||||
if !ok {
|
||||
CandidateArrays[key] = make(chan string)
|
||||
}
|
||||
log.Log.Info("webrtc.main.HandleReceiveHDCandidates(): " + candidate.Candidate)
|
||||
ch := globalConnectionManager.GetOrCreateCandidateChannel(key)
|
||||
|
||||
log.Log.Info("webrtc.main.RegisterCandidates(): " + candidate.Candidate)
|
||||
select {
|
||||
case CandidateArrays[key] <- candidate.Candidate:
|
||||
case ch <- candidate.Candidate:
|
||||
default:
|
||||
log.Log.Info("webrtc.main.HandleReceiveHDCandidates(): channel is full.")
|
||||
log.Log.Info("webrtc.main.RegisterCandidates(): channel is full, dropping candidate")
|
||||
}
|
||||
CandidatesMutex.Unlock()
|
||||
}
|
||||
|
||||
func RegisterDefaultInterceptors(mediaEngine *pionWebRTC.MediaEngine, interceptorRegistry *interceptor.Registry) error {
|
||||
if err := pionWebRTC.ConfigureNack(mediaEngine, interceptorRegistry); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pionWebRTC.ConfigureRTCPReports(interceptorRegistry); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pionWebRTC.ConfigureSimulcastExtensionHeaders(mediaEngine); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func InitializeWebRTCConnection(configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, videoTrack *pionWebRTC.TrackLocalStaticSample, audioTrack *pionWebRTC.TrackLocalStaticSample, handshake models.RequestHDStreamPayload) {
|
||||
@@ -111,12 +195,7 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
|
||||
// We create a channel which will hold the candidates for this session.
|
||||
sessionKey := config.Key + "/" + handshake.SessionID
|
||||
CandidatesMutex.Lock()
|
||||
_, ok := CandidateArrays[sessionKey]
|
||||
if !ok {
|
||||
CandidateArrays[sessionKey] = make(chan string)
|
||||
}
|
||||
CandidatesMutex.Unlock()
|
||||
candidateChannel := globalConnectionManager.GetOrCreateCandidateChannel(sessionKey)
|
||||
|
||||
// Set variables
|
||||
hubKey := handshake.HubKey
|
||||
@@ -133,7 +212,36 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong registering codecs for media engine: " + err.Error())
|
||||
}
|
||||
|
||||
api := pionWebRTC.NewAPI(pionWebRTC.WithMediaEngine(mediaEngine))
|
||||
// Create a InterceptorRegistry. This is the user configurable RTP/RTCP Pipeline.
|
||||
// This provides NACKs, RTCP Reports and other features. If you use `webrtc.NewPeerConnection`
|
||||
// this is enabled by default. If you are manually managing You MUST create a InterceptorRegistry
|
||||
// for each PeerConnection.
|
||||
interceptorRegistry := &interceptor.Registry{}
|
||||
|
||||
// Use the default set of Interceptors
|
||||
if err := pionWebRTC.RegisterDefaultInterceptors(mediaEngine, interceptorRegistry); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Register a intervalpli factory
|
||||
// This interceptor sends a PLI every 3 seconds. A PLI causes a video keyframe to be generated by the sender.
|
||||
// This makes our video seekable and more error resilent, but at a cost of lower picture quality and higher bitrates
|
||||
// A real world application should process incoming RTCP packets from viewers and forward them to senders
|
||||
intervalPliFactory, err := intervalpli.NewReceiverInterceptor()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
interceptorRegistry.Add(intervalPliFactory)
|
||||
|
||||
api := pionWebRTC.NewAPI(
|
||||
pionWebRTC.WithMediaEngine(mediaEngine),
|
||||
pionWebRTC.WithInterceptorRegistry(interceptorRegistry),
|
||||
)
|
||||
|
||||
policy := pionWebRTC.ICETransportPolicyAll
|
||||
if config.ForceTurn == "true" {
|
||||
policy = pionWebRTC.ICETransportPolicyRelay
|
||||
}
|
||||
|
||||
peerConnection, err := api.NewPeerConnection(
|
||||
pionWebRTC.Configuration{
|
||||
@@ -147,55 +255,137 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
Credential: w.TurnServersCredential,
|
||||
},
|
||||
},
|
||||
//ICETransportPolicy: pionWebRTC.ICETransportPolicyRelay, // This will force a relay server, we might make this configurable.
|
||||
ICETransportPolicy: policy,
|
||||
},
|
||||
)
|
||||
|
||||
if err == nil && peerConnection != nil {
|
||||
|
||||
if _, err = peerConnection.AddTrack(videoTrack); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding video track: " + err.Error())
|
||||
// Create context for this connection
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
wrapper := &peerConnectionWrapper{
|
||||
conn: peerConnection,
|
||||
cancelCtx: cancel,
|
||||
done: make(chan struct{}),
|
||||
}
|
||||
|
||||
if _, err = peerConnection.AddTrack(audioTrack); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding audio track: " + err.Error())
|
||||
var videoSender *pionWebRTC.RTPSender = nil
|
||||
if videoTrack != nil {
|
||||
if videoSender, err = peerConnection.AddTrack(videoTrack); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): error adding video track: " + err.Error())
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): video track is nil, skipping video")
|
||||
}
|
||||
|
||||
peerConnection.OnICEConnectionStateChange(func(connectionState pionWebRTC.ICEConnectionState) {
|
||||
if connectionState == pionWebRTC.ICEConnectionStateDisconnected {
|
||||
atomic.AddInt64(&peerConnectionCount, -1)
|
||||
|
||||
// Set lock
|
||||
CandidatesMutex.Lock()
|
||||
peerConnections[handshake.SessionID] = nil
|
||||
_, ok := CandidateArrays[sessionKey]
|
||||
if ok {
|
||||
close(CandidateArrays[sessionKey])
|
||||
}
|
||||
CandidatesMutex.Unlock()
|
||||
|
||||
close(w.PacketsCount)
|
||||
if err := peerConnection.Close(); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while closing peer connection: " + err.Error())
|
||||
}
|
||||
} else if connectionState == pionWebRTC.ICEConnectionStateConnected {
|
||||
atomic.AddInt64(&peerConnectionCount, 1)
|
||||
} else if connectionState == pionWebRTC.ICEConnectionStateChecking {
|
||||
// Iterate over the candidates and send them to the remote client
|
||||
// Non blocking channel
|
||||
for candidate := range CandidateArrays[sessionKey] {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Received candidate from channel: " + candidate)
|
||||
if candidateErr := peerConnection.AddICECandidate(pionWebRTC.ICECandidateInit{Candidate: string(candidate)}); candidateErr != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while adding candidate: " + candidateErr.Error())
|
||||
// Read incoming RTCP packets
|
||||
// Before these packets are returned they are processed by interceptors. For things
|
||||
// like NACK this needs to be called.
|
||||
if videoSender != nil {
|
||||
go func() {
|
||||
defer func() {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): video RTCP reader stopped")
|
||||
}()
|
||||
rtcpBuf := make([]byte, rtcpBufferSize)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
if _, _, rtcpErr := videoSender.Read(rtcpBuf); rtcpErr != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if connectionState == pionWebRTC.ICEConnectionStateFailed {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): ICEConnectionStateFailed")
|
||||
}()
|
||||
}
|
||||
|
||||
var audioSender *pionWebRTC.RTPSender = nil
|
||||
if audioTrack != nil {
|
||||
if audioSender, err = peerConnection.AddTrack(audioTrack); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): error adding audio track: " + err.Error())
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): audio track is nil, skipping audio")
|
||||
}
|
||||
|
||||
// Read incoming RTCP packets
|
||||
// Before these packets are returned they are processed by interceptors. For things
|
||||
// like NACK this needs to be called.
|
||||
if audioSender != nil {
|
||||
go func() {
|
||||
defer func() {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): audio RTCP reader stopped")
|
||||
}()
|
||||
rtcpBuf := make([]byte, rtcpBufferSize)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
if _, _, rtcpErr := audioSender.Read(rtcpBuf); rtcpErr != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
peerConnection.OnConnectionStateChange(func(connectionState pionWebRTC.PeerConnectionState) {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): connection state changed to: " + connectionState.String())
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Number of peers connected (" + strconv.FormatInt(peerConnectionCount, 10) + ")")
|
||||
|
||||
switch connectionState {
|
||||
case pionWebRTC.PeerConnectionStateDisconnected, pionWebRTC.PeerConnectionStateClosed:
|
||||
wrapper.closeOnce.Do(func() {
|
||||
count := globalConnectionManager.DecrementPeerCount()
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Peer disconnected. Active peers: " + string(rune(count)))
|
||||
|
||||
// Clean up resources
|
||||
globalConnectionManager.CloseCandidateChannel(sessionKey)
|
||||
|
||||
if err := peerConnection.Close(); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): error closing peer connection: " + err.Error())
|
||||
}
|
||||
|
||||
globalConnectionManager.RemovePeerConnection(handshake.SessionID)
|
||||
close(wrapper.done)
|
||||
})
|
||||
|
||||
case pionWebRTC.PeerConnectionStateConnected:
|
||||
count := globalConnectionManager.IncrementPeerCount()
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Peer connected. Active peers: " + string(rune(count)))
|
||||
|
||||
case pionWebRTC.PeerConnectionStateFailed:
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): ICE connection failed")
|
||||
}
|
||||
})
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): candidate processor stopped for session: " + handshake.SessionID)
|
||||
}()
|
||||
|
||||
// Iterate over the candidates and send them to the remote client
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case candidate, ok := <-candidateChannel:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Received candidate from channel: " + candidate)
|
||||
if candidateErr := peerConnection.AddICECandidate(pionWebRTC.ICECandidateInit{Candidate: candidate}); candidateErr != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): error adding candidate: " + candidateErr.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
offer := w.CreateOffer(sd)
|
||||
if err = peerConnection.SetRemoteDescription(offer); err != nil {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): something went wrong while setting remote description: " + err.Error())
|
||||
@@ -210,20 +400,56 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
|
||||
// When an ICE candidate is available send to the other peer using the signaling server (MQTT).
|
||||
// The other peer will add this candidate by calling AddICECandidate
|
||||
var hasRelayCandidates bool
|
||||
peerConnection.OnICECandidate(func(candidate *pionWebRTC.ICECandidate) {
|
||||
|
||||
if candidate == nil {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): ICE gathering complete (candidate is nil)")
|
||||
if !hasRelayCandidates {
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): WARNING - No TURN (relay) candidates were gathered! TURN servers: " +
|
||||
config.TURNURI + ", Username: " + config.TURNUsername + ", ForceTurn: " + config.ForceTurn)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Log candidate details for debugging
|
||||
candidateJSON := candidate.ToJSON()
|
||||
candidateStr := candidateJSON.Candidate
|
||||
|
||||
// Determine candidate type from the candidate string
|
||||
candidateType := "unknown"
|
||||
if candidateJSON.Candidate != "" {
|
||||
switch candidate.Typ {
|
||||
case pionWebRTC.ICECandidateTypeRelay:
|
||||
candidateType = "relay"
|
||||
case pionWebRTC.ICECandidateTypeSrflx:
|
||||
candidateType = "srflx"
|
||||
case pionWebRTC.ICECandidateTypeHost:
|
||||
candidateType = "host"
|
||||
case pionWebRTC.ICECandidateTypePrflx:
|
||||
candidateType = "prflx"
|
||||
}
|
||||
}
|
||||
|
||||
// Track if we received any relay (TURN) candidates
|
||||
if candidateType == "relay" {
|
||||
hasRelayCandidates = true
|
||||
}
|
||||
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): ICE candidate received - Type: " + candidateType +
|
||||
", Candidate: " + candidateStr)
|
||||
|
||||
// Create a config map
|
||||
valueMap := make(map[string]interface{})
|
||||
candateJSON := candidate.ToJSON()
|
||||
sdpmid := "0"
|
||||
candateJSON.SDPMid = &sdpmid
|
||||
candateBinary, err := json.Marshal(candateJSON)
|
||||
candateBinary, err := json.Marshal(candidateJSON)
|
||||
if err == nil {
|
||||
valueMap["candidate"] = string(candateBinary)
|
||||
// SDP is not needed to be send..
|
||||
//valueMap["sdp"] = []byte(base64.StdEncoding.EncodeToString([]byte(answer.SDP)))
|
||||
valueMap["session_id"] = handshake.SessionID
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): sending " + candidateType + " candidate to hub")
|
||||
} else {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): something went wrong while marshalling candidate: " + err.Error())
|
||||
log.Log.Error("webrtc.main.InitializeWebRTCConnection(): failed to marshal candidate: " + err.Error())
|
||||
}
|
||||
|
||||
// We'll send the candidate to the hub
|
||||
@@ -243,13 +469,14 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
}
|
||||
})
|
||||
|
||||
// Create a channel which will be used to send candidates to the other peer
|
||||
peerConnections[handshake.SessionID] = peerConnection
|
||||
// Store peer connection in manager
|
||||
globalConnectionManager.AddPeerConnection(handshake.SessionID, wrapper)
|
||||
|
||||
if err == nil {
|
||||
// Create a config map
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["sdp"] = []byte(base64.StdEncoding.EncodeToString([]byte(answer.SDP)))
|
||||
valueMap["session_id"] = handshake.SessionID
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): Send SDP answer")
|
||||
|
||||
// We'll send the candidate to the hub
|
||||
@@ -276,7 +503,11 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
|
||||
func NewVideoTrack(streams []packets.Stream) *pionWebRTC.TrackLocalStaticSample {
|
||||
mimeType := pionWebRTC.MimeTypeH264
|
||||
outboundVideoTrack, _ := pionWebRTC.NewTrackLocalStaticSample(pionWebRTC.RTPCodecCapability{MimeType: mimeType}, "video", "pion124")
|
||||
outboundVideoTrack, err := pionWebRTC.NewTrackLocalStaticSample(pionWebRTC.RTPCodecCapability{MimeType: mimeType}, "video", trackStreamID)
|
||||
if err != nil {
|
||||
log.Log.Error("webrtc.main.NewVideoTrack(): error creating video track: " + err.Error())
|
||||
return nil
|
||||
}
|
||||
return outboundVideoTrack
|
||||
}
|
||||
|
||||
@@ -291,130 +522,245 @@ func NewAudioTrack(streams []packets.Stream) *pionWebRTC.TrackLocalStaticSample
|
||||
mimeType = pionWebRTC.MimeTypePCMA
|
||||
}
|
||||
}
|
||||
outboundAudioTrack, _ := pionWebRTC.NewTrackLocalStaticSample(pionWebRTC.RTPCodecCapability{MimeType: mimeType}, "audio", "pion124")
|
||||
if mimeType == "" {
|
||||
log.Log.Error("webrtc.main.NewAudioTrack(): no supported audio codec found")
|
||||
return nil
|
||||
}
|
||||
outboundAudioTrack, err := pionWebRTC.NewTrackLocalStaticSample(pionWebRTC.RTPCodecCapability{MimeType: mimeType}, "audio", trackStreamID)
|
||||
if err != nil {
|
||||
log.Log.Error("webrtc.main.NewAudioTrack(): error creating audio track: " + err.Error())
|
||||
return nil
|
||||
}
|
||||
return outboundAudioTrack
|
||||
}
|
||||
|
||||
// streamState holds state information for the streaming process
|
||||
type streamState struct {
|
||||
lastKeepAlive int64
|
||||
peerCount int64
|
||||
start bool
|
||||
receivedKeyFrame bool
|
||||
lastAudioSample *pionMedia.Sample
|
||||
lastVideoSample *pionMedia.Sample
|
||||
}
|
||||
|
||||
// codecSupport tracks which codecs are available in the stream
|
||||
type codecSupport struct {
|
||||
hasH264 bool
|
||||
hasPCM_MULAW bool
|
||||
hasAAC bool
|
||||
hasOpus bool
|
||||
}
|
||||
|
||||
// detectCodecs examines the stream to determine which codecs are available
|
||||
func detectCodecs(rtspClient capture.RTSPClient) codecSupport {
|
||||
support := codecSupport{}
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
|
||||
for _, stream := range streams {
|
||||
switch stream.Name {
|
||||
case "H264":
|
||||
support.hasH264 = true
|
||||
case "PCM_MULAW":
|
||||
support.hasPCM_MULAW = true
|
||||
case "AAC":
|
||||
support.hasAAC = true
|
||||
case "OPUS":
|
||||
support.hasOpus = true
|
||||
}
|
||||
}
|
||||
|
||||
return support
|
||||
}
|
||||
|
||||
// hasValidCodecs checks if at least one valid video or audio codec is present
|
||||
func (cs codecSupport) hasValidCodecs() bool {
|
||||
hasVideo := cs.hasH264
|
||||
hasAudio := cs.hasPCM_MULAW || cs.hasAAC || cs.hasOpus
|
||||
return hasVideo || hasAudio
|
||||
}
|
||||
|
||||
// shouldContinueStreaming determines if streaming should continue based on keepalive and peer count
|
||||
func shouldContinueStreaming(config models.Config, state *streamState) bool {
|
||||
if config.Capture.ForwardWebRTC != "true" {
|
||||
return true
|
||||
}
|
||||
|
||||
now := time.Now().Unix()
|
||||
hasTimedOut := (now - state.lastKeepAlive) > int64(keepAliveTimeout.Seconds())
|
||||
hasNoPeers := state.peerCount == 0
|
||||
|
||||
return !hasTimedOut && !hasNoPeers
|
||||
}
|
||||
|
||||
// updateStreamState updates keepalive and peer count from communication channels
|
||||
func updateStreamState(communication *models.Communication, state *streamState) {
|
||||
select {
|
||||
case keepAliveStr := <-communication.HandleLiveHDKeepalive:
|
||||
if val, err := strconv.ParseInt(keepAliveStr, 10, 64); err == nil {
|
||||
state.lastKeepAlive = val
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case peerCountStr := <-communication.HandleLiveHDPeers:
|
||||
if val, err := strconv.ParseInt(peerCountStr, 10, 64); err == nil {
|
||||
state.peerCount = val
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// writeFinalSamples writes any remaining buffered samples
|
||||
func writeFinalSamples(state *streamState, videoTrack, audioTrack *pionWebRTC.TrackLocalStaticSample) {
|
||||
if state.lastVideoSample != nil && videoTrack != nil {
|
||||
if err := videoTrack.WriteSample(*state.lastVideoSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.writeFinalSamples(): error writing final video sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if state.lastAudioSample != nil && audioTrack != nil {
|
||||
if err := audioTrack.WriteSample(*state.lastAudioSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.writeFinalSamples(): error writing final audio sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processVideoPacket processes a video packet and writes samples to the track
|
||||
func processVideoPacket(pkt packets.Packet, state *streamState, videoTrack *pionWebRTC.TrackLocalStaticSample, config models.Config) {
|
||||
if videoTrack == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Start at the first keyframe
|
||||
if pkt.IsKeyFrame {
|
||||
state.start = true
|
||||
}
|
||||
|
||||
if !state.start {
|
||||
return
|
||||
}
|
||||
|
||||
sample := pionMedia.Sample{Data: pkt.Data, PacketTimestamp: uint32(pkt.Time)}
|
||||
|
||||
if config.Capture.ForwardWebRTC == "true" {
|
||||
// Remote forwarding not yet implemented
|
||||
log.Log.Debug("webrtc.main.processVideoPacket(): remote forwarding not implemented")
|
||||
return
|
||||
}
|
||||
|
||||
if state.lastVideoSample != nil {
|
||||
duration := sample.PacketTimestamp - state.lastVideoSample.PacketTimestamp
|
||||
state.lastVideoSample.Duration = time.Duration(duration) * time.Millisecond
|
||||
|
||||
if err := videoTrack.WriteSample(*state.lastVideoSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.processVideoPacket(): error writing video sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
state.lastVideoSample = &sample
|
||||
}
|
||||
|
||||
// processAudioPacket processes an audio packet and writes samples to the track
|
||||
func processAudioPacket(pkt packets.Packet, state *streamState, audioTrack *pionWebRTC.TrackLocalStaticSample, hasAAC bool) {
|
||||
if audioTrack == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if hasAAC {
|
||||
// AAC transcoding not yet implemented
|
||||
// TODO: Implement AAC to PCM_MULAW transcoding
|
||||
return
|
||||
}
|
||||
|
||||
sample := pionMedia.Sample{Data: pkt.Data, PacketTimestamp: uint32(pkt.Time)}
|
||||
|
||||
if state.lastAudioSample != nil {
|
||||
duration := sample.PacketTimestamp - state.lastAudioSample.PacketTimestamp
|
||||
state.lastAudioSample.Duration = time.Duration(duration) * time.Millisecond
|
||||
|
||||
if err := audioTrack.WriteSample(*state.lastAudioSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.processAudioPacket(): error writing audio sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
state.lastAudioSample = &sample
|
||||
}
|
||||
|
||||
func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, videoTrack *pionWebRTC.TrackLocalStaticSample, audioTrack *pionWebRTC.TrackLocalStaticSample, rtspClient capture.RTSPClient) {
|
||||
|
||||
config := configuration.Config
|
||||
|
||||
// Make peerconnection map
|
||||
peerConnections = make(map[string]*pionWebRTC.PeerConnection)
|
||||
|
||||
// Set the indexes for the video & audio streams
|
||||
// Later when we read a packet we need to figure out which track to send it to.
|
||||
hasH264 := false
|
||||
hasPCM_MULAW := false
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
for _, stream := range streams {
|
||||
if stream.Name == "H264" {
|
||||
hasH264 = true
|
||||
} else if stream.Name == "PCM_MULAW" {
|
||||
hasPCM_MULAW = true
|
||||
}
|
||||
// Check if at least one track is available
|
||||
if videoTrack == nil && audioTrack == nil {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): both video and audio tracks are nil, cannot proceed")
|
||||
return
|
||||
}
|
||||
|
||||
if !hasH264 && !hasPCM_MULAW {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): no valid video codec and audio codec found.")
|
||||
} else {
|
||||
if config.Capture.TranscodingWebRTC == "true" {
|
||||
// Todo..
|
||||
} else {
|
||||
//log.Log.Info("webrtc.main.WriteToTrack(): not using a transcoder.")
|
||||
}
|
||||
// Detect available codecs
|
||||
codecs := detectCodecs(rtspClient)
|
||||
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var previousTime time.Duration
|
||||
|
||||
start := false
|
||||
receivedKeyFrame := false
|
||||
lastKeepAlive := "0"
|
||||
peerCount := "0"
|
||||
|
||||
for cursorError == nil {
|
||||
|
||||
pkt, cursorError = livestreamCursor.ReadPacket()
|
||||
bufferDuration := pkt.Time - previousTime
|
||||
previousTime = pkt.Time
|
||||
|
||||
if config.Capture.ForwardWebRTC != "true" && peerConnectionCount == 0 {
|
||||
start = false
|
||||
receivedKeyFrame = false
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case lastKeepAlive = <-communication.HandleLiveHDKeepalive:
|
||||
default:
|
||||
}
|
||||
|
||||
select {
|
||||
case peerCount = <-communication.HandleLiveHDPeers:
|
||||
default:
|
||||
}
|
||||
|
||||
now := time.Now().Unix()
|
||||
lastKeepAliveN, _ := strconv.ParseInt(lastKeepAlive, 10, 64)
|
||||
hasTimedOut := (now - lastKeepAliveN) > 15 // if longer then no response in 15 sec.
|
||||
hasNoPeers := peerCount == "0"
|
||||
|
||||
if config.Capture.ForwardWebRTC == "true" && (hasTimedOut || hasNoPeers) {
|
||||
start = false
|
||||
receivedKeyFrame = false
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pkt.Data) == 0 || pkt.Data == nil {
|
||||
receivedKeyFrame = false
|
||||
continue
|
||||
}
|
||||
|
||||
if !receivedKeyFrame {
|
||||
if pkt.IsKeyFrame {
|
||||
receivedKeyFrame = true
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
//if config.Capture.TranscodingWebRTC == "true" {
|
||||
// We will transcode the video
|
||||
// TODO..
|
||||
//}
|
||||
|
||||
if pkt.IsVideo {
|
||||
// Start at the first keyframe
|
||||
if pkt.IsKeyFrame {
|
||||
start = true
|
||||
}
|
||||
if start {
|
||||
sample := pionMedia.Sample{Data: pkt.Data, Duration: bufferDuration}
|
||||
if config.Capture.ForwardWebRTC == "true" {
|
||||
// We will send the video to a remote peer
|
||||
// TODO..
|
||||
} else {
|
||||
if err := videoTrack.WriteSample(sample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
// We will send the audio
|
||||
sample := pionMedia.Sample{Data: pkt.Data, Duration: pkt.Time}
|
||||
if err := audioTrack.WriteSample(sample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, p := range peerConnections {
|
||||
if p != nil {
|
||||
p.Close()
|
||||
}
|
||||
if !codecs.hasValidCodecs() {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): no valid video or audio codec found")
|
||||
return
|
||||
}
|
||||
|
||||
peerConnectionCount = 0
|
||||
log.Log.Info("webrtc.main.WriteToTrack(): stop writing to track.")
|
||||
if config.Capture.TranscodingWebRTC == "true" {
|
||||
log.Log.Info("webrtc.main.WriteToTrack(): transcoding enabled but not yet implemented")
|
||||
}
|
||||
|
||||
// Initialize streaming state
|
||||
state := &streamState{
|
||||
lastKeepAlive: time.Now().Unix(),
|
||||
peerCount: 0,
|
||||
}
|
||||
|
||||
defer func() {
|
||||
writeFinalSamples(state, videoTrack, audioTrack)
|
||||
log.Log.Info("webrtc.main.WriteToTrack(): stopped writing to track")
|
||||
}()
|
||||
|
||||
var pkt packets.Packet
|
||||
var cursorError error
|
||||
|
||||
for cursorError == nil {
|
||||
pkt, cursorError = livestreamCursor.ReadPacket()
|
||||
|
||||
if cursorError != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Update state from communication channels
|
||||
updateStreamState(communication, state)
|
||||
|
||||
// Check if we should continue streaming
|
||||
if !shouldContinueStreaming(config, state) {
|
||||
state.start = false
|
||||
state.receivedKeyFrame = false
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip empty packets
|
||||
if len(pkt.Data) == 0 || pkt.Data == nil {
|
||||
state.receivedKeyFrame = false
|
||||
continue
|
||||
}
|
||||
|
||||
// Wait for first keyframe before processing
|
||||
if !state.receivedKeyFrame {
|
||||
if pkt.IsKeyFrame {
|
||||
state.receivedKeyFrame = true
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Process video or audio packets
|
||||
if pkt.IsVideo {
|
||||
processVideoPacket(pkt, state, videoTrack, config)
|
||||
} else if pkt.IsAudio {
|
||||
processAudioPacket(pkt, state, audioTrack, codecs.hasAAC)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
4
machinery/update-mod.sh
Executable file
4
machinery/update-mod.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
export GOSUMDB=off
|
||||
rm -rf go.*
|
||||
go mod init github.com/kerberos-io/agent/machinery
|
||||
go mod tidy
|
||||
6
snap/hooks/configure
vendored
6
snap/hooks/configure
vendored
@@ -1,6 +0,0 @@
|
||||
#!/bin/sh -e
|
||||
|
||||
cp -R $SNAP/data $SNAP_COMMON/
|
||||
cp -R $SNAP/www $SNAP_COMMON/
|
||||
cp -R $SNAP/version $SNAP_COMMON/
|
||||
cp -R $SNAP/mp4fragment $SNAP_COMMON/
|
||||
@@ -1,23 +0,0 @@
|
||||
name: kerberosio # you probably want to 'snapcraft register <name>'
|
||||
base: core22 # the base snap is the execution environment for this snap
|
||||
version: '3.1.0' # just for humans, typically '1.2+git' or '1.3.2'
|
||||
summary: A stand-alone open source video surveillance system # 79 char long summary
|
||||
description: |
|
||||
Kerberos Agent is an isolated and scalable video (surveillance) management
|
||||
agent made available as Open Source under the MIT License. This means that
|
||||
all the source code is available for you or your company, and you can use,
|
||||
transform and distribute the source code; as long you keep a reference of
|
||||
the original license. Kerberos Agent can be used for commercial usage.
|
||||
|
||||
grade: stable # stable # must be 'stable' to release into candidate/stable channels
|
||||
confinement: strict # use 'strict' once you have the right plugs and slots
|
||||
environment:
|
||||
GIN_MODE: release
|
||||
apps:
|
||||
agent:
|
||||
command: main -config /var/snap/kerberosio/common
|
||||
plugs: [ network, network-bind ]
|
||||
parts:
|
||||
agent:
|
||||
source: https://github.com/kerberos-io/agent/releases/download/21c0e01/agent-amd64.tar
|
||||
plugin: dump
|
||||
@@ -1,7 +1,6 @@
|
||||
{
|
||||
"name": "agent-ui",
|
||||
"version": "0.1.0",
|
||||
"private": false,
|
||||
"dependencies": {
|
||||
"@giantmachines/redux-websocket": "^1.5.1",
|
||||
"@kerberos-io/ui": "^1.76.0",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Allgemeine Einstellungen für den Kerberos Agent",
|
||||
"key": "Schlüssel",
|
||||
"camera_name": "Kamera Name",
|
||||
"camera_friendly_name": "Kamera Anzeigename",
|
||||
"timezone": "Zeitzone",
|
||||
"select_timezone": "Zeitzone auswählen",
|
||||
"advanced_configuration": "Erweiterte Konfiguration",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN Server",
|
||||
"turn_username": "Benutzername",
|
||||
"turn_password": "Passwort",
|
||||
"force_turn": "Erzwinge TURN",
|
||||
"force_turn_description": "Erzwinge die Verwendung von TURN",
|
||||
"stun_turn_forward": "Weiterleiten und transkodieren",
|
||||
"stun_turn_description_forward": "Optiemierungen und Verbesserungen der TURN/STUN Kommunikation.",
|
||||
"stun_turn_webrtc": "Weiterleiten an WebRTC Schnittstelle",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Die möglichkeit zur Speicherung der Daten an einem Zentralen Ort ist der Beginn einer effektiven Videoüberwachung. Es kann zwischen",
|
||||
"description2_persistence": ", oder einem Drittanbieter gewählt werden.",
|
||||
"select_persistence": "Speicherort auswählen",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "Der Proxy Endpunkt zum hochladen der Aufnahmen.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Profile",
|
||||
"admin": "admin",
|
||||
"admin": "Admin",
|
||||
"management": "Management",
|
||||
"dashboard": "Dashboard",
|
||||
"recordings": "Recordings",
|
||||
@@ -32,11 +32,11 @@
|
||||
"latest_events": "Latest events",
|
||||
"configure_connection": "Configure connection",
|
||||
"no_events": "No events",
|
||||
"no_events_description": "No recordings where found, make sure your Kerberos Agent is properly configured.",
|
||||
"no_events_description": "No recordings were found, make sure your Agent is properly configured.",
|
||||
"motion_detected": "Motion was detected",
|
||||
"live_view": "Live view",
|
||||
"loading_live_view": "Loading live view",
|
||||
"loading_live_view_description": "Hold on we are loading your live view here. If you didn't configure your camera connection, update it on the settings pages.",
|
||||
"loading_live_view_description": "Hold on, we are loading your live view here. If you didn't configure your camera connection, update it on the settings pages.",
|
||||
"time": "Time",
|
||||
"description": "Description",
|
||||
"name": "Name"
|
||||
@@ -59,31 +59,32 @@
|
||||
"persistence": "Persistence"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Have a look at our Kerberos Hub demo environment, to see Kerberos Hub in action!",
|
||||
"configuration_updated_success": "Your configuration have been updated successfully.",
|
||||
"kerberos_hub_demo": "Have a look at our Hub demo environment, to see Hub in action!",
|
||||
"configuration_updated_success": "Your configuration has been updated successfully.",
|
||||
"configuration_updated_error": "Something went wrong while saving.",
|
||||
"verify_hub": "Verifying your Kerberos Hub settings.",
|
||||
"verify_hub_success": "Kerberos Hub settings are successfully verified.",
|
||||
"verify_hub_error": "Something went wrong while verifying Kerberos Hub",
|
||||
"verify_hub": "Verifying your Hub settings.",
|
||||
"verify_hub_success": "Hub settings are successfully verified.",
|
||||
"verify_hub_error": "Something went wrong while verifying Hub.",
|
||||
"verify_persistence": "Verifying your persistence settings.",
|
||||
"verify_persistence_success": "Persistence settings are successfully verified.",
|
||||
"verify_persistence_error": "Something went wrong while verifying the persistence",
|
||||
"verify_persistence_error": "Something went wrong while verifying the persistence.",
|
||||
"verify_camera": "Verifying your camera settings.",
|
||||
"verify_camera_success": "Camera settings are successfully verified.",
|
||||
"verify_camera_error": "Something went wrong while verifying the camera settings",
|
||||
"verify_camera_error": "Something went wrong while verifying the camera settings.",
|
||||
"verify_onvif": "Verifying your ONVIF settings.",
|
||||
"verify_onvif_success": "ONVIF settings are successfully verified.",
|
||||
"verify_onvif_error": "Something went wrong while verifying the ONVIF settings"
|
||||
"verify_onvif_error": "Something went wrong while verifying the ONVIF settings."
|
||||
},
|
||||
"overview": {
|
||||
"general": "General",
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"description_general": "General settings for your Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
"description_advanced_configuration": "Detailed configuration options to enable or disable specific parts of the Kerberos Agent",
|
||||
"description_advanced_configuration": "Detailed configuration options to enable or disable specific parts of the Agent",
|
||||
"offline_mode": "Offline mode",
|
||||
"description_offline_mode": "Disable all outgoing traffic",
|
||||
"encryption": "Encryption",
|
||||
@@ -100,9 +101,9 @@
|
||||
"camera": "Camera",
|
||||
"description_camera": "Camera settings are required to make a connection to your camera of choice.",
|
||||
"only_h264": "Currently only H264/H265 RTSP streams are supported.",
|
||||
"rtsp_url": "RTSP url",
|
||||
"rtsp_url": "RTSP URL",
|
||||
"rtsp_h264": "A H264/H265 RTSP connection to your camera.",
|
||||
"sub_rtsp_url": "Sub RTSP url (used for livestreaming)",
|
||||
"sub_rtsp_url": "Sub RTSP URL (used for livestreaming)",
|
||||
"sub_rtsp_h264": "A secondary RTSP connection to the low resolution of your camera.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Credentials to communicate with ONVIF capabilities. These are used for PTZ or other capabilities provided by the camera.",
|
||||
@@ -114,28 +115,28 @@
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Recording",
|
||||
"description_recording": "Specify how you would like to make recordings. Having a continuous 24/7 setup or a motion based recording.",
|
||||
"description_recording": "Specify how you would like to make recordings. Having a continuous 24/7 setup or a motion-based recording.",
|
||||
"continuous_recording": "Continuous recording",
|
||||
"description_continuous_recording": "Make 24/7 or motion based recordings.",
|
||||
"max_duration": "max video duration (seconds)",
|
||||
"description_continuous_recording": "Make 24/7 or motion-based recordings.",
|
||||
"max_duration": "Max video duration (seconds)",
|
||||
"description_max_duration": "The maximum duration of a recording.",
|
||||
"pre_recording": "pre recording (key frames buffered)",
|
||||
"pre_recording": "Pre recording (key frames buffered)",
|
||||
"description_pre_recording": "Seconds before an event occurred.",
|
||||
"post_recording": "post recording (seconds)",
|
||||
"post_recording": "Post recording (seconds)",
|
||||
"description_post_recording": "Seconds after an event occurred.",
|
||||
"threshold": "Recording threshold (pixels)",
|
||||
"description_threshold": "The number of pixels changed to record",
|
||||
"description_threshold": "The number of pixels changed to record.",
|
||||
"autoclean": "Auto clean",
|
||||
"description_autoclean": "Specify if the Kerberos Agent can cleanup recordings when a specific storage capacity (MB) is reached. This will remove the oldest recordings when the capacity is reached.",
|
||||
"description_autoclean": "Specify if the Agent can clean up recordings when a specific storage capacity (MB) is reached. This will remove the oldest recordings when the capacity is reached.",
|
||||
"autoclean_enable": "Enable auto clean",
|
||||
"autoclean_description_enable": "Remove oldest recording when capacity reached.",
|
||||
"autoclean_max_directory_size": "Maximum directory size (MB)",
|
||||
"autoclean_description_max_directory_size": "The maximum MB's of recordings stored.",
|
||||
"autoclean_description_max_directory_size": "The maximum MBs of recordings stored.",
|
||||
"fragmentedrecordings": "Fragmented recordings",
|
||||
"description_fragmentedrecordings": "When recordings are fragmented they are suitable for an HLS stream. When turned on the MP4 container will look a bit different.",
|
||||
"description_fragmentedrecordings": "When recordings are fragmented they are suitable for an HLS stream. When turned on, the MP4 container will look a bit different.",
|
||||
"fragmentedrecordings_enable": "Enable fragmentation",
|
||||
"fragmentedrecordings_description_enable": "Fragmented recordings are required for HLS.",
|
||||
"fragmentedrecordings_duration": "fragment duration",
|
||||
"fragmentedrecordings_duration": "Fragment duration",
|
||||
"fragmentedrecordings_description_duration": "Duration of a single fragment."
|
||||
},
|
||||
"streaming": {
|
||||
@@ -145,19 +146,26 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_description_forward": "Optimizations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
"stun_turn_description_webrtc": "Forward h264 stream through MQTT",
|
||||
"stun_turn_description_webrtc": "Forward H264 stream through MQTT",
|
||||
"stun_turn_transcode": "Transcode stream",
|
||||
"stun_turn_description_transcode": "Convert stream to a lower resolution",
|
||||
"stun_turn_downscale": "Downscale resolution (in % of original resolution)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "A MQTT broker is used to communicate from",
|
||||
"description2_mqtt": "to the Kerberos Agent, to achieve for example livestreaming or ONVIF (PTZ) capabilities.",
|
||||
"mqtt_brokeruri": "Broker Uri",
|
||||
"description_mqtt": "An MQTT broker is used to communicate from",
|
||||
"description2_mqtt": "to the Agent, to achieve for example livestreaming or ONVIF (PTZ) capabilities.",
|
||||
"mqtt_brokeruri": "Broker URI",
|
||||
"mqtt_username": "Username",
|
||||
"mqtt_password": "Password"
|
||||
"mqtt_password": "Password",
|
||||
"realtimeprocessing": "Realtime Processing",
|
||||
"description_realtimeprocessing": "By enabling realtime processing, you will receive realtime video keyframes through the MQTT connection specified above.",
|
||||
"realtimeprocessing_topic": "Topic to publish",
|
||||
"realtimeprocessing_enabled": "Enable realtime processing",
|
||||
"description_realtimeprocessing_enabled": "Send realtime video keyframes through MQTT."
|
||||
},
|
||||
"conditions": {
|
||||
"timeofinterest": "Time Of Interest",
|
||||
@@ -172,53 +180,61 @@
|
||||
"friday": "Friday",
|
||||
"saturday": "Saturday",
|
||||
"externalcondition": "External Condition",
|
||||
"description_externalcondition": "Depending on an external webservice recording can be enabled or disabled.",
|
||||
"description_externalcondition": "Depending on an external web service, recording can be enabled or disabled.",
|
||||
"regionofinterest": "Region Of Interest",
|
||||
"description_regionofinterest": "By defining one or more regions, motion will be tracked only in the regions you have defined."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Kerberos Agents can send heartbeats to a central",
|
||||
"description2_kerberoshub": "installation. Heartbeats and other relevant information are synced to Kerberos Hub to show realtime information about your video landscape.",
|
||||
"kerberoshub": "Hub",
|
||||
"description_kerberoshub": "Agents can send heartbeats to a central",
|
||||
"description2_kerberoshub": "installation. Heartbeats and other relevant information are synced to Hub to show realtime information about your video landscape.",
|
||||
"persistence": "Persistence",
|
||||
"saasoffering": "Kerberos Hub (SAAS offering)",
|
||||
"secondary_persistence": "Secondary Persistence",
|
||||
"description_secondary_persistence": "Recordings will be sent to secondary persistence if the primary persistence is unavailable or fails. This can be useful for failover purposes.",
|
||||
"saasoffering": "Hub (SaaS offering)",
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Hub will be encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
"kerberoshub_apiurl": "Hub API URL",
|
||||
"kerberoshub_description_apiurl": "The API endpoint for uploading your recordings.",
|
||||
"kerberoshub_publickey": "Public key",
|
||||
"kerberoshub_description_publickey": "The public key granted to your Kerberos Hub account.",
|
||||
"kerberoshub_description_publickey": "The public key granted to your Hub account.",
|
||||
"kerberoshub_privatekey": "Private key",
|
||||
"kerberoshub_description_privatekey": "The private key granted to your Kerberos Hub account.",
|
||||
"kerberoshub_description_privatekey": "The private key granted to your Hub account.",
|
||||
"kerberoshub_site": "Site",
|
||||
"kerberoshub_description_site": "The site ID the Kerberos Agents are belonging to in Kerberos Hub.",
|
||||
"kerberoshub_description_site": "The site ID the Agents belong to in Hub.",
|
||||
"kerberoshub_region": "Region",
|
||||
"kerberoshub_description_region": "The region we are storing our recordings in.",
|
||||
"kerberoshub_bucket": "Bucket",
|
||||
"kerberoshub_description_bucket": "The bucket we are storing our recordings in.",
|
||||
"kerberoshub_username": "Username/Directory (should match Kerberos Hub username)",
|
||||
"kerberoshub_description_username": "The username of your Kerberos Hub account.",
|
||||
"kerberosvault_apiurl": "Kerberos Vault API URL",
|
||||
"kerberosvault_description_apiurl": "The Kerberos Vault API",
|
||||
"kerberoshub_username": "Username/Directory (should match Hub username)",
|
||||
"kerberoshub_description_username": "The username of your Hub account.",
|
||||
"kerberosvault_apiurl": "Vault API URL",
|
||||
"kerberosvault_description_apiurl": "The Vault API",
|
||||
"kerberosvault_provider": "Provider",
|
||||
"kerberosvault_description_provider": "The provider to which your recordings will be send.",
|
||||
"kerberosvault_directory": "Directory (should match Kerberos Hub username)",
|
||||
"kerberosvault_description_directory": "Sub directory the recordings will be stored in your provider.",
|
||||
"kerberosvault_description_provider": "The provider to which your recordings will be sent.",
|
||||
"kerberosvault_directory": "Directory (should match Hub username)",
|
||||
"kerberosvault_description_directory": "Subdirectory the recordings will be stored in your provider.",
|
||||
"kerberosvault_accesskey": "Access key",
|
||||
"kerberosvault_description_accesskey": "The access key of your Kerberos Vault account.",
|
||||
"kerberosvault_description_accesskey": "The access key of your Vault account.",
|
||||
"kerberosvault_secretkey": "Secret key",
|
||||
"kerberosvault_description_secretkey": "The secret key of your Kerberos Vault account.",
|
||||
"kerberosvault_description_secretkey": "The secret key of your Vault account.",
|
||||
"kerberosvault_maxretries": "Max retries",
|
||||
"kerberosvault_description_maxretries": "The maximum number of retries to upload a recording.",
|
||||
"kerberosvault_timeout": "Timeout",
|
||||
"kerberosvault_description_timeout": "If a timeout occurs, recordings will be sent directly to the secondary Vault.",
|
||||
"dropbox_directory": "Directory",
|
||||
"dropbox_description_directory": "The sub directory where the recordings will be stored in your Dropbox account.",
|
||||
"dropbox_description_directory": "The subdirectory where the recordings will be stored in your Dropbox account.",
|
||||
"dropbox_accesstoken": "Access token",
|
||||
"dropbox_description_accesstoken": "The access token of your Dropbox account/app.",
|
||||
"verify_connection": "Verify Connection",
|
||||
"remove_after_upload": "Once recordings are uploaded to some persistence, you might want to remove them from the local Kerberos Agent.",
|
||||
"remove_after_upload": "Once recordings are uploaded to some persistence, you might want to remove them from the local Agent.",
|
||||
"remove_after_upload_description": "Remove recordings after they are uploaded successfully.",
|
||||
"remove_after_upload_enabled": "Enabled delete on upload"
|
||||
"remove_after_upload_enabled": "Enable delete on upload"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Camera friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -79,6 +79,7 @@
|
||||
"description_general": "Paramètres généraux pour votre Agent Kerberos",
|
||||
"key": "Clé",
|
||||
"camera_name": "Nom de la caméra",
|
||||
"camera_friendly_name": "Nom convivial de la caméra",
|
||||
"timezone": "Fuseau horaire",
|
||||
"select_timezone": "Sélectionner un fuseau horaire",
|
||||
"advanced_configuration": "Configuration avancée",
|
||||
@@ -144,6 +145,8 @@
|
||||
"turn_server": "Serveur TURN",
|
||||
"turn_username": "Nom d'utilisateur",
|
||||
"turn_password": "Mot de passe",
|
||||
"force_turn": "Forcer l'utilisation de TURN",
|
||||
"force_turn_description": "Forcer l'utilisation de TURN au lieu de STUN",
|
||||
"stun_turn_forward": "Redirection et transcodage",
|
||||
"stun_turn_description_forward": "Optimisations et améliorations pour la communication TURN/STUN.",
|
||||
"stun_turn_webrtc": "Redirection pour l'agent WebRTC",
|
||||
@@ -184,6 +187,8 @@
|
||||
"description_persistence": "Avoir la possibilité de stocker vos enregistrements est le commencement de tout. Vous pouvez choisir entre notre",
|
||||
"description2_persistence": " ou auprès d'un fournisseur tiers",
|
||||
"select_persistence": "Sélectionner une persistance",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "URL du proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Le point de terminaison du proxy pour téléverser vos enregistrements.",
|
||||
"kerberoshub_apiurl": "URL de l'API Kerberos Hub",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "आपके Kerberos एजेंट के लिए सामान्य सेटिंग्स",
|
||||
"key": "की",
|
||||
"camera_name": "कैमरे का नाम",
|
||||
"camera_friendly_name": "कैमरे का नाम",
|
||||
"timezone": "समय क्षेत्र",
|
||||
"select_timezone": "समयक्षेत्र चुनें",
|
||||
"advanced_configuration": "एडवांस कॉन्फ़िगरेशन",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "उपयोगकर्ता नाम",
|
||||
"turn_password": "पासवर्ड",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "फोरवर्डींग और ट्रांसकोडिंग",
|
||||
"stun_turn_description_forward": "TURN/STUN संचार के लिए अनुकूलन और संवर्द्धन।",
|
||||
"stun_turn_webrtc": "WebRTC ब्रोकर को फोरवर्डींग किया जा रहा है",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "अपनी रिकॉर्डिंग संग्रहीत करने की क्षमता होना हर चीज़ की शुरुआत है। ",
|
||||
"description2_persistence": ", या कोई तृतीय पक्ष प्रदाता",
|
||||
"select_persistence": "एक दृढ़ता का चयन करें",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos हब प्रॉक्सी URL",
|
||||
"kerberoshub_description_proxyurl": "आपकी रिकॉर्डिंग अपलोड करने के लिए प्रॉक्सी एंडपॉइंट।",
|
||||
"kerberoshub_apiurl": "Kerberos हब API URL",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Impostazioni generali del Kerberos Agent",
|
||||
"key": "Chiave",
|
||||
"camera_name": "Nome videocamera",
|
||||
"camera_friendly_name": "Nome amichevole videocamera",
|
||||
"timezone": "Fuso orario",
|
||||
"select_timezone": "Seleziona un fuso orario",
|
||||
"advanced_configuration": "Configurazione avanzata",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Forza TURN",
|
||||
"force_turn_description": "Forza l'uso di TURN per lo streaming in diretta.",
|
||||
"stun_turn_forward": "Inoltro e transcodifica",
|
||||
"stun_turn_description_forward": "Ottimizzazioni e miglioramenti per la comunicazione TURN/STUN.",
|
||||
"stun_turn_webrtc": "Inoltro al broker WebRTC",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "La possibilità di poter salvare le tue registrazioni rappresenta l'inizio di tutto. Puoi scegliere tra il nostro",
|
||||
"description2_persistence": ", oppure un provider di terze parti",
|
||||
"select_persistence": "Seleziona una persistenza",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "URL Proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Endpoint del Proxy per l'upload delle registrazioni.",
|
||||
"kerberoshub_apiurl": "API URL Kerberos Hub",
|
||||
@@ -221,4 +226,4 @@
|
||||
"remove_after_upload_enabled": "Abilita cancellazione al caricamento"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Kerberos エージェントの一般設定",
|
||||
"key": "鍵",
|
||||
"camera_name": "カメラ名",
|
||||
"camera_friendly_name": "カメラのフレンドリー名",
|
||||
"timezone": "タイムゾーン",
|
||||
"select_timezone": "タイムゾーンを選択",
|
||||
"advanced_configuration": "詳細設定",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURNサーバー",
|
||||
"turn_username": "ユーザー名",
|
||||
"turn_password": "パスワード",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "転送とトランスコーディング",
|
||||
"stun_turn_description_forward": "TURN/STUN 通信の最適化と機能強化。",
|
||||
"stun_turn_webrtc": "WebRTC ブローカーへの転送",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "録音を保存する機能を持つことは、すべての始まりです。",
|
||||
"description2_persistence": "、またはサードパーティのプロバイダ",
|
||||
"select_persistence": "永続性を選択",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos ハブ プロキシ URL",
|
||||
"kerberoshub_description_proxyurl": "記録をアップロードするためのプロキシ エンドポイント。",
|
||||
"kerberoshub_apiurl": "ケルベロス ハブ API URL",
|
||||
@@ -221,4 +226,4 @@
|
||||
"remove_after_upload_enabled": "Enabled delete on upload"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Algemene instellingen voor jouw Kerberos Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera naam",
|
||||
"camera_friendly_name": "Camera vriendelijke naam",
|
||||
"timezone": "Tijdzone",
|
||||
"select_timezone": "Selecteer uw tijdzone",
|
||||
"advanced_configuration": "Geavanceerde instellingen",
|
||||
@@ -146,6 +147,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Gebruikersnaam",
|
||||
"turn_password": "Wachtwoord",
|
||||
"force_turn": "Verplicht TURN",
|
||||
"force_turn_description": "Verplicht TURN connectie, ook al is er een STUN connectie mogelijk.",
|
||||
"stun_turn_forward": "Doorsturen en transcoden",
|
||||
"stun_turn_description_forward": "Optimalisatie en verbetering voor TURN/STUN communicatie.",
|
||||
"stun_turn_webrtc": "Doorsturen naar een WebRTC broker",
|
||||
@@ -186,6 +189,8 @@
|
||||
"description_persistence": "De mogelijkheid om jouw opnames op te slaan is het begin van alles. Je kan kiezen tussen ons",
|
||||
"description2_persistence": ", of een 3rd party provider",
|
||||
"select_persistence": "Selecteer een opslagmethode",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "De Proxy url voor het opladen van jouw opnames.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Camera friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN server",
|
||||
"turn_username": "Username",
|
||||
"turn_password": "Password",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Configurações gerais para seu agente Kerberos",
|
||||
"key": "Chave",
|
||||
"camera_name": "Nome da câmera",
|
||||
"camera_friendly_name": "Nome amigável da câmera",
|
||||
"timezone": "Fuso horário",
|
||||
"select_timezone": "Selecione a timezone",
|
||||
"advanced_configuration": "Configurações avançadas",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "Servidor TURN",
|
||||
"turn_username": "Usuario",
|
||||
"turn_password": "Senha",
|
||||
"force_turn": "Forçar TURN",
|
||||
"force_turn_description": "Forçar o uso de TURN em vez de STUN.",
|
||||
"stun_turn_forward": "Encaminhamento e transcodificação",
|
||||
"stun_turn_description_forward": "Otimizações e melhorias para a comunicação TURN/STUN.",
|
||||
"stun_turn_webrtc": "Encaminhamento para broker WebRTC",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Ter a capacidade de armazenar suas gravações é o começo de tudo. Você pode escolher entre nossos",
|
||||
"description2_persistence": ", ou um provedor terceirizado",
|
||||
"select_persistence": "Selecione um provedor de armazenamento",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Url proxy para Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "O endpoint Proxy para enviar suas gravações.",
|
||||
"kerberoshub_apiurl": "Url de API do Kerberos Hub",
|
||||
|
||||
234
ui/public/locales/ro/translation.json
Normal file
234
ui/public/locales/ro/translation.json
Normal file
@@ -0,0 +1,234 @@
|
||||
{
|
||||
"breadcrumb": {
|
||||
"watch_recordings": "Vizionează înregistrări",
|
||||
"configure": "Configurează"
|
||||
},
|
||||
"buttons": {
|
||||
"save": "Salvează",
|
||||
"verify_connection": "Verifică conexiunea"
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Profil",
|
||||
"admin": "admin",
|
||||
"management": "Management",
|
||||
"dashboard": "Tablou de bord",
|
||||
"recordings": "Înregistrări",
|
||||
"settings": "Setări",
|
||||
"help_support": "Ajutor & Suport",
|
||||
"swagger": "Swagger API",
|
||||
"documentation": "Documentație",
|
||||
"ui_library": "Bibliotecă UI",
|
||||
"layout": "Limbă și aspect",
|
||||
"choose_language": "Alege limba"
|
||||
},
|
||||
"dashboard": {
|
||||
"title": "Tablou de bord",
|
||||
"heading": "Prezentare generală a supravegherii video",
|
||||
"number_of_days": "Număr de zile",
|
||||
"total_recordings": "Înregistrări totale",
|
||||
"connected": "Conectat",
|
||||
"not_connected": "Neconectat",
|
||||
"offline_mode": "Mod offline",
|
||||
"latest_events": "Evenimente recente",
|
||||
"configure_connection": "Configurează conexiunea",
|
||||
"no_events": "Niciun eveniment",
|
||||
"no_events_description": "Nu au fost găsite înregistrări, asigurați-vă că agentul dvs. Kerberos este configurat corect.",
|
||||
"motion_detected": "Mișcare detectată",
|
||||
"live_view": "Vizualizare live",
|
||||
"loading_live_view": "Se încarcă vizualizarea live",
|
||||
"loading_live_view_description": "Așteptați, încărcăm vizualizarea dvs. live. Dacă nu ați configurat conexiunea camerei, actualizați-o în paginile de setări.",
|
||||
"time": "Timp",
|
||||
"description": "Descriere",
|
||||
"name": "Nume"
|
||||
},
|
||||
"recordings": {
|
||||
"title": "Înregistrări",
|
||||
"heading": "Toate înregistrările tale într-un singur loc",
|
||||
"search_media": "Caută media"
|
||||
},
|
||||
"settings": {
|
||||
"title": "Setări",
|
||||
"heading": "Prezentare generală a setărilor camerei și agentului",
|
||||
"submenu": {
|
||||
"all": "Toate",
|
||||
"overview": "General",
|
||||
"camera": "Camera",
|
||||
"recording": "Înregistrare",
|
||||
"streaming": "Streaming",
|
||||
"conditions": "Condiții",
|
||||
"persistence": "Persistență"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Aruncă o privire asupra mediului nostru de demonstrație Kerberos Hub, pentru a vedea Kerberos Hub în acțiune!",
|
||||
"configuration_updated_success": "Configurarea ta a fost actualizată cu succes.",
|
||||
"configuration_updated_error": "Ceva a mers prost în timpul salvării.",
|
||||
"verify_hub": "Verificarea setărilor tale Kerberos Hub.",
|
||||
"verify_hub_success": "Setările Kerberos Hub au fost verificate cu succes.",
|
||||
"verify_hub_error": "Ceva a mers prost în timpul verificării Kerberos Hub.",
|
||||
"verify_persistence": "Verificarea setărilor tale de persistență.",
|
||||
"verify_persistence_success": "Setările de persistență au fost verificate cu succes.",
|
||||
"verify_persistence_error": "Ceva a mers prost în timpul verificării persistenței.",
|
||||
"verify_camera": "Verificarea setărilor tale pentru cameră.",
|
||||
"verify_camera_success": "Setările pentru cameră au fost verificate cu succes.",
|
||||
"verify_camera_error": "Ceva a mers prost în timpul verificării setărilor pentru cameră.",
|
||||
"verify_onvif": "Verificarea setărilor tale ONVIF.",
|
||||
"verify_onvif_success": "Setările ONVIF au fost verificate cu succes.",
|
||||
"verify_onvif_error": "Ceva a mers prost în timpul verificării setărilor ONVIF."
|
||||
},
|
||||
"overview": {
|
||||
"general": "General",
|
||||
"description_general": "Setări generale pentru Agentul tău Kerberos",
|
||||
"key": "Cheie",
|
||||
"camera_name": "Numele camerei",
|
||||
"camera_friendly_name": "Nume prietenos",
|
||||
"timezone": "Fus orar",
|
||||
"select_timezone": "Selectează un fus orar",
|
||||
"advanced_configuration": "Configurare avansată",
|
||||
"description_advanced_configuration": "Opțiuni detaliate de configurare pentru activarea sau dezactivarea anumitor părți ale Agentului Kerberos",
|
||||
"offline_mode": "Mod offline",
|
||||
"description_offline_mode": "Dezactivează tot traficul ieșit",
|
||||
"encryption": "Criptare",
|
||||
"description_encryption": "Activează criptarea pentru tot traficul ieșit. Mesajele MQTT și/sau înregistrările vor fi criptate folosind AES-256. O cheie privată este utilizată pentru semnare.",
|
||||
"encryption_enabled": "Activează criptarea MQTT",
|
||||
"description_encryption_enabled": "Activează criptarea pentru toate mesajele MQTT.",
|
||||
"encryption_recordings_enabled": "Activează criptarea înregistrărilor",
|
||||
"description_encryption_recordings_enabled": "Activează criptarea pentru toate înregistrările.",
|
||||
"encryption_fingerprint": "Amprentă",
|
||||
"encryption_privatekey": "Cheie privată",
|
||||
"encryption_symmetrickey": "Cheie simetrică"
|
||||
},
|
||||
"camera": {
|
||||
"camera": "Camera",
|
||||
"description_camera": "Setările camerei sunt necesare pentru a face o conexiune cu camera aleasă de tine.",
|
||||
"only_h264": "În prezent sunt suportate doar fluxurile RTSP H264/H265.",
|
||||
"rtsp_url": "URL RTSP",
|
||||
"rtsp_h264": "O conexiune RTSP H264/H265 la camera ta.",
|
||||
"sub_rtsp_url": "URL RTSP secundar (folosit pentru transmisie live)",
|
||||
"sub_rtsp_h264": "O conexiune RTSP secundară la rezoluția redusă a camerei tale.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Credențiale pentru comunicarea cu capabilitățile ONVIF. Acestea sunt folosite pentru funcții PTZ sau alte capabilități oferite de cameră.",
|
||||
"onvif_xaddr": "Adresă ONVIF",
|
||||
"onvif_username": "Nume utilizator ONVIF",
|
||||
"onvif_password": "Parolă ONVIF",
|
||||
"verify_connection": "Verifică conexiunea",
|
||||
"verify_sub_connection": "Verifică conexiunea secundară"
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Înregistrare",
|
||||
"description_recording": "Specificați cum doriți să realizați înregistrări. Puteți avea o configurație continuă 24/7 sau înregistrări bazate pe mișcare.",
|
||||
"continuous_recording": "Înregistrare continuă",
|
||||
"description_continuous_recording": "Realizați înregistrări 24/7 sau bazate pe mișcare.",
|
||||
"max_duration": "durata maximă a videoclipului (secunde)",
|
||||
"description_max_duration": "Durata maximă a unei înregistrări.",
|
||||
"pre_recording": "pre-înregistrare (cadre cheie tamponate)",
|
||||
"description_pre_recording": "Secunde înainte de producerea unui eveniment.",
|
||||
"post_recording": "post-înregistrare (secunde)",
|
||||
"description_post_recording": "Secunde după producerea unui eveniment.",
|
||||
"threshold": "Prag de înregistrare (pixeli)",
|
||||
"description_threshold": "Numărul de pixeli modificați pentru a înregistra.",
|
||||
"autoclean": "Curățare automată",
|
||||
"description_autoclean": "Specificați dacă Agentul Kerberos poate curăța automat înregistrările când se atinge o anumită capacitate de stocare (MB). Se vor șterge cele mai vechi înregistrări când se atinge capacitatea specificată.",
|
||||
"autoclean_enable": "Activează curățarea automată",
|
||||
"autoclean_description_enable": "Șterge cele mai vechi înregistrări când capacitatea este atinsă.",
|
||||
"autoclean_max_directory_size": "Dimensiunea maximă a directorului (MB)",
|
||||
"autoclean_description_max_directory_size": "Maximum de MB stocați în înregistrări.",
|
||||
"fragmentedrecordings": "Înregistrări fragmentate",
|
||||
"description_fragmentedrecordings": "Când înregistrările sunt fragmentate, sunt potrivite pentru un flux HLS. Când este activat, containerul MP4 va arăta puțin diferit.",
|
||||
"fragmentedrecordings_enable": "Activează fragmentarea",
|
||||
"fragmentedrecordings_description_enable": "Înregistrările fragmentate sunt necesare pentru HLS.",
|
||||
"fragmentedrecordings_duration": "durata fragmentului",
|
||||
"fragmentedrecordings_description_duration": "Durata unui singur fragment."
|
||||
},
|
||||
"streaming": {
|
||||
"stun_turn": "STUN/TURN pentru WebRTC",
|
||||
"description_stun_turn": "Pentru transmisii live la rezoluție completă folosim conceptul WebRTC. Una dintre capabilitățile cheie este funcționalitatea ICE-candidate, care permite traversarea NAT folosind conceptele STUN/TURN.",
|
||||
"stun_server": "Server STUN",
|
||||
"turn_server": "Server TURN",
|
||||
"turn_username": "Nume utilizator",
|
||||
"turn_password": "Parolă",
|
||||
"force_turn": "Forțează TURN",
|
||||
"force_turn_description": "Utilizează TURN în mod forțat, chiar și atunci când STUN este disponibil.",
|
||||
"stun_turn_forward": "Redirecționare și transcodare",
|
||||
"stun_turn_description_forward": "Optimizări și îmbunătățiri pentru comunicarea TURN/STUN.",
|
||||
"stun_turn_webrtc": "Redirecționare către broker WebRTC",
|
||||
"stun_turn_description_webrtc": "Redirecționare flux h264 prin MQTT",
|
||||
"stun_turn_transcode": "Transcodare flux",
|
||||
"stun_turn_description_transcode": "Convertire flux la o rezoluție mai mică",
|
||||
"stun_turn_downscale": "Scădere rezoluție (în % din rezoluția originală)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "Un broker MQTT este utilizat pentru comunicare de la",
|
||||
"description2_mqtt": "către Agentul Kerberos, pentru a realiza, de exemplu, transmisiuni live sau capabilități ONVIF (PTZ).",
|
||||
"mqtt_brokeruri": "URI broker MQTT",
|
||||
"mqtt_username": "Nume utilizator",
|
||||
"mqtt_password": "Parolă",
|
||||
"realtimeprocessing": "Procesare în timp real",
|
||||
"description_realtimeprocessing": "Prin activarea procesării în timp real, veți primi cadre cheie video în timp real prin conexiunea MQTT specificată mai sus.",
|
||||
"realtimeprocessing_topic": "Topic pentru publicare",
|
||||
"realtimeprocessing_enabled": "Activează procesarea în timp real",
|
||||
"description_realtimeprocessing_enabled": "Trimite cadre video în timp real prin MQTT."
|
||||
},
|
||||
"conditions": {
|
||||
"timeofinterest": "Timpul de Interes",
|
||||
"description_timeofinterest": "Realizează înregistrări doar între intervale de timp specifice (bazate pe fusul orar).",
|
||||
"timeofinterest_enabled": "Activat",
|
||||
"timeofinterest_description_enabled": "Dacă este activat, puteți specifica intervale de timp",
|
||||
"sunday": "Duminică",
|
||||
"monday": "Luni",
|
||||
"tuesday": "Marți",
|
||||
"wednesday": "Miercuri",
|
||||
"thursday": "Joi",
|
||||
"friday": "Vineri",
|
||||
"saturday": "Sâmbătă",
|
||||
"externalcondition": "Condiție Externă",
|
||||
"description_externalcondition": "În funcție de un serviciu web extern, înregistrarea poate fi activată sau dezactivată.",
|
||||
"regionofinterest": "Regiunea de Interes",
|
||||
"description_regionofinterest": "Prin definirea unei sau mai multor regiuni, mișcarea va fi urmărită doar în regiunile pe care le-ați definit."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Agenta Kerberos poate trimite semnale de puls către o",
|
||||
"description2_kerberoshub": "instalație centrală. Semnalele de puls și alte informații relevante sunt sincronizate cu Kerberos Hub pentru a afișa informații în timp real despre peisajul video.",
|
||||
"persistence": "Persistență",
|
||||
"saasoffering": "Kerberos Hub (ofertă SAAS)",
|
||||
"description_persistence": "Capacitatea de a stoca înregistrările este începutul fiecărei",
|
||||
"description2_persistence": ", sau de la un furnizor terț",
|
||||
"select_persistence": "Selectați o persistență",
|
||||
"kerberoshub_encryption": "Criptare",
|
||||
"kerberoshub_encryption_description": "Tot traficul de la/spre Kerberos Hub va fi criptat folosind AES-256.",
|
||||
"kerberoshub_proxyurl": "URL Proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Punctul final Proxy pentru încărcarea înregistrărilor tale.",
|
||||
"kerberoshub_apiurl": "URL API Kerberos Hub",
|
||||
"kerberoshub_description_apiurl": "Punctul final API pentru încărcarea înregistrărilor tale.",
|
||||
"kerberoshub_publickey": "Cheie publică",
|
||||
"kerberoshub_description_publickey": "Cheia publică acordată contului tău Kerberos Hub.",
|
||||
"kerberoshub_privatekey": "Cheie privată",
|
||||
"kerberoshub_description_privatekey": "Cheia privată acordată contului tău Kerberos Hub.",
|
||||
"kerberoshub_site": "Site",
|
||||
"kerberoshub_description_site": "ID-ul site-ului la care aparțin Agenta Kerberos în Kerberos Hub.",
|
||||
"kerberoshub_region": "Regiune",
|
||||
"kerberoshub_description_region": "Regiunea în care sunt stocate înregistrările noastre.",
|
||||
"kerberoshub_bucket": "Bucket",
|
||||
"kerberoshub_description_bucket": "Bucket-ul în care sunt stocate înregistrările noastre.",
|
||||
"kerberoshub_username": "Nume utilizator/Director (trebuie să se potrivească cu numele de utilizator Kerberos Hub)",
|
||||
"kerberoshub_description_username": "Numele de utilizator al contului tău Kerberos Hub.",
|
||||
"kerberosvault_apiurl": "URL API Kerberos Vault",
|
||||
"kerberosvault_description_apiurl": "API-ul Kerberos Vault",
|
||||
"kerberosvault_provider": "Furnizor",
|
||||
"kerberosvault_description_provider": "Furnizorul către care vor fi trimise înregistrările tale.",
|
||||
"kerberosvault_directory": "Director (trebuie să se potrivească cu numele de utilizator Kerberos Hub)",
|
||||
"kerberosvault_description_directory": "Subdirectorul în care vor fi stocate înregistrările la furnizorul tău.",
|
||||
"kerberosvault_accesskey": "Cheie de acces",
|
||||
"kerberosvault_description_accesskey": "Cheia de acces a contului tău Kerberos Vault.",
|
||||
"kerberosvault_secretkey": "Cheie secretă",
|
||||
"kerberosvault_description_secretkey": "Cheia secretă a contului tău Kerberos Vault.",
|
||||
"dropbox_directory": "Director",
|
||||
"dropbox_description_directory": "Subdirectorul în care vor fi stocate înregistrările în contul tău Dropbox.",
|
||||
"dropbox_accesstoken": "Token de acces",
|
||||
"dropbox_description_accesstoken": "Tokenul de acces al contului/aplicației tale Dropbox.",
|
||||
"verify_connection": "Verifică conexiunea",
|
||||
"remove_after_upload": "Odată ce înregistrările sunt încărcate într-o persistență, este posibil să doriți să le ștergeți de pe Agenta Kerberos locală.",
|
||||
"remove_after_upload_description": "Ștergeți înregistrările după ce sunt încărcate cu succes.",
|
||||
"remove_after_upload_enabled": "Ștergere activată la încărcare"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Общие настройки Kerberos Agent",
|
||||
"key": "Ключ",
|
||||
"camera_name": "Название камеры",
|
||||
"camera_friendly_name": "Дружественное название камеры",
|
||||
"timezone": "Часовой пояс",
|
||||
"select_timezone": "Выберите часовой пояс",
|
||||
"advanced_configuration": "Расширенные настройки",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN сервер",
|
||||
"turn_username": "Имя пользователя",
|
||||
"turn_password": "Пароль",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Переадресация и транскодирование",
|
||||
"stun_turn_description_forward": "Оптимизация и усовершенствование связи TURN/STUN.",
|
||||
"stun_turn_webrtc": "Переадресация на WebRTC-брокера",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "Возможность хранения записей - это начало всего. Вы можете выбрать один из наших вариантов",
|
||||
"description2_persistence": ", или стороннего провайдера",
|
||||
"select_persistence": "Выберите хранилище",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "Конечная точка Proxy для загрузки записей.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
234
ui/public/locales/vi/translation.json
Normal file
234
ui/public/locales/vi/translation.json
Normal file
@@ -0,0 +1,234 @@
|
||||
{
|
||||
"breadcrumb": {
|
||||
"watch_recordings": "Xem bản ghi",
|
||||
"configure": "Cấu hình"
|
||||
},
|
||||
"buttons": {
|
||||
"save": "Lưu",
|
||||
"verify_connection": "Xác minh kết nối"
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Hồ sơ",
|
||||
"admin": "Quản trị",
|
||||
"management": "Quản lý",
|
||||
"dashboard": "Bảng điều khiển",
|
||||
"recordings": "Bản ghi",
|
||||
"settings": "Cài đặt",
|
||||
"help_support": "Trợ giúp & Hỗ trợ",
|
||||
"swagger": "API Swagger",
|
||||
"documentation": "Tài liệu",
|
||||
"ui_library": "Thư viện UI",
|
||||
"layout": "Ngôn ngữ & Bố cục",
|
||||
"choose_language": "Chọn ngôn ngữ"
|
||||
},
|
||||
"dashboard": {
|
||||
"title": "Bảng điều khiển",
|
||||
"heading": "Tổng quan về giám sát video của bạn",
|
||||
"number_of_days": "Số ngày",
|
||||
"total_recordings": "Tổng số bản ghi",
|
||||
"connected": "Đã kết nối",
|
||||
"not_connected": "Chưa kết nối",
|
||||
"offline_mode": "Chế độ ngoại tuyến",
|
||||
"latest_events": "Sự kiện gần đây",
|
||||
"configure_connection": "Cấu hình kết nối",
|
||||
"no_events": "Không có sự kiện",
|
||||
"no_events_description": "Không tìm thấy bản ghi nào, hãy đảm bảo Kerberos Agent của bạn được cấu hình đúng cách.",
|
||||
"motion_detected": "Phát hiện chuyển động",
|
||||
"live_view": "Xem trực tiếp",
|
||||
"loading_live_view": "Đang tải xem trực tiếp",
|
||||
"loading_live_view_description": "Vui lòng chờ trong khi chúng tôi tải xem trực tiếp của bạn. Nếu bạn chưa cấu hình kết nối camera, hãy cập nhật trong trang cài đặt.",
|
||||
"time": "Thời gian",
|
||||
"description": "Mô tả",
|
||||
"name": "Tên"
|
||||
},
|
||||
"recordings": {
|
||||
"title": "Bản ghi",
|
||||
"heading": "Tất cả bản ghi của bạn ở một nơi",
|
||||
"search_media": "Tìm kiếm phương tiện"
|
||||
},
|
||||
"settings": {
|
||||
"title": "Cài đặt",
|
||||
"heading": "Thiết lập camera của bạn",
|
||||
"submenu": {
|
||||
"all": "Tất cả",
|
||||
"overview": "Tổng quan",
|
||||
"camera": "Camera",
|
||||
"recording": "Ghi hình",
|
||||
"streaming": "Truyền phát",
|
||||
"conditions": "Điều kiện",
|
||||
"persistence": "Lưu trữ"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Xem thử môi trường demo của Kerberos Hub để thấy Kerberos Hub hoạt động như thế nào!",
|
||||
"configuration_updated_success": "Cấu hình của bạn đã được cập nhật thành công.",
|
||||
"configuration_updated_error": "Đã xảy ra lỗi khi lưu.",
|
||||
"verify_hub": "Đang xác minh cài đặt Kerberos Hub của bạn.",
|
||||
"verify_hub_success": "Cài đặt Kerberos Hub đã được xác minh thành công.",
|
||||
"verify_hub_error": "Đã xảy ra lỗi khi xác minh Kerberos Hub",
|
||||
"verify_persistence": "Đang xác minh cài đặt lưu trữ.",
|
||||
"verify_persistence_success": "Cài đặt lưu trữ đã được xác minh thành công.",
|
||||
"verify_persistence_error": "Đã xảy ra lỗi khi xác minh lưu trữ",
|
||||
"verify_camera": "Đang xác minh cài đặt camera.",
|
||||
"verify_camera_success": "Cài đặt camera đã được xác minh thành công.",
|
||||
"verify_camera_error": "Đã xảy ra lỗi khi xác minh cài đặt camera",
|
||||
"verify_onvif": "Đang xác minh cài đặt ONVIF.",
|
||||
"verify_onvif_success": "Cài đặt ONVIF đã được xác minh thành công.",
|
||||
"verify_onvif_error": "Đã xảy ra lỗi khi xác minh cài đặt ONVIF"
|
||||
},
|
||||
"overview": {
|
||||
"general": "Chung",
|
||||
"description_general": "Cài đặt chung cho Kerberos Agent của bạn",
|
||||
"key": "Khóa",
|
||||
"camera_name": "Tên camera",
|
||||
"camera_friendly_name": "Tên thân thiện",
|
||||
"timezone": "Múi giờ",
|
||||
"select_timezone": "Chọn múi giờ",
|
||||
"advanced_configuration": "Cấu hình nâng cao",
|
||||
"description_advanced_configuration": "Tùy chọn cấu hình chi tiết để bật hoặc tắt các phần cụ thể của Kerberos Agent",
|
||||
"offline_mode": "Chế độ ngoại tuyến",
|
||||
"description_offline_mode": "Vô hiệu hóa toàn bộ lưu lượng đi",
|
||||
"encryption": "Mã hóa",
|
||||
"description_encryption": "Bật mã hóa cho toàn bộ lưu lượng đi. Các tin nhắn MQTT và/hoặc bản ghi sẽ được mã hóa bằng AES-256. Một khóa riêng tư được sử dụng để ký.",
|
||||
"encryption_enabled": "Bật mã hóa MQTT",
|
||||
"description_encryption_enabled": "Bật mã hóa cho toàn bộ tin nhắn MQTT.",
|
||||
"encryption_recordings_enabled": "Bật mã hóa bản ghi",
|
||||
"description_encryption_recordings_enabled": "Bật mã hóa cho tất cả các bản ghi.",
|
||||
"encryption_fingerprint": "Dấu vân tay",
|
||||
"encryption_privatekey": "Khóa riêng tư",
|
||||
"encryption_symmetrickey": "Khóa đối xứng"
|
||||
},
|
||||
"camera": {
|
||||
"camera": "Camera",
|
||||
"description_camera": "Cài đặt camera là bắt buộc để kết nối với camera bạn chọn.",
|
||||
"only_h264": "Hiện tại chỉ hỗ trợ luồng RTSP H264/H265.",
|
||||
"rtsp_url": "URL RTSP",
|
||||
"rtsp_h264": "Kết nối RTSP H264/H265 với camera của bạn.",
|
||||
"sub_rtsp_url": "URL RTSP phụ (dùng để phát trực tiếp)",
|
||||
"sub_rtsp_h264": "Kết nối RTSP phụ với độ phân giải thấp của camera.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Thông tin xác thực để giao tiếp với các chức năng ONVIF. Chúng được sử dụng cho PTZ hoặc các khả năng khác do camera cung cấp.",
|
||||
"onvif_xaddr": "Địa chỉ ONVIF",
|
||||
"onvif_username": "Tên người dùng ONVIF",
|
||||
"onvif_password": "Mật khẩu ONVIF",
|
||||
"verify_connection": "Xác minh kết nối",
|
||||
"verify_sub_connection": "Xác minh kết nối phụ"
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Ghi hình",
|
||||
"description_recording": "Chỉ định cách bạn muốn thực hiện ghi hình. Có thể ghi liên tục 24/7 hoặc dựa trên chuyển động.",
|
||||
"continuous_recording": "Ghi hình liên tục",
|
||||
"description_continuous_recording": "Ghi hình liên tục 24/7 hoặc dựa trên chuyển động.",
|
||||
"max_duration": "Thời lượng video tối đa (giây)",
|
||||
"description_max_duration": "Thời lượng tối đa của một bản ghi.",
|
||||
"pre_recording": "Ghi trước (khung hình chính được lưu vào bộ đệm)",
|
||||
"description_pre_recording": "Số giây trước khi sự kiện xảy ra.",
|
||||
"post_recording": "Ghi sau (giây)",
|
||||
"description_post_recording": "Số giây sau khi sự kiện xảy ra.",
|
||||
"threshold": "Ngưỡng ghi hình (pixel)",
|
||||
"description_threshold": "Số pixel thay đổi cần đạt để bắt đầu ghi hình.",
|
||||
"autoclean": "Tự động dọn dẹp",
|
||||
"description_autoclean": "Chỉ định xem Kerberos Agent có thể dọn dẹp các bản ghi khi dung lượng lưu trữ đạt giới hạn nhất định (MB) hay không. Hệ thống sẽ xóa bản ghi cũ nhất khi đạt giới hạn.",
|
||||
"autoclean_enable": "Bật tự động dọn dẹp",
|
||||
"autoclean_description_enable": "Xóa bản ghi cũ nhất khi đạt giới hạn dung lượng.",
|
||||
"autoclean_max_directory_size": "Dung lượng thư mục tối đa (MB)",
|
||||
"autoclean_description_max_directory_size": "Dung lượng tối đa (MB) của các bản ghi được lưu trữ.",
|
||||
"fragmentedrecordings": "Ghi hình phân đoạn",
|
||||
"description_fragmentedrecordings": "Khi các bản ghi được phân đoạn, chúng phù hợp để phát trực tuyến HLS. Khi bật, định dạng MP4 sẽ có một số khác biệt.",
|
||||
"fragmentedrecordings_enable": "Bật ghi hình phân đoạn",
|
||||
"fragmentedrecordings_description_enable": "Ghi hình phân đoạn là bắt buộc đối với HLS.",
|
||||
"fragmentedrecordings_duration": "Thời lượng phân đoạn",
|
||||
"fragmentedrecordings_description_duration": "Thời lượng của một phân đoạn duy nhất."
|
||||
},
|
||||
"streaming": {
|
||||
"stun_turn": "STUN/TURN cho WebRTC",
|
||||
"description_stun_turn": "Để phát trực tiếp độ phân giải đầy đủ, chúng tôi sử dụng khái niệm WebRTC. Một trong những tính năng chính là ICE-candidate, cho phép vượt qua NAT bằng STUN/TURN.",
|
||||
"stun_server": "Máy chủ STUN",
|
||||
"turn_server": "Máy chủ TURN",
|
||||
"turn_username": "Tên người dùng",
|
||||
"turn_password": "Mật khẩu",
|
||||
"force_turn": "Buộc sử dụng TURN",
|
||||
"force_turn_description": "Buộc sử dụng TURN ngay cả khi STUN có sẵn.",
|
||||
"stun_turn_forward": "Chuyển tiếp và mã hóa",
|
||||
"stun_turn_description_forward": "Tối ưu hóa và cải thiện giao tiếp TURN/STUN.",
|
||||
"stun_turn_webrtc": "Chuyển tiếp đến WebRTC broker",
|
||||
"stun_turn_description_webrtc": "Chuyển tiếp luồng H264 qua MQTT",
|
||||
"stun_turn_transcode": "Chuyển mã luồng",
|
||||
"stun_turn_description_transcode": "Chuyển đổi luồng sang độ phân giải thấp hơn",
|
||||
"stun_turn_downscale": "Giảm độ phân giải (theo % của độ phân giải gốc)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "Một MQTT broker được sử dụng để giao tiếp từ",
|
||||
"description2_mqtt": "đến Kerberos Agent, nhằm hỗ trợ phát trực tiếp hoặc chức năng ONVIF (PTZ).",
|
||||
"mqtt_brokeruri": "Broker Uri",
|
||||
"mqtt_username": "Tên người dùng",
|
||||
"mqtt_password": "Mật khẩu",
|
||||
"realtimeprocessing": "Xử lý thời gian thực",
|
||||
"description_realtimeprocessing": "Bằng cách bật xử lý thời gian thực, bạn sẽ nhận được các khung hình video thời gian thực qua kết nối MQTT đã chỉ định.",
|
||||
"realtimeprocessing_topic": "Chủ đề để xuất bản",
|
||||
"realtimeprocessing_enabled": "Bật xử lý thời gian thực",
|
||||
"description_realtimeprocessing_enabled": "Gửi khung hình video thời gian thực qua MQTT."
|
||||
},
|
||||
"conditions": {
|
||||
"timeofinterest": "Thời gian quan tâm",
|
||||
"description_timeofinterest": "Chỉ ghi hình trong các khoảng thời gian cụ thể (dựa trên múi giờ).",
|
||||
"timeofinterest_enabled": "Đã bật",
|
||||
"timeofinterest_description_enabled": "Nếu bật, bạn có thể chỉ định các khoảng thời gian ghi hình.",
|
||||
"sunday": "Chủ nhật",
|
||||
"monday": "Thứ hai",
|
||||
"tuesday": "Thứ ba",
|
||||
"wednesday": "Thứ tư",
|
||||
"thursday": "Thứ năm",
|
||||
"friday": "Thứ sáu",
|
||||
"saturday": "Thứ bảy",
|
||||
"externalcondition": "Điều kiện bên ngoài",
|
||||
"description_externalcondition": "Tùy thuộc vào một dịch vụ web bên ngoài, việc ghi hình có thể được bật hoặc tắt.",
|
||||
"regionofinterest": "Khu vực quan tâm",
|
||||
"description_regionofinterest": "Bằng cách xác định một hoặc nhiều khu vực, hệ thống sẽ chỉ theo dõi chuyển động trong các khu vực bạn đã chọn."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Các Kerberos Agent có thể gửi tín hiệu nhịp tim đến một hệ thống trung tâm",
|
||||
"description2_kerberoshub": "để đồng bộ hóa thông tin quan trọng với Kerberos Hub, giúp hiển thị trạng thái giám sát video theo thời gian thực.",
|
||||
"persistence": "Lưu trữ",
|
||||
"saasoffering": "Kerberos Hub (dịch vụ SAAS)",
|
||||
"description_persistence": "Khả năng lưu trữ bản ghi là bước khởi đầu của mọi thứ. Bạn có thể chọn giữa dịch vụ của chúng tôi",
|
||||
"description2_persistence": "hoặc một nhà cung cấp bên thứ ba.",
|
||||
"select_persistence": "Chọn phương thức lưu trữ",
|
||||
"kerberoshub_encryption": "Mã hóa",
|
||||
"kerberoshub_encryption_description": "Tất cả lưu lượng đến/từ Kerberos Hub sẽ được mã hóa bằng AES-256.",
|
||||
"kerberoshub_proxyurl": "URL Proxy Kerberos Hub",
|
||||
"kerberoshub_description_proxyurl": "Điểm cuối Proxy để tải bản ghi lên.",
|
||||
"kerberoshub_apiurl": "URL API Kerberos Hub",
|
||||
"kerberoshub_description_apiurl": "Điểm cuối API để tải bản ghi lên.",
|
||||
"kerberoshub_publickey": "Khóa công khai",
|
||||
"kerberoshub_description_publickey": "Khóa công khai được cấp cho tài khoản Kerberos Hub của bạn.",
|
||||
"kerberoshub_privatekey": "Khóa riêng tư",
|
||||
"kerberoshub_description_privatekey": "Khóa riêng tư được cấp cho tài khoản Kerberos Hub của bạn.",
|
||||
"kerberoshub_site": "Trang web",
|
||||
"kerberoshub_description_site": "ID trang web mà các Kerberos Agent thuộc về trong Kerberos Hub.",
|
||||
"kerberoshub_region": "Khu vực",
|
||||
"kerberoshub_description_region": "Khu vực nơi chúng tôi lưu trữ bản ghi.",
|
||||
"kerberoshub_bucket": "Kho lưu trữ",
|
||||
"kerberoshub_description_bucket": "Kho lưu trữ nơi chúng tôi lưu trữ bản ghi.",
|
||||
"kerberoshub_username": "Tên người dùng / Thư mục (phải khớp với tên người dùng Kerberos Hub)",
|
||||
"kerberoshub_description_username": "Tên người dùng tài khoản Kerberos Hub của bạn.",
|
||||
"kerberosvault_apiurl": "URL API Kerberos Vault",
|
||||
"kerberosvault_description_apiurl": "API của Kerberos Vault",
|
||||
"kerberosvault_provider": "Nhà cung cấp",
|
||||
"kerberosvault_description_provider": "Nhà cung cấp nơi bản ghi của bạn sẽ được gửi đến.",
|
||||
"kerberosvault_directory": "Thư mục (phải khớp với tên người dùng Kerberos Hub)",
|
||||
"kerberosvault_description_directory": "Thư mục con nơi các bản ghi sẽ được lưu trữ trong nhà cung cấp của bạn.",
|
||||
"kerberosvault_accesskey": "Khóa truy cập",
|
||||
"kerberosvault_description_accesskey": "Khóa truy cập của tài khoản Kerberos Vault của bạn.",
|
||||
"kerberosvault_secretkey": "Khóa bí mật",
|
||||
"kerberosvault_description_secretkey": "Khóa bí mật của tài khoản Kerberos Vault của bạn.",
|
||||
"dropbox_directory": "Thư mục",
|
||||
"dropbox_description_directory": "Thư mục con nơi bản ghi sẽ được lưu trữ trong tài khoản Dropbox của bạn.",
|
||||
"dropbox_accesstoken": "Mã truy cập",
|
||||
"dropbox_description_accesstoken": "Mã truy cập của tài khoản / ứng dụng Dropbox của bạn.",
|
||||
"verify_connection": "Xác minh kết nối",
|
||||
"remove_after_upload": "Sau khi bản ghi được tải lên một hệ thống lưu trữ, bạn có thể muốn xóa chúng khỏi Kerberos Agent cục bộ.",
|
||||
"remove_after_upload_description": "Xóa bản ghi sau khi chúng được tải lên thành công.",
|
||||
"remove_after_upload_enabled": "Bật xóa sau khi tải lên"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -80,6 +80,7 @@
|
||||
"description_general": "Kerberos Agent 常规设置",
|
||||
"key": "Key",
|
||||
"camera_name": "相机名称",
|
||||
"camera_friendly_name": "相机友好名称",
|
||||
"timezone": "时区",
|
||||
"select_timezone": "选择时区",
|
||||
"advanced_configuration": "高级配置",
|
||||
@@ -145,6 +146,8 @@
|
||||
"turn_server": "TURN 服务",
|
||||
"turn_username": "账户",
|
||||
"turn_password": "密码",
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "转发和转码",
|
||||
"stun_turn_description_forward": "TURN/STUN 通信的优化和增强。",
|
||||
"stun_turn_webrtc": "转发到 WebRTC 代理",
|
||||
@@ -185,6 +188,8 @@
|
||||
"description_persistence": "能够存储您的录像是一切的开始。您可以在我们的",
|
||||
"description2_persistence": ", 或第三方提供商之间进行选择。",
|
||||
"select_persistence": "选择持久化存储",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub 代理 URL",
|
||||
"kerberoshub_description_proxyurl": "用于上传您录像的代理端点",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
|
||||
@@ -100,7 +100,7 @@ class App extends React.Component {
|
||||
</div>
|
||||
)}
|
||||
<div id="page-root">
|
||||
<Sidebar logo={logo} title="Kerberos Agent" version="v1-beta" mobile>
|
||||
<Sidebar logo={logo} title="Kerberos Agent" version="v3.1.8" mobile>
|
||||
<Profilebar
|
||||
username={username}
|
||||
email="support@kerberos.io"
|
||||
|
||||
@@ -4,6 +4,7 @@ import {
|
||||
doVerifyOnvif,
|
||||
doVerifyHub,
|
||||
doVerifyPersistence,
|
||||
doVerifySecondaryPersistence,
|
||||
doGetKerberosAgentTags,
|
||||
doGetDashboardInformation,
|
||||
doGetEvents,
|
||||
@@ -107,6 +108,28 @@ export const verifyPersistence = (config, onSuccess, onError) => {
|
||||
};
|
||||
};
|
||||
|
||||
export const verifySecondaryPersistence = (config, onSuccess, onError) => {
|
||||
return (dispatch) => {
|
||||
doVerifySecondaryPersistence(
|
||||
config,
|
||||
() => {
|
||||
dispatch({
|
||||
type: 'VERIFY_SECONDARY_PERSISTENCE',
|
||||
});
|
||||
if (onSuccess) {
|
||||
onSuccess();
|
||||
}
|
||||
},
|
||||
(error) => {
|
||||
const { data } = error.response.data;
|
||||
if (onError) {
|
||||
onError(data);
|
||||
}
|
||||
}
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
export const verifyHub = (config, onSuccess, onError) => {
|
||||
return (dispatch) => {
|
||||
doVerifyHub(
|
||||
|
||||
@@ -72,6 +72,25 @@ export function doVerifyPersistence(config, onSuccess, onError) {
|
||||
});
|
||||
}
|
||||
|
||||
export function doVerifySecondaryPersistence(config, onSuccess, onError) {
|
||||
const endpoint = API.post(`persistence/secondary/verify`, {
|
||||
...config,
|
||||
});
|
||||
endpoint
|
||||
.then((res) => {
|
||||
if (res.status !== 200) {
|
||||
throw new Error(res.data);
|
||||
}
|
||||
return res.data;
|
||||
})
|
||||
.then((data) => {
|
||||
onSuccess(data);
|
||||
})
|
||||
.catch((error) => {
|
||||
onError(error);
|
||||
});
|
||||
}
|
||||
|
||||
export function doVerifyHub(config, onSuccess, onError) {
|
||||
const endpoint = API.post(`hub/verify`, {
|
||||
...config,
|
||||
|
||||
@@ -26,6 +26,7 @@ const LanguageSelect = () => {
|
||||
ja: { label: '日本', dir: 'rlt', active: false },
|
||||
hi: { label: 'हिंदी', dir: 'ltr', active: false },
|
||||
ru: { label: 'Русский', dir: 'ltr', active: false },
|
||||
ro: { label: 'Română', dir: 'ltr', active: false },
|
||||
};
|
||||
|
||||
if (!languageMap[selected]) {
|
||||
|
||||
@@ -9,9 +9,9 @@ const dev = {
|
||||
ENV: 'dev',
|
||||
// Comment the below lines, when using codespaces or other special DNS names (which you can't control)
|
||||
HOSTNAME: hostname,
|
||||
API_URL: `${protocol}//${hostname}:80/api`,
|
||||
URL: `${protocol}//${hostname}:80`,
|
||||
WS_URL: `${websocketprotocol}//${hostname}:80/ws`,
|
||||
API_URL: `${protocol}//${hostname}:8080/api`,
|
||||
URL: `${protocol}//${hostname}:8080`,
|
||||
WS_URL: `${websocketprotocol}//${hostname}:8080/ws`,
|
||||
MODE: window['env']['mode'],
|
||||
// Uncomment, and comment the above lines, when using codespaces or other special DNS names (which you can't control)
|
||||
// HOSTNAME: externalHost,
|
||||
|
||||
@@ -14,7 +14,7 @@ i18n
|
||||
escapeValue: false,
|
||||
},
|
||||
load: 'languageOnly',
|
||||
whitelist: ['de', 'en', 'nl', 'fr', 'pl', 'es', 'pt', 'ja', 'ru'],
|
||||
whitelist: ['de', 'en', 'nl', 'fr', 'pl', 'es', 'pt', 'ja', 'ru', 'ro'],
|
||||
});
|
||||
|
||||
export default i18n;
|
||||
|
||||
@@ -33,6 +33,7 @@ import {
|
||||
verifyCamera,
|
||||
verifyHub,
|
||||
verifyPersistence,
|
||||
verifySecondaryPersistence,
|
||||
getConfig,
|
||||
updateConfig,
|
||||
} from '../../actions/agent';
|
||||
@@ -63,6 +64,9 @@ class Settings extends React.Component {
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifyPersistenceMessage: '',
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
verifySecondaryPersistenceMessage: '',
|
||||
verifyCameraSuccess: false,
|
||||
verifyCameraError: false,
|
||||
verifyCameraMessage: '',
|
||||
@@ -70,6 +74,7 @@ class Settings extends React.Component {
|
||||
verifyOnvifError: false,
|
||||
verifyOnvifErrorMessage: '',
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
loadingHub: false,
|
||||
loadingCamera: false,
|
||||
};
|
||||
@@ -125,6 +130,8 @@ class Settings extends React.Component {
|
||||
this.onUpdateTimeline = this.onUpdateTimeline.bind(this);
|
||||
this.initialiseLiveview = this.initialiseLiveview.bind(this);
|
||||
this.verifyPersistenceSettings = this.verifyPersistenceSettings.bind(this);
|
||||
this.verifySecondaryPersistenceSettings =
|
||||
this.verifySecondaryPersistenceSettings.bind(this);
|
||||
this.verifyHubSettings = this.verifyHubSettings.bind(this);
|
||||
this.verifyCameraSettings = this.verifyCameraSettings.bind(this);
|
||||
this.verifySubCameraSettings = this.verifySubCameraSettings.bind(this);
|
||||
@@ -350,6 +357,8 @@ class Settings extends React.Component {
|
||||
configSuccess: false,
|
||||
configError: false,
|
||||
loadingCamera: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
loadingOnvif: true,
|
||||
});
|
||||
|
||||
@@ -390,6 +399,8 @@ class Settings extends React.Component {
|
||||
configError: false,
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
verifyHubSuccess: false,
|
||||
verifyHubError: false,
|
||||
verifyHubErrorMessage: '',
|
||||
@@ -401,6 +412,8 @@ class Settings extends React.Component {
|
||||
verifyOnvifSuccess: false,
|
||||
verifyOnvifError: false,
|
||||
loadingHub: true,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
|
||||
// .... test fields
|
||||
@@ -441,6 +454,8 @@ class Settings extends React.Component {
|
||||
verifyHubError: false,
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
verifyCameraSuccess: false,
|
||||
@@ -449,6 +464,7 @@ class Settings extends React.Component {
|
||||
verifyOnvifError: false,
|
||||
verifyCameraErrorMessage: '',
|
||||
loading: true,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
|
||||
dispatchVerifyPersistence(
|
||||
@@ -461,6 +477,7 @@ class Settings extends React.Component {
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
},
|
||||
(error) => {
|
||||
@@ -471,6 +488,58 @@ class Settings extends React.Component {
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
verifySecondaryPersistenceSettings() {
|
||||
const { config, dispatchVerifySecondaryPersistence } = this.props;
|
||||
if (config) {
|
||||
this.setState({
|
||||
configSuccess: false,
|
||||
configError: false,
|
||||
verifyHubSuccess: false,
|
||||
verifyHubError: false,
|
||||
verifyPersistenceSuccess: false,
|
||||
verifyPersistenceError: false,
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: false,
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
verifyCameraSuccess: false,
|
||||
verifyCameraError: false,
|
||||
verifyOnvifSuccess: false,
|
||||
verifyOnvifError: false,
|
||||
verifyCameraErrorMessage: '',
|
||||
loading: false,
|
||||
loadingSecondary: true,
|
||||
});
|
||||
|
||||
dispatchVerifySecondaryPersistence(
|
||||
config.config,
|
||||
() => {
|
||||
this.setState({
|
||||
verifySecondaryPersistenceSuccess: true,
|
||||
verifySecondaryPersistenceError: false,
|
||||
verifySecondaryPersistenceMessage: '',
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
},
|
||||
(error) => {
|
||||
this.setState({
|
||||
verifySecondaryPersistenceSuccess: false,
|
||||
verifySecondaryPersistenceError: true,
|
||||
verifySecondaryPersistenceMessage: error,
|
||||
persistenceSuccess: false,
|
||||
persistenceError: false,
|
||||
loading: false,
|
||||
loadingSecondary: false,
|
||||
});
|
||||
}
|
||||
);
|
||||
@@ -537,6 +606,9 @@ class Settings extends React.Component {
|
||||
verifyPersistenceSuccess,
|
||||
verifyPersistenceError,
|
||||
verifyPersistenceMessage,
|
||||
verifySecondaryPersistenceSuccess,
|
||||
verifySecondaryPersistenceError,
|
||||
verifySecondaryPersistenceMessage,
|
||||
verifyCameraSuccess,
|
||||
verifyCameraError,
|
||||
verifyCameraErrorMessage,
|
||||
@@ -546,6 +618,7 @@ class Settings extends React.Component {
|
||||
verifyOnvifErrorMessage,
|
||||
loadingCamera,
|
||||
loading,
|
||||
loadingSecondary,
|
||||
loadingHub,
|
||||
} = this.state;
|
||||
|
||||
@@ -798,6 +871,20 @@ class Settings extends React.Component {
|
||||
)} :${verifyPersistenceMessage}`}
|
||||
/>
|
||||
)}
|
||||
{verifySecondaryPersistenceSuccess && (
|
||||
<InfoBar
|
||||
type="success"
|
||||
message={t('settings.info.verify_persistence_success')}
|
||||
/>
|
||||
)}
|
||||
{verifySecondaryPersistenceError && (
|
||||
<InfoBar
|
||||
type="alert"
|
||||
message={`${t(
|
||||
'settings.info.verify_persistence_error'
|
||||
)} :${verifySecondaryPersistenceMessage}`}
|
||||
/>
|
||||
)}
|
||||
<div className="stats grid-container --two-columns">
|
||||
<div>
|
||||
{/* General settings block */}
|
||||
@@ -824,6 +911,15 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.overview.camera_friendly_name')}
|
||||
defaultValue={config.friendly_name}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField('', 'friendly_name', value, config)
|
||||
}
|
||||
/>
|
||||
|
||||
<Dropdown
|
||||
isRadio
|
||||
icon="world"
|
||||
@@ -1088,6 +1184,101 @@ class Settings extends React.Component {
|
||||
this.onUpdateField('', 'turn_password', value, config)
|
||||
}
|
||||
/>
|
||||
<br />
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.turn_force === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle('', 'turn_force', event, config)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.force_turn')}</span>
|
||||
<p>{t('settings.streaming.force_turn_description')}</p>
|
||||
</div>
|
||||
</div>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
label={t('buttons.save')}
|
||||
onClick={this.saveConfig}
|
||||
type="default"
|
||||
icon="pencil"
|
||||
/>
|
||||
</BlockFooter>
|
||||
</Block>
|
||||
)}
|
||||
|
||||
{/* STUN/TURN block */}
|
||||
{showStreamingSection && config.offline !== 'true' && (
|
||||
<Block>
|
||||
<BlockHeader>
|
||||
<h4>{t('settings.streaming.stun_turn_forward')}</h4>
|
||||
</BlockHeader>
|
||||
<BlockBody>
|
||||
<p>{t('settings.streaming.stun_turn_description_forward')}</p>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.forwardwebrtc === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'forwardwebrtc',
|
||||
event,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_webrtc')}</span>
|
||||
<p>
|
||||
{t('settings.streaming.stun_turn_description_webrtc')}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.transcodingwebrtc === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'transcodingwebrtc',
|
||||
event,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_transcode')}</span>
|
||||
<p>
|
||||
{t(
|
||||
'settings.streaming.stun_turn_description_transcode'
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{config.capture.transcodingwebrtc === 'true' && (
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.streaming.stun_turn_downscale')}
|
||||
value={config.capture.transcodingresolution}
|
||||
placeholder="The % of the original resolution."
|
||||
onChange={(value) =>
|
||||
this.onUpdateNumberField(
|
||||
'capture',
|
||||
'transcodingresolution',
|
||||
value,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
)}
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
@@ -1129,7 +1320,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberoshub_publickey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberoshub_description_publickey'
|
||||
@@ -1140,7 +1332,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberoshub_privatekey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberoshub_description_privatekey'
|
||||
@@ -1161,6 +1354,27 @@ class Settings extends React.Component {
|
||||
this.onUpdateField('', 'hub_site', value, config)
|
||||
}
|
||||
/>
|
||||
|
||||
<br />
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.hub_encryption === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle('', 'hub_encryption', event, config)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>
|
||||
{t('settings.persistence.kerberoshub_encryption')}
|
||||
</span>
|
||||
<p>
|
||||
{t(
|
||||
'settings.persistence.kerberoshub_encryption_description'
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
@@ -1336,7 +1550,8 @@ class Settings extends React.Component {
|
||||
</div>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.overview.encryption_fingerprint')}
|
||||
value={config.encryption.fingerprint}
|
||||
onChange={(value) =>
|
||||
@@ -1349,7 +1564,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.overview.encryption_privatekey')}
|
||||
value={config.encryption.private_key}
|
||||
onChange={(value) =>
|
||||
@@ -1362,7 +1578,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.overview.encryption_symmetrickey')}
|
||||
value={config.encryption.symmetric_key}
|
||||
onChange={(value) =>
|
||||
@@ -1439,75 +1656,55 @@ class Settings extends React.Component {
|
||||
</Block>
|
||||
)}
|
||||
|
||||
{/* STUN/TURN block */}
|
||||
{showStreamingSection && config.offline !== 'true' && (
|
||||
<Block>
|
||||
<BlockHeader>
|
||||
<h4>{t('settings.streaming.stun_turn_forward')}</h4>
|
||||
<h4>{t('settings.streaming.realtimeprocessing')}</h4>
|
||||
</BlockHeader>
|
||||
<BlockBody>
|
||||
<p>{t('settings.streaming.stun_turn_description_forward')}</p>
|
||||
<p>
|
||||
{t('settings.streaming.description_realtimeprocessing')}
|
||||
</p>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.forwardwebrtc === 'true'}
|
||||
on={config.realtimeprocessing === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'forwardwebrtc',
|
||||
'',
|
||||
'realtimeprocessing',
|
||||
event,
|
||||
config.capture
|
||||
config
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_webrtc')}</span>
|
||||
<p>
|
||||
{t('settings.streaming.stun_turn_description_webrtc')}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="toggle-wrapper">
|
||||
<Toggle
|
||||
on={config.capture.transcodingwebrtc === 'true'}
|
||||
disabled={false}
|
||||
onClick={(event) =>
|
||||
this.onUpdateToggle(
|
||||
'capture',
|
||||
'transcodingwebrtc',
|
||||
event,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
<div>
|
||||
<span>{t('settings.streaming.stun_turn_transcode')}</span>
|
||||
<span>
|
||||
{t('settings.streaming.realtimeprocessing_enabled')}
|
||||
</span>
|
||||
<p>
|
||||
{t(
|
||||
'settings.streaming.stun_turn_description_transcode'
|
||||
'settings.streaming.description_realtimeprocessing_enabled'
|
||||
)}
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{config.capture.transcodingwebrtc === 'true' && (
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.streaming.stun_turn_downscale')}
|
||||
value={config.capture.transcodingresolution}
|
||||
placeholder="The % of the original resolution."
|
||||
onChange={(value) =>
|
||||
this.onUpdateNumberField(
|
||||
'capture',
|
||||
'transcodingresolution',
|
||||
value,
|
||||
config.capture
|
||||
)
|
||||
}
|
||||
/>
|
||||
)}
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.streaming.realtimeprocessing_topic')}
|
||||
value={config.realtimeprocessing_topic}
|
||||
placeholder="kerberos/keyframes/key"
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'',
|
||||
'realtimeprocessing_topic',
|
||||
value,
|
||||
config
|
||||
)
|
||||
}
|
||||
/>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
@@ -2296,7 +2493,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_accesskey'
|
||||
)}
|
||||
@@ -2316,7 +2514,8 @@ class Settings extends React.Component {
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_secretkey'
|
||||
)}
|
||||
@@ -2337,6 +2536,43 @@ class Settings extends React.Component {
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_maxretries'
|
||||
)}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_maxretries'
|
||||
)}
|
||||
value={
|
||||
config.kstorage ? config.kstorage.max_retries : ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage',
|
||||
'max_retries',
|
||||
value,
|
||||
config.kstorage
|
||||
)
|
||||
}
|
||||
/>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_timeout')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_timeout'
|
||||
)}
|
||||
value={config.kstorage ? config.kstorage.timeout : ''}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage',
|
||||
'timeout',
|
||||
value,
|
||||
config.kstorage
|
||||
)
|
||||
}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
{config.cloud === this.DROPBOX && (
|
||||
@@ -2396,6 +2632,140 @@ class Settings extends React.Component {
|
||||
</BlockFooter>
|
||||
</Block>
|
||||
)}
|
||||
|
||||
{/* Secondary Vault block */}
|
||||
{showPersistenceSection && config.cloud === this.KERBEROS_VAULT && (
|
||||
<Block>
|
||||
<BlockHeader>
|
||||
<h4>{t('settings.persistence.secondary_persistence')}</h4>
|
||||
</BlockHeader>
|
||||
<BlockBody>
|
||||
<p>
|
||||
{t(
|
||||
'settings.persistence.description_secondary_persistence'
|
||||
)}
|
||||
</p>
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_apiurl')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_apiurl'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.uri
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'uri',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_provider')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_provider'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.provider
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'provider',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_directory')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_directory'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.directory
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'directory',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberosvault_accesskey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_accesskey'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.access_key
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'access_key',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
type="password"
|
||||
iconright="activity"
|
||||
label={t('settings.persistence.kerberosvault_secretkey')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_secretkey'
|
||||
)}
|
||||
value={
|
||||
config.kstorage_secondary
|
||||
? config.kstorage_secondary.secret_access_key
|
||||
: ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage_secondary',
|
||||
'secret_access_key',
|
||||
value,
|
||||
config.kstorage_secondary
|
||||
)
|
||||
}
|
||||
/>
|
||||
</BlockBody>
|
||||
<BlockFooter>
|
||||
<Button
|
||||
label={t('settings.persistence.verify_connection')}
|
||||
disabled={loadingSecondary}
|
||||
onClick={this.verifySecondaryPersistenceSettings}
|
||||
type={loadingSecondary ? 'neutral' : 'default'}
|
||||
icon="verify"
|
||||
/>
|
||||
<Button
|
||||
label="Save"
|
||||
type="submit"
|
||||
onClick={this.saveConfig}
|
||||
buttonType="submit"
|
||||
icon="pencil"
|
||||
/>
|
||||
</BlockFooter>
|
||||
</Block>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -2420,6 +2790,8 @@ const mapDispatchToProps = (dispatch /* , ownProps */) => ({
|
||||
dispatch(verifyHub(config, success, error)),
|
||||
dispatchVerifyPersistence: (config, success, error) =>
|
||||
dispatch(verifyPersistence(config, success, error)),
|
||||
dispatchVerifySecondaryPersistence: (config, success, error) =>
|
||||
dispatch(verifySecondaryPersistence(config, success, error)),
|
||||
dispatchGetConfig: (callback) => dispatch(getConfig(callback)),
|
||||
dispatchUpdateConfig: (field, value) => dispatch(updateConfig(field, value)),
|
||||
dispatchSaveConfig: (config, success, error) =>
|
||||
@@ -2437,6 +2809,7 @@ Settings.propTypes = {
|
||||
images: PropTypes.array.isRequired,
|
||||
dispatchVerifyHub: PropTypes.func.isRequired,
|
||||
dispatchVerifyPersistence: PropTypes.func.isRequired,
|
||||
dispatchVerifySecondaryPersistence: PropTypes.func.isRequired,
|
||||
dispatchGetConfig: PropTypes.func.isRequired,
|
||||
dispatchUpdateConfig: PropTypes.func.isRequired,
|
||||
dispatchSaveConfig: PropTypes.func.isRequired,
|
||||
|
||||
@@ -1715,10 +1715,10 @@
|
||||
"@jridgewell/resolve-uri" "^3.0.3"
|
||||
"@jridgewell/sourcemap-codec" "^1.4.10"
|
||||
|
||||
"@kerberos-io/ui@^1.71.0":
|
||||
version "1.71.0"
|
||||
resolved "https://registry.yarnpkg.com/@kerberos-io/ui/-/ui-1.71.0.tgz#06914c94e8b0982068d2099acf8158917a511bfc"
|
||||
integrity sha512-pHCTn/iQTcQEPoCK82eJHGRn6BgzW3wgV4C+mNqdKOtLTquxL+vh7molEgC66tl3DGf7HyjSNa8LuoxYbt9TEg==
|
||||
"@kerberos-io/ui@^1.76.0":
|
||||
version "1.77.0"
|
||||
resolved "https://registry.yarnpkg.com/@kerberos-io/ui/-/ui-1.77.0.tgz#b748b2a9abf793ff2a9ba64ee41f84debc0ca9dc"
|
||||
integrity sha512-CHh4jeLKwrYvJRL5PM3UEN4p2k1fqwMKgSF2U6IR4v0fE2FwPc/2Ry4zGk6pvLDFHbDpR9jUkHX+iNphvStoyQ==
|
||||
dependencies:
|
||||
"@emotion/react" "^11.10.4"
|
||||
"@emotion/styled" "^11.10.4"
|
||||
|
||||
Reference in New Issue
Block a user