mirror of
https://github.com/kerberos-io/agent.git
synced 2026-03-06 18:51:30 +00:00
Compare commits
138 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1974bddfbe | ||
|
|
12cb88e1c1 | ||
|
|
c054526998 | ||
|
|
ffa97598b8 | ||
|
|
f5afbf3a63 | ||
|
|
e666695c96 | ||
|
|
55816e4b7b | ||
|
|
016fb51951 | ||
|
|
550a444650 | ||
|
|
4332e43f27 | ||
|
|
fdc3bfb4a4 | ||
|
|
c17d6b7117 | ||
|
|
5d7a8103c0 | ||
|
|
5d7cb98b8f | ||
|
|
f6046c6a6c | ||
|
|
f59f9d71a9 | ||
|
|
ff72f9647d | ||
|
|
fa604b16cf | ||
|
|
0342869733 | ||
|
|
8685ce31a2 | ||
|
|
0e259f0e7a | ||
|
|
5823abed95 | ||
|
|
86acff58f0 | ||
|
|
d3fc5d4c29 | ||
|
|
50bb40938c | ||
|
|
1977d98ad9 | ||
|
|
448d4a946d | ||
|
|
61ac314bb7 | ||
|
|
c1b144ca28 | ||
|
|
e16987bf9d | ||
|
|
9991597984 | ||
|
|
2c0314cea4 | ||
|
|
0584e52b98 | ||
|
|
1fc90eaee2 | ||
|
|
aef3eacbc9 | ||
|
|
2843568473 | ||
|
|
53ffc8cae0 | ||
|
|
86e654fe19 | ||
|
|
46d57f7664 | ||
|
|
963d8672eb | ||
|
|
9b7a62816a | ||
|
|
237134fe0e | ||
|
|
c8730e8f26 | ||
|
|
acbbe8b444 | ||
|
|
f690016aa5 | ||
|
|
396cfe5d8b | ||
|
|
39fe640ccf | ||
|
|
d389c9b0b6 | ||
|
|
b149686db8 | ||
|
|
c4358cbfad | ||
|
|
cfc5bd3dfe | ||
|
|
c29c1b6a92 | ||
|
|
0f45a2a4b4 | ||
|
|
92edcc13c0 | ||
|
|
5392e2ba90 | ||
|
|
79e1f659c7 | ||
|
|
bf35e5efb6 | ||
|
|
c50137f255 | ||
|
|
f12da749b2 | ||
|
|
a166083423 | ||
|
|
b400d4e773 | ||
|
|
120054d3e5 | ||
|
|
620117c31b | ||
|
|
4e371488c1 | ||
|
|
b154b56308 | ||
|
|
6d92817237 | ||
|
|
b8c1855830 | ||
|
|
a9f7ff4b72 | ||
|
|
b3cd080e14 | ||
|
|
bfde87f888 | ||
|
|
c4453bb8b3 | ||
|
|
40f65a30b3 | ||
|
|
5361de63e0 | ||
|
|
3a8552d362 | ||
|
|
d3840103fc | ||
|
|
d12a9f0612 | ||
|
|
c0d74f7e09 | ||
|
|
8ebea9e4c5 | ||
|
|
89269caf92 | ||
|
|
0c83170f51 | ||
|
|
6081cb4be9 | ||
|
|
ea1dbb3087 | ||
|
|
0523208d36 | ||
|
|
919f21b48b | ||
|
|
2c1c10a2ac | ||
|
|
7e3320b252 | ||
|
|
35ccac8b65 | ||
|
|
dad8165d11 | ||
|
|
ba54188de2 | ||
|
|
3b440c9905 | ||
|
|
42b98b7f20 | ||
|
|
ba3312b57c | ||
|
|
223ba255e9 | ||
|
|
a1df2be207 | ||
|
|
d7f225ca73 | ||
|
|
b3cfabb5df | ||
|
|
5310dd4550 | ||
|
|
cde7dbb58a | ||
|
|
65e68231c7 | ||
|
|
5502555869 | ||
|
|
ad6e7e752f | ||
|
|
63af4660ef | ||
|
|
24fc340001 | ||
|
|
78d786b69d | ||
|
|
756aeaa0eb | ||
|
|
055fb67d7a | ||
|
|
bee522a6bf | ||
|
|
3fbf59c622 | ||
|
|
abd8b8b605 | ||
|
|
abdad47bf3 | ||
|
|
d2c24edf5d | ||
|
|
22f4a7f119 | ||
|
|
a25d3d32e4 | ||
|
|
ed68c32e04 | ||
|
|
4114b3839a | ||
|
|
3f73c009fd | ||
|
|
02fb70c76e | ||
|
|
aaddcb854d | ||
|
|
e73c7a6ecc | ||
|
|
1dc2202f37 | ||
|
|
ac710ae1f5 | ||
|
|
f5ea82ff03 | ||
|
|
ef52325240 | ||
|
|
354855feb1 | ||
|
|
c4cd25b588 | ||
|
|
dbb870229e | ||
|
|
a66fe8c054 | ||
|
|
2352431c79 | ||
|
|
49bc168812 | ||
|
|
98f1ebf20a | ||
|
|
65feb6d182 | ||
|
|
58555d352f | ||
|
|
839a177cf0 | ||
|
|
404517ec40 | ||
|
|
035bd18bc2 | ||
|
|
8bf7a0d244 | ||
|
|
607d8fd0d1 | ||
|
|
12807e289c |
@@ -2,6 +2,10 @@
|
||||
// README at: https://github.com/devcontainers/templates/tree/main/src/python
|
||||
{
|
||||
"name": "go:1.24-bookworm",
|
||||
"runArgs": [
|
||||
"--name=agent",
|
||||
"--network=host"
|
||||
],
|
||||
"dockerFile": "Dockerfile",
|
||||
"customizations": {
|
||||
"vscode": {
|
||||
|
||||
35
.github/workflows/docker.yml
vendored
35
.github/workflows/docker.yml
vendored
@@ -1,8 +1,13 @@
|
||||
name: Release
|
||||
|
||||
name: Create a new release
|
||||
on:
|
||||
release:
|
||||
types: [created]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Tag for the Docker image"
|
||||
required: true
|
||||
default: "test"
|
||||
|
||||
env:
|
||||
REPO: kerberos/agent
|
||||
@@ -34,13 +39,14 @@ jobs:
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{github.ref_name}} --push .
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create -t $REPO:${{ github.ref_name }} $REPO-arch:arch-${{matrix.architecture}}-${{github.ref_name}}
|
||||
run: docker buildx imagetools create -t $REPO:${{ github.event.inputs.tag || github.ref_name }} $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{github.ref_name}}
|
||||
run: docker buildx imagetools create -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
if: github.event.inputs.tag == 'test'
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{github.ref_name}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{github.event.inputs.tag || github.ref_name}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
- name: Create a release
|
||||
@@ -48,8 +54,8 @@ jobs:
|
||||
with:
|
||||
latest: true
|
||||
allowUpdates: true
|
||||
name: ${{ github.ref_name }}
|
||||
tag: ${{ github.ref_name }}
|
||||
name: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
tag: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
generateReleaseNotes: false
|
||||
omitBodyDuringUpdate: true
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
@@ -92,13 +98,14 @@ jobs:
|
||||
- name: Available platforms
|
||||
run: echo ${{ steps.buildx.outputs.platforms }}
|
||||
- name: Run Buildx
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{github.ref_name}} --push .
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}} --push .
|
||||
- name: Create new and append to manifest
|
||||
run: docker buildx imagetools create --append -t $REPO:${{ github.ref_name }} $REPO-arch:arch-${{matrix.architecture}}-${{github.ref_name}}
|
||||
run: docker buildx imagetools create --append -t $REPO:${{ github.event.inputs.tag || github.ref_name }} $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
- name: Create new and append to manifest latest
|
||||
run: docker buildx imagetools create --append -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{github.ref_name}}
|
||||
run: docker buildx imagetools create --append -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{github.event.inputs.tag || github.ref_name}}
|
||||
if: github.event.inputs.tag == 'test'
|
||||
- name: Run Buildx with output
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{github.ref_name}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{github.event.inputs.tag || github.ref_name}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
|
||||
- name: Strip binary
|
||||
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
|
||||
- name: Create a release
|
||||
@@ -106,8 +113,8 @@ jobs:
|
||||
with:
|
||||
latest: true
|
||||
allowUpdates: true
|
||||
name: ${{ github.ref_name }}
|
||||
tag: ${{ github.ref_name }}
|
||||
name: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
tag: ${{ github.event.inputs.tag || github.ref_name }}
|
||||
generateReleaseNotes: false
|
||||
omitBodyDuringUpdate: true
|
||||
artifacts: "agent-${{matrix.architecture}}.tar"
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -10,6 +10,6 @@ machinery/data/recordings
|
||||
machinery/data/snapshots
|
||||
machinery/test*
|
||||
machinery/init-dev.sh
|
||||
machinery/.env
|
||||
machinery/.env.local
|
||||
machinery/vendor
|
||||
deployments/docker/private-docker-compose.yaml
|
||||
2
.vscode/launch.json
vendored
2
.vscode/launch.json
vendored
@@ -16,7 +16,7 @@
|
||||
"-port",
|
||||
"8080"
|
||||
],
|
||||
"envFile": "${workspaceFolder}/machinery/.env",
|
||||
"envFile": "${workspaceFolder}/machinery/.env.local",
|
||||
"buildFlags": "--tags dynamic",
|
||||
},
|
||||
{
|
||||
|
||||
21
Dockerfile
21
Dockerfile
@@ -1,5 +1,6 @@
|
||||
|
||||
FROM kerberos/base:af04230 AS build-machinery
|
||||
ARG BASE_IMAGE_VERSION=70ec57e
|
||||
FROM kerberos/base:${BASE_IMAGE_VERSION} AS build-machinery
|
||||
LABEL AUTHOR=Kerberos.io
|
||||
|
||||
ENV GOROOT=/usr/local/go
|
||||
@@ -43,8 +44,7 @@ RUN cd /go/src/github.com/kerberos-io/agent/machinery && \
|
||||
mkdir -p /agent/data/log && \
|
||||
mkdir -p /agent/data/recordings && \
|
||||
mkdir -p /agent/data/capture-test && \
|
||||
mkdir -p /agent/data/config && \
|
||||
rm -rf /go/src/gitlab.com/
|
||||
mkdir -p /agent/data/config
|
||||
|
||||
####################################
|
||||
# Let's create a /dist folder containing just the files necessary for runtime.
|
||||
@@ -58,18 +58,6 @@ RUN cp -r /agent ./
|
||||
|
||||
RUN /dist/agent/main version
|
||||
|
||||
###############################################
|
||||
# Build Bento4 -> we want fragmented mp4 files
|
||||
|
||||
ENV BENTO4_VERSION 1.6.0-641
|
||||
RUN cd /tmp && git clone https://github.com/axiomatic-systems/Bento4 && cd Bento4 && \
|
||||
git checkout tags/v${BENTO4_VERSION} && \
|
||||
cd Build && \
|
||||
cmake -DCMAKE_BUILD_TYPE=Release .. && \
|
||||
make && \
|
||||
mv /tmp/Bento4/Build/mp4fragment /dist/agent/ && \
|
||||
rm -rf /tmp/Bento4
|
||||
|
||||
FROM node:18.14.0-alpine3.16 AS build-ui
|
||||
|
||||
RUN apk update && apk upgrade --available && sync
|
||||
@@ -111,7 +99,6 @@ RUN apk update && apk add ca-certificates curl libstdc++ libc6-compat --no-cache
|
||||
# Try running agent
|
||||
|
||||
RUN mv /agent/* /home/agent/
|
||||
RUN cp /home/agent/mp4fragment /usr/local/bin/
|
||||
RUN /home/agent/main version
|
||||
|
||||
#######################
|
||||
@@ -148,4 +135,4 @@ HEALTHCHECK CMD curl --fail http://localhost:80 || exit 1
|
||||
# Leeeeettttt'ssss goooooo!!!
|
||||
# Run the shizzle from the right working directory.
|
||||
WORKDIR /home/agent
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
CMD ["./main", "-action", "run", "-port", "80"]
|
||||
@@ -257,6 +257,9 @@ Next to attaching the configuration file, it is also possible to override the co
|
||||
| `AGENT_ENCRYPTION_FINGERPRINT` | The fingerprint of the keypair (public/private keys), so you know which one to use. | "" |
|
||||
| `AGENT_ENCRYPTION_PRIVATE_KEY` | The private key (assymetric/RSA) to decrypt and sign requests send over MQTT. | "" |
|
||||
| `AGENT_ENCRYPTION_SYMMETRIC_KEY` | The symmetric key (AES) to encrypt and decrypt requests sent over MQTT. | "" |
|
||||
| `AGENT_SIGNING` | Enable 'true' or disable 'false' for signing recordings. | "true" |
|
||||
| `AGENT_SIGNING_PRIVATE_KEY` | The private key (RSA) to sign the recordings fingerprint to validate origin. | "" - uses default one if empty |
|
||||
|
||||
|
||||
## Encryption
|
||||
|
||||
|
||||
BIN
machinery/.DS_Store
vendored
Normal file
BIN
machinery/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -1,4 +1,31 @@
|
||||
AGENT_NAME=mycamera
|
||||
AGENT_NAME=camera-name
|
||||
AGENT_KEY=uniq-camera-id
|
||||
AGENT_TIMEZONE=Europe/Brussels
|
||||
AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://fake.kerberos.io/stream
|
||||
AGENT_CAPTURE_CONTINUOUS=true
|
||||
#AGENT_CAPTURE_CONTINUOUS=true
|
||||
#AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://fake.kerberos.io/stream
|
||||
#AGENT_CAPTURE_IPCAMERA_SUB_RTSP=rtsp://fake.kerberos.io/stream
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR=x.x.x.x
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME=xxx
|
||||
AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD=xxx
|
||||
AGENT_HUB_URI=https://api.cloud.kerberos.io
|
||||
AGENT_HUB_KEY=AKIXxxx4JBEI
|
||||
AGENT_HUB_PRIVATE_KEY=DIOXxxxAlYpaxxxxXioL0txxx
|
||||
AGENT_HUB_SITE=681xxxxxxx9bcda5
|
||||
|
||||
# By default will send to Hub (=S3), if you wish to send to Kerberos Vault, set to "kstorage"
|
||||
AGENT_CLOUD=s3
|
||||
AGENT_KERBEROSVAULT_URI=
|
||||
AGENT_KERBEROSVAULT_PROVIDER=
|
||||
AGENT_KERBEROSVAULT_DIRECTORY=
|
||||
AGENT_KERBEROSVAULT_ACCESS_KEY=
|
||||
AGENT_KERBEROSVAULT_SECRET_KEY=
|
||||
AGENT_KERBEROSVAULT_MAX_RETRIES=10
|
||||
AGENT_KERBEROSVAULT_TIMEOUT=120
|
||||
AGENT_KERBEROSVAULT_SECONDARY_URI=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_PROVIDER=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_DIRECTORY=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_ACCESS_KEY=
|
||||
AGENT_KERBEROSVAULT_SECONDARY_SECRET_KEY=
|
||||
|
||||
# Open telemetry tracing endpoint
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT=
|
||||
@@ -26,6 +26,7 @@
|
||||
"recording": "true",
|
||||
"snapshots": "true",
|
||||
"liveview": "true",
|
||||
"liveview_chunking": "false",
|
||||
"motion": "true",
|
||||
"postrecording": 20,
|
||||
"prerecording": 10,
|
||||
@@ -116,6 +117,7 @@
|
||||
"hub_site": "",
|
||||
"condition_uri": "",
|
||||
"encryption": {},
|
||||
"signing": {},
|
||||
"realtimeprocessing": "false",
|
||||
"realtimeprocessing_topic": ""
|
||||
}
|
||||
}
|
||||
136
machinery/go.mod
136
machinery/go.mod
@@ -1,21 +1,24 @@
|
||||
module github.com/kerberos-io/agent/machinery
|
||||
|
||||
go 1.24.1
|
||||
go 1.24.2
|
||||
|
||||
replace google.golang.org/genproto => google.golang.org/genproto v0.0.0-20250519155744-55703ea1f237
|
||||
|
||||
require (
|
||||
github.com/Eyevinn/mp4ff v0.48.0
|
||||
github.com/InVisionApp/conjungo v1.1.0
|
||||
github.com/appleboy/gin-jwt/v2 v2.10.3
|
||||
github.com/bluenviron/gortsplib/v4 v4.13.0
|
||||
github.com/bluenviron/gortsplib/v4 v4.14.1
|
||||
github.com/bluenviron/mediacommon v1.14.0
|
||||
github.com/cedricve/go-onvif v0.0.0-20200222191200-567e8ce298f6
|
||||
github.com/dromara/carbon/v2 v2.6.2
|
||||
github.com/dromara/carbon/v2 v2.6.8
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
|
||||
github.com/eclipse/paho.mqtt.golang v1.5.0
|
||||
github.com/elastic/go-sysinfo v1.15.3
|
||||
github.com/gin-contrib/cors v1.7.5
|
||||
github.com/gin-contrib/pprof v1.5.3
|
||||
github.com/gin-gonic/contrib v0.0.0-20250113154928-93b827325fec
|
||||
github.com/gin-gonic/gin v1.10.0
|
||||
github.com/gin-gonic/contrib v0.0.0-20250521004450-2b1292699c15
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/gofrs/uuid v4.4.0+incompatible
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
@@ -23,123 +26,90 @@ require (
|
||||
github.com/kerberos-io/joy4 v1.0.64
|
||||
github.com/kerberos-io/onvif v1.0.0
|
||||
github.com/minio/minio-go/v6 v6.0.57
|
||||
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/pion/rtp v1.8.13
|
||||
github.com/pion/webrtc/v4 v4.0.14
|
||||
github.com/pion/interceptor v0.1.40
|
||||
github.com/pion/rtp v1.8.19
|
||||
github.com/pion/webrtc/v4 v4.1.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/swaggo/files v1.0.1
|
||||
github.com/swaggo/gin-swagger v1.6.0
|
||||
github.com/swaggo/swag v1.16.4
|
||||
github.com/tevino/abool v1.2.0
|
||||
github.com/yapingcat/gomedia v0.0.0-20240906162731-17feea57090c
|
||||
github.com/zaf/g711 v1.4.0
|
||||
go.mongodb.org/mongo-driver v1.17.3
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.72.2
|
||||
go.opentelemetry.io/otel v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0
|
||||
go.opentelemetry.io/otel/sdk v1.36.0
|
||||
go.opentelemetry.io/otel/trace v1.36.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/DataDog/appsec-internal-go v1.9.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/proto v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/trace v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/util/log v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.0 // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
|
||||
github.com/DataDog/go-libddwaf/v3 v3.5.1 // indirect
|
||||
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20241206090539-a14610dc22b6 // indirect
|
||||
github.com/DataDog/go-sqllexer v0.0.14 // indirect
|
||||
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
|
||||
github.com/DataDog/gostackparse v0.7.0 // indirect
|
||||
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect
|
||||
github.com/DataDog/sketches-go v1.4.5 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beevik/etree v1.2.0 // indirect
|
||||
github.com/bluenviron/mediacommon/v2 v2.1.0 // indirect
|
||||
github.com/bluenviron/mediacommon/v2 v2.2.0 // indirect
|
||||
github.com/bytedance/sonic v1.13.2 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.4 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/clbanning/mxj v1.8.4 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
|
||||
github.com/ebitengine/purego v0.6.0-alpha.5 // indirect
|
||||
github.com/elastic/go-windows v1.0.2 // indirect
|
||||
github.com/elgs/gostrgen v0.0.0-20161222160715-9d61ae07eeae // indirect
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
|
||||
github.com/gin-contrib/sse v1.0.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.6 // indirect
|
||||
github.com/go-openapi/spec v0.20.4 // indirect
|
||||
github.com/go-openapi/swag v0.19.15 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.26.0 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect
|
||||
github.com/icholy/digest v0.1.23 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/juju/errors v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.17.4 // indirect
|
||||
github.com/klauspost/compress v1.16.7 // indirect
|
||||
github.com/klauspost/cpuid v1.2.3 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/kylelemons/go-gypsy v1.0.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lib/pq v1.10.2 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/mailru/easyjson v0.7.6 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.0 // indirect
|
||||
github.com/minio/sha256-simd v0.1.1 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/montanaflynn/stats v0.7.1 // indirect
|
||||
github.com/nxadm/tail v1.4.11 // indirect
|
||||
github.com/outcaste-io/ristretto v0.2.3 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
|
||||
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.4 // indirect
|
||||
github.com/pion/ice/v4 v4.0.8 // indirect
|
||||
github.com/pion/interceptor v0.1.37 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/sctp v1.8.37 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.11 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.4 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.5 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect
|
||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.24.4 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/spaolacci/murmur3 v1.1.0 // indirect
|
||||
github.com/tinylib/msgp v1.2.1 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.12 // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
@@ -147,35 +117,23 @@ require (
|
||||
github.com/xdg-go/scram v1.1.2 // indirect
|
||||
github.com/xdg-go/stringprep v1.0.4 // indirect
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/ziutek/mymysql v1.5.4 // indirect
|
||||
go.opentelemetry.io/collector/component v0.104.0 // indirect
|
||||
go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.11.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.104.0 // indirect
|
||||
go.opentelemetry.io/otel v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.27.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.6.0 // indirect
|
||||
golang.org/x/arch v0.16.0 // indirect
|
||||
golang.org/x/crypto v0.37.0 // indirect
|
||||
golang.org/x/mod v0.20.0 // indirect
|
||||
golang.org/x/net v0.38.0 // indirect
|
||||
golang.org/x/oauth2 v0.18.0 // indirect
|
||||
golang.org/x/sync v0.13.0 // indirect
|
||||
golang.org/x/sys v0.32.0 // indirect
|
||||
golang.org/x/text v0.24.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
golang.org/x/tools v0.24.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240520151616-dc85e6b867a5 // indirect
|
||||
google.golang.org/grpc v1.64.1 // indirect
|
||||
golang.org/x/crypto v0.38.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/sync v0.14.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.25.0 // indirect
|
||||
golang.org/x/tools v0.30.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250519155744-55703ea1f237 // indirect
|
||||
google.golang.org/grpc v1.72.1 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/ini.v1 v1.42.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
||||
|
||||
1779
machinery/go.sum
1779
machinery/go.sum
File diff suppressed because it is too large
Load Diff
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
@@ -11,48 +12,62 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/onvif"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
|
||||
|
||||
configService "github.com/kerberos-io/agent/machinery/src/config"
|
||||
"github.com/kerberos-io/agent/machinery/src/routers"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/profiler"
|
||||
)
|
||||
|
||||
var VERSION = utils.VERSION
|
||||
|
||||
func main() {
|
||||
// You might be interested in debugging the agent.
|
||||
if os.Getenv("DATADOG_AGENT_ENABLED") == "true" {
|
||||
if os.Getenv("DATADOG_AGENT_K8S_ENABLED") == "true" {
|
||||
tracer.Start()
|
||||
defer tracer.Stop()
|
||||
} else {
|
||||
service := os.Getenv("DATADOG_AGENT_SERVICE")
|
||||
environment := os.Getenv("DATADOG_AGENT_ENVIRONMENT")
|
||||
log.Log.Info("Starting Datadog Agent with service: " + service + " and environment: " + environment)
|
||||
rules := []tracer.SamplingRule{tracer.RateRule(1)}
|
||||
tracer.Start(
|
||||
tracer.WithSamplingRules(rules),
|
||||
tracer.WithService(service),
|
||||
tracer.WithEnv(environment),
|
||||
)
|
||||
defer tracer.Stop()
|
||||
err := profiler.Start(
|
||||
profiler.WithService(service),
|
||||
profiler.WithEnv(environment),
|
||||
profiler.WithProfileTypes(
|
||||
profiler.CPUProfile,
|
||||
profiler.HeapProfile,
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
log.Log.Fatal(err.Error())
|
||||
}
|
||||
defer profiler.Stop()
|
||||
}
|
||||
func startTracing(agentKey string, otelEndpoint string) (*trace.TracerProvider, error) {
|
||||
serviceName := "agent-" + agentKey
|
||||
headers := map[string]string{
|
||||
"content-type": "application/json",
|
||||
}
|
||||
|
||||
exporter, err := otlptrace.New(
|
||||
context.Background(),
|
||||
otlptracehttp.NewClient(
|
||||
otlptracehttp.WithEndpoint(otelEndpoint),
|
||||
otlptracehttp.WithHeaders(headers),
|
||||
otlptracehttp.WithInsecure(),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating new exporter: %w", err)
|
||||
}
|
||||
|
||||
tracerprovider := trace.NewTracerProvider(
|
||||
trace.WithBatcher(
|
||||
exporter,
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
trace.WithBatchTimeout(trace.DefaultScheduleDelay*time.Millisecond),
|
||||
trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize),
|
||||
),
|
||||
trace.WithResource(
|
||||
resource.NewWithAttributes(
|
||||
semconv.SchemaURL,
|
||||
semconv.ServiceNameKey.String(serviceName),
|
||||
attribute.String("environment", "develop"),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
otel.SetTracerProvider(tracerprovider)
|
||||
|
||||
return tracerprovider, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
// Start the show ;)
|
||||
// We'll parse the flags (named variables), and start the agent.
|
||||
|
||||
@@ -114,7 +129,7 @@ func main() {
|
||||
|
||||
case "run":
|
||||
{
|
||||
// Print Kerberos.io ASCII art
|
||||
// Print Agent ASCII art
|
||||
utils.PrintASCIIArt()
|
||||
|
||||
// Print the environment variables which include "AGENT_" as prefix.
|
||||
@@ -127,12 +142,29 @@ func main() {
|
||||
configuration.Name = name
|
||||
configuration.Port = port
|
||||
|
||||
// Open this configuration either from Kerberos Agent or Kerberos Factory.
|
||||
// Open this configuration either from Agent or Factory.
|
||||
configService.OpenConfig(configDirectory, &configuration)
|
||||
|
||||
// We will override the configuration with the environment variables
|
||||
configService.OverrideWithEnvironmentVariables(&configuration)
|
||||
|
||||
// Start OpenTelemetry tracing
|
||||
if otelEndpoint := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"); otelEndpoint == "" {
|
||||
log.Log.Info("main.Main(): No OpenTelemetry endpoint provided, skipping tracing")
|
||||
} else {
|
||||
log.Log.Info("main.Main(): Starting OpenTelemetry tracing with endpoint: " + otelEndpoint)
|
||||
agentKey := configuration.Config.Key
|
||||
traceProvider, err := startTracing(agentKey, otelEndpoint)
|
||||
if err != nil {
|
||||
log.Log.Error("traceprovider: " + err.Error())
|
||||
}
|
||||
defer func() {
|
||||
if err := traceProvider.Shutdown(context.Background()); err != nil {
|
||||
log.Log.Error("traceprovider: " + err.Error())
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Printing final configuration
|
||||
utils.PrintConfiguration(&configuration)
|
||||
|
||||
@@ -175,7 +207,7 @@ func main() {
|
||||
HandleBootstrap: make(chan string, 1),
|
||||
}
|
||||
|
||||
go components.Bootstrap(configDirectory, &configuration, &communication, &capture)
|
||||
go components.Bootstrap(ctx, configDirectory, &configuration, &communication, &capture)
|
||||
|
||||
// Start the REST API.
|
||||
routers.StartWebserver(configDirectory, &configuration, &communication, &capture)
|
||||
|
||||
@@ -38,16 +38,16 @@ func (c *Capture) SetBackChannelClient(rtspUrl string) *Golibrtsp {
|
||||
// RTSPClient is a interface that abstracts the RTSP client implementation.
|
||||
type RTSPClient interface {
|
||||
// Connect to the RTSP server.
|
||||
Connect(ctx context.Context) error
|
||||
Connect(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Connect to a backchannel RTSP server.
|
||||
ConnectBackChannel(ctx context.Context) error
|
||||
ConnectBackChannel(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
Start(ctx context.Context, streamType string, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) error
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
StartBackChannel(ctx context.Context) (err error)
|
||||
StartBackChannel(ctx context.Context, otelContext context.Context) error
|
||||
|
||||
// Decode a packet into a image.
|
||||
DecodePacket(pkt packets.Packet) (image.YCbCr, error)
|
||||
@@ -59,7 +59,7 @@ type RTSPClient interface {
|
||||
WritePacket(pkt packets.Packet) error
|
||||
|
||||
// Close the connection to the RTSP server.
|
||||
Close() error
|
||||
Close(ctx context.Context) error
|
||||
|
||||
// Get a list of streams from the RTSP server.
|
||||
GetStreams() ([]packets.Stream, error)
|
||||
|
||||
@@ -33,8 +33,11 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
"github.com/pion/rtp"
|
||||
"go.opentelemetry.io/otel"
|
||||
)
|
||||
|
||||
var tracer = otel.Tracer("github.com/kerberos-io/agent/machinery/src/capture")
|
||||
|
||||
// Implements the RTSPClient interface.
|
||||
type Golibrtsp struct {
|
||||
RTSPClient
|
||||
@@ -81,6 +84,21 @@ type Golibrtsp struct {
|
||||
AudioMPEG4Decoder *rtpmpeg4audio.Decoder
|
||||
|
||||
Streams []packets.Stream
|
||||
|
||||
// FPS calculation fields
|
||||
lastFrameTime time.Time
|
||||
frameTimeBuffer []time.Duration
|
||||
frameBufferSize int
|
||||
frameBufferIndex int
|
||||
fpsMutex sync.Mutex
|
||||
|
||||
// I-frame interval tracking fields
|
||||
packetsSinceLastKeyframe int
|
||||
lastKeyframePacketCount int
|
||||
keyframeIntervals []int
|
||||
keyframeBufferSize int
|
||||
keyframeBufferIndex int
|
||||
keyframeMutex sync.Mutex
|
||||
}
|
||||
|
||||
// Init function
|
||||
@@ -103,7 +121,10 @@ func init() {
|
||||
}
|
||||
|
||||
// Connect to the RTSP server.
|
||||
func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) Connect(ctx context.Context, ctxOtel context.Context) (err error) {
|
||||
|
||||
_, span := tracer.Start(ctxOtel, "Connect")
|
||||
defer span.End()
|
||||
|
||||
transport := gortsplib.TransportTCP
|
||||
g.Client = gortsplib.Client{
|
||||
@@ -131,8 +152,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Iniatlise the mutex.
|
||||
// Initialize the mutex and FPS calculation.
|
||||
g.VideoDecoderMutex = &sync.Mutex{}
|
||||
g.initFPSCalculation()
|
||||
|
||||
// find the H264 media and format
|
||||
var formaH264 *format.H264
|
||||
@@ -156,7 +178,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// but try to fetch it later on.
|
||||
if errSPS != nil {
|
||||
log.Log.Debug("capture.golibrtsp.Connect(H264): " + errSPS.Error())
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
@@ -168,7 +192,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
IsBackChannel: false,
|
||||
})
|
||||
} else {
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH264.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
@@ -216,8 +242,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Info("capture.golibrtsp.Connect(H265): " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: formaH265.Codec(),
|
||||
IsVideo: true,
|
||||
IsAudio: false,
|
||||
@@ -265,8 +292,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Error("capture.golibrtsp.Connect(G711): " + err.Error())
|
||||
} else {
|
||||
g.AudioG711Decoder = audiortpDec
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "PCM_MULAW",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -300,8 +328,9 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
log.Log.Error("capture.golibrtsp.Connect(Opus): " + err.Error())
|
||||
} else {
|
||||
g.AudioOpusDecoder = audiortpDec
|
||||
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "OPUS",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -328,11 +357,15 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
// Something went wrong .. Do something
|
||||
log.Log.Error("capture.golibrtsp.Connect(MPEG4): " + err.Error())
|
||||
} else {
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "AAC",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
IsBackChannel: false,
|
||||
SampleRate: audioFormaMPEG4.Config.SampleRate,
|
||||
Channels: audioFormaMPEG4.Config.ChannelCount,
|
||||
})
|
||||
|
||||
// Set the index for the audio
|
||||
@@ -352,7 +385,11 @@ func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context, ctxRunAgent context.Context) (err error) {
|
||||
|
||||
_, span := tracer.Start(ctxRunAgent, "ConnectBackChannel")
|
||||
defer span.End()
|
||||
|
||||
// Transport TCP
|
||||
transport := gortsplib.TransportTCP
|
||||
g.Client = gortsplib.Client{
|
||||
@@ -397,7 +434,9 @@ func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
|
||||
g.HasBackChannel = false
|
||||
} else {
|
||||
g.HasBackChannel = true
|
||||
streamIndex := len(g.Streams)
|
||||
g.Streams = append(g.Streams, packets.Stream{
|
||||
Index: streamIndex,
|
||||
Name: "PCM_MULAW",
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -439,6 +478,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CompositionTime: pts2,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
Idx: g.AudioG711Index,
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -480,6 +520,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CompositionTime: pts2,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
Idx: g.AudioG711Index,
|
||||
IsVideo: false,
|
||||
IsAudio: true,
|
||||
@@ -551,6 +592,9 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
var sps h264.SPS
|
||||
errSPS := sps.Unmarshal(nalu)
|
||||
if errSPS == nil {
|
||||
// Debug SPS information
|
||||
g.debugSPSInfo(&sps, streamType)
|
||||
|
||||
// Get width
|
||||
g.Streams[g.VideoH264Index].Width = sps.Width()
|
||||
if streamType == "main" {
|
||||
@@ -565,12 +609,14 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
} else if streamType == "sub" {
|
||||
configuration.Config.Capture.IPCamera.SubHeight = sps.Height()
|
||||
}
|
||||
// Get FPS
|
||||
g.Streams[g.VideoH264Index].FPS = sps.FPS()
|
||||
// Get FPS using enhanced method
|
||||
fps := g.getEnhancedFPS(&sps, g.VideoH264Index)
|
||||
g.Streams[g.VideoH264Index].FPS = fps
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.Start(%s): Final FPS=%.2f", streamType, fps))
|
||||
g.VideoH264Forma.SPS = nalu
|
||||
|
||||
}
|
||||
case h264.NALUTypePPS:
|
||||
// Read out pps
|
||||
g.VideoH264Forma.PPS = nalu
|
||||
}
|
||||
filteredAU = append(filteredAU, nalu)
|
||||
@@ -587,19 +633,13 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
return
|
||||
}
|
||||
|
||||
// Extract DTS from RTP packets
|
||||
//dts2, err := dtsExtractor.Extract(filteredAU, pts2)
|
||||
//if err != nil {
|
||||
// log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
|
||||
// return
|
||||
//}
|
||||
|
||||
pkt := packets.Packet{
|
||||
IsKeyFrame: idrPresent,
|
||||
Packet: rtppkt,
|
||||
Data: enc,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
CompositionTime: pts2,
|
||||
Idx: g.VideoH264Index,
|
||||
IsVideo: true,
|
||||
@@ -607,6 +647,21 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
Codec: "H264",
|
||||
}
|
||||
|
||||
// Track keyframe intervals
|
||||
keyframeInterval := g.trackKeyframeInterval(idrPresent)
|
||||
if idrPresent && keyframeInterval > 0 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
gopDuration := float64(keyframeInterval) / g.Streams[g.VideoH265Index].FPS
|
||||
gopSize := int(avgInterval) // Store GOP size in a separate variable
|
||||
g.Streams[g.VideoH264Index].GopSize = gopSize
|
||||
log.Log.Info(fmt.Sprintf("capture.golibrtsp.Start(%s): Keyframe interval=%d packets, Avg=%.1f, GOP=%.1fs, GOPSize=%d",
|
||||
streamType, keyframeInterval, avgInterval, gopDuration, gopSize))
|
||||
preRecording := configuration.Config.Capture.PreRecording
|
||||
if preRecording > 0 && int(gopDuration) > 0 {
|
||||
queue.SetMaxGopCount(int(preRecording)/int(gopDuration) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
pkt.Data = pkt.Data[4:]
|
||||
if pkt.IsKeyFrame {
|
||||
annexbNALUStartCode := func() []byte { return []byte{0x00, 0x00, 0x00, 0x01} }
|
||||
@@ -729,6 +784,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
Data: enc,
|
||||
Time: pts2,
|
||||
TimeLegacy: pts,
|
||||
CurrentTime: time.Now().UnixMilli(),
|
||||
CompositionTime: pts2,
|
||||
Idx: g.VideoH265Index,
|
||||
IsVideo: true,
|
||||
@@ -736,6 +792,21 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
Codec: "H265",
|
||||
}
|
||||
|
||||
// Track keyframe intervals for H265
|
||||
keyframeInterval := g.trackKeyframeInterval(isRandomAccess)
|
||||
if isRandomAccess && keyframeInterval > 0 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
gopDuration := float64(keyframeInterval) / g.Streams[g.VideoH265Index].FPS
|
||||
gopSize := int(avgInterval) // Store GOP size in a separate variable
|
||||
g.Streams[g.VideoH265Index].GopSize = gopSize
|
||||
log.Log.Info(fmt.Sprintf("capture.golibrtsp.Start(%s): Keyframe interval=%d packets, Avg=%.1f, GOP=%.1fs, GOPSize=%d",
|
||||
streamType, keyframeInterval, avgInterval, gopDuration, gopSize))
|
||||
preRecording := configuration.Config.Capture.PreRecording
|
||||
if preRecording > 0 && int(gopDuration) > 0 {
|
||||
queue.SetMaxGopCount(int(preRecording)/int(gopDuration) + 1)
|
||||
}
|
||||
}
|
||||
|
||||
queue.WritePacket(pkt)
|
||||
|
||||
// This will check if we need to stop the thread,
|
||||
@@ -778,7 +849,7 @@ func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets
|
||||
}
|
||||
|
||||
// Start the RTSP client, and start reading packets.
|
||||
func (g *Golibrtsp) StartBackChannel(ctx context.Context) (err error) {
|
||||
func (g *Golibrtsp) StartBackChannel(ctx context.Context, ctxRunAgent context.Context) (err error) {
|
||||
log.Log.Info("capture.golibrtsp.StartBackChannel(): started")
|
||||
// Wait for a second, so we can be sure the stream is playing.
|
||||
time.Sleep(1 * time.Second)
|
||||
@@ -860,8 +931,8 @@ func (g *Golibrtsp) DecodePacketRaw(pkt packets.Packet) (image.Gray, error) {
|
||||
}
|
||||
|
||||
// Get a list of streams from the RTSP server.
|
||||
func (j *Golibrtsp) GetStreams() ([]packets.Stream, error) {
|
||||
return j.Streams, nil
|
||||
func (g *Golibrtsp) GetStreams() ([]packets.Stream, error) {
|
||||
return g.Streams, nil
|
||||
}
|
||||
|
||||
// Get a list of video streams from the RTSP server.
|
||||
@@ -887,7 +958,11 @@ func (g *Golibrtsp) GetAudioStreams() ([]packets.Stream, error) {
|
||||
}
|
||||
|
||||
// Close the connection to the RTSP server.
|
||||
func (g *Golibrtsp) Close() error {
|
||||
func (g *Golibrtsp) Close(ctxOtel context.Context) error {
|
||||
|
||||
_, span := tracer.Start(ctxOtel, "Close")
|
||||
defer span.End()
|
||||
|
||||
// Close the demuxer.
|
||||
g.Client.Close()
|
||||
|
||||
@@ -1101,3 +1176,185 @@ func WriteMPEG4Audio(forma *format.MPEG4Audio, aus [][]byte) ([]byte, error) {
|
||||
}
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
// Initialize FPS calculation buffers
|
||||
func (g *Golibrtsp) initFPSCalculation() {
|
||||
g.frameBufferSize = 30 // Store last 30 frame intervals
|
||||
g.frameTimeBuffer = make([]time.Duration, g.frameBufferSize)
|
||||
g.frameBufferIndex = 0
|
||||
g.lastFrameTime = time.Time{}
|
||||
|
||||
// Initialize I-frame interval tracking
|
||||
g.keyframeBufferSize = 10 // Store last 10 keyframe intervals
|
||||
g.keyframeIntervals = make([]int, g.keyframeBufferSize)
|
||||
g.keyframeBufferIndex = 0
|
||||
g.packetsSinceLastKeyframe = 0
|
||||
g.lastKeyframePacketCount = 0
|
||||
}
|
||||
|
||||
// Calculate FPS from frame timestamps
|
||||
func (g *Golibrtsp) calculateFPSFromTimestamps() float64 {
|
||||
g.fpsMutex.Lock()
|
||||
defer g.fpsMutex.Unlock()
|
||||
|
||||
if g.lastFrameTime.IsZero() {
|
||||
g.lastFrameTime = time.Now()
|
||||
return 0
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
interval := now.Sub(g.lastFrameTime)
|
||||
g.lastFrameTime = now
|
||||
|
||||
// Store the interval
|
||||
g.frameTimeBuffer[g.frameBufferIndex] = interval
|
||||
g.frameBufferIndex = (g.frameBufferIndex + 1) % g.frameBufferSize
|
||||
|
||||
// Calculate average FPS from stored intervals
|
||||
var totalInterval time.Duration
|
||||
validSamples := 0
|
||||
|
||||
for _, interval := range g.frameTimeBuffer {
|
||||
if interval > 0 {
|
||||
totalInterval += interval
|
||||
validSamples++
|
||||
}
|
||||
}
|
||||
|
||||
if validSamples == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
avgInterval := totalInterval / time.Duration(validSamples)
|
||||
if avgInterval == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return float64(time.Second) / float64(avgInterval)
|
||||
}
|
||||
|
||||
// Get enhanced FPS information from SPS with fallback
|
||||
func (g *Golibrtsp) getEnhancedFPS(sps *h264.SPS, streamIndex int8) float64 {
|
||||
// First try to get FPS from SPS
|
||||
spsFPS := sps.FPS()
|
||||
|
||||
// Check if SPS FPS is reasonable (between 1 and 120 fps)
|
||||
if spsFPS > 0 && spsFPS <= 120 {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.getEnhancedFPS(): SPS FPS: %.2f", spsFPS))
|
||||
return spsFPS
|
||||
}
|
||||
|
||||
// Fallback to timestamp-based calculation
|
||||
timestampFPS := g.calculateFPSFromTimestamps()
|
||||
if timestampFPS > 0 && timestampFPS <= 120 {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.getEnhancedFPS(): Timestamp FPS: %.2f", timestampFPS))
|
||||
return timestampFPS
|
||||
}
|
||||
|
||||
// Return SPS FPS even if it seems unreasonable, or default
|
||||
if spsFPS > 0 {
|
||||
return spsFPS
|
||||
}
|
||||
|
||||
return 25.0 // Default fallback FPS
|
||||
}
|
||||
|
||||
// Track I-frame intervals by counting packets between keyframes
|
||||
func (g *Golibrtsp) trackKeyframeInterval(isKeyframe bool) int {
|
||||
g.keyframeMutex.Lock()
|
||||
defer g.keyframeMutex.Unlock()
|
||||
|
||||
g.packetsSinceLastKeyframe++
|
||||
|
||||
if isKeyframe {
|
||||
// Store the interval since the last keyframe
|
||||
if g.lastKeyframePacketCount > 0 {
|
||||
interval := g.packetsSinceLastKeyframe
|
||||
g.keyframeIntervals[g.keyframeBufferIndex] = interval
|
||||
g.keyframeBufferIndex = (g.keyframeBufferIndex + 1) % g.keyframeBufferSize
|
||||
}
|
||||
|
||||
// Reset counter for next interval
|
||||
g.lastKeyframePacketCount = g.packetsSinceLastKeyframe
|
||||
g.packetsSinceLastKeyframe = 0
|
||||
|
||||
return g.lastKeyframePacketCount
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// Get average keyframe interval (GOP size)
|
||||
func (g *Golibrtsp) getAverageKeyframeInterval() float64 {
|
||||
g.keyframeMutex.Lock()
|
||||
defer g.keyframeMutex.Unlock()
|
||||
|
||||
var totalInterval int
|
||||
validSamples := 0
|
||||
|
||||
for _, interval := range g.keyframeIntervals {
|
||||
if interval > 0 {
|
||||
totalInterval += interval
|
||||
validSamples++
|
||||
}
|
||||
}
|
||||
|
||||
if validSamples == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return float64(totalInterval) / float64(validSamples)
|
||||
}
|
||||
|
||||
// Calculate GOP size in seconds based on FPS and keyframe interval
|
||||
func (g *Golibrtsp) getGOPDuration(fps float64) float64 {
|
||||
avgInterval := g.getAverageKeyframeInterval()
|
||||
if avgInterval > 0 && fps > 0 {
|
||||
return avgInterval / fps
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Get detailed SPS timing information
|
||||
func (g *Golibrtsp) getSPSTimingInfo(sps *h264.SPS) (hasVUI bool, timeScale uint32, numUnitsInTick uint32, fps float64) {
|
||||
// Try to get FPS from SPS
|
||||
fps = sps.FPS()
|
||||
|
||||
// Note: The gortsplib SPS struct may not expose VUI parameters directly
|
||||
// but we can still work with the calculated FPS
|
||||
if fps > 0 {
|
||||
hasVUI = true
|
||||
// These are estimated values based on common patterns
|
||||
if fps == 25.0 {
|
||||
timeScale = 50
|
||||
numUnitsInTick = 1
|
||||
} else if fps == 30.0 {
|
||||
timeScale = 60
|
||||
numUnitsInTick = 1
|
||||
} else if fps == 24.0 {
|
||||
timeScale = 48
|
||||
numUnitsInTick = 1
|
||||
} else {
|
||||
// Generic calculation
|
||||
timeScale = uint32(fps * 2)
|
||||
numUnitsInTick = 1
|
||||
}
|
||||
}
|
||||
|
||||
return hasVUI, timeScale, numUnitsInTick, fps
|
||||
}
|
||||
|
||||
// Debug SPS information
|
||||
func (g *Golibrtsp) debugSPSInfo(sps *h264.SPS, streamType string) {
|
||||
hasVUI, timeScale, numUnitsInTick, fps := g.getSPSTimingInfo(sps)
|
||||
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): Width=%d, Height=%d",
|
||||
streamType, sps.Width(), sps.Height()))
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): HasVUI=%t, FPS=%.2f",
|
||||
streamType, hasVUI, fps))
|
||||
|
||||
if hasVUI {
|
||||
log.Log.Debug(fmt.Sprintf("capture.golibrtsp.debugSPSInfo(%s): TimeScale=%d, NumUnitsInTick=%d",
|
||||
streamType, timeScale, numUnitsInTick))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,8 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/packets"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
"github.com/yapingcat/gomedia/go-mp4"
|
||||
"github.com/kerberos-io/agent/machinery/src/video"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
func CleanupRecordingDirectory(configDirectory string, configuration *models.Configuration) {
|
||||
@@ -63,26 +64,46 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
} else {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(): started")
|
||||
|
||||
recordingPeriod := config.Capture.PostRecording // number of seconds to record.
|
||||
maxRecordingPeriod := config.Capture.MaxLengthRecording // maximum number of seconds to record.
|
||||
preRecording := config.Capture.PreRecording * 1000
|
||||
postRecording := config.Capture.PostRecording * 1000 // number of seconds to record.
|
||||
maxRecordingPeriod := config.Capture.MaxLengthRecording * 1000 // maximum number of seconds to record.
|
||||
|
||||
// Synchronise the last synced time
|
||||
now := time.Now().Unix()
|
||||
startRecording := now
|
||||
timestamp := now
|
||||
// We will calculate the maxRecordingPeriod based on the preRecording and postRecording values.
|
||||
if maxRecordingPeriod == 0 {
|
||||
// If maxRecordingPeriod is not set, we will use the preRecording and postRecording values
|
||||
maxRecordingPeriod = preRecording + postRecording
|
||||
}
|
||||
|
||||
if maxRecordingPeriod < preRecording+postRecording {
|
||||
log.Log.Error("capture.main.HandleRecordStream(): maxRecordingPeriod is less than preRecording + postRecording, this is not allowed. Setting maxRecordingPeriod to preRecording + postRecording.")
|
||||
maxRecordingPeriod = preRecording + postRecording
|
||||
}
|
||||
|
||||
if config.FriendlyName != "" {
|
||||
config.Name = config.FriendlyName
|
||||
}
|
||||
|
||||
// For continuous and motion based recording we will use a single file.
|
||||
var file *os.File
|
||||
// Get the audio and video codec from the camera.
|
||||
// We only expect one audio and one video codec.
|
||||
// If there are multiple audio or video streams, we will use the first one.
|
||||
audioCodec := ""
|
||||
videoCodec := ""
|
||||
audioStreams, _ := rtspClient.GetAudioStreams()
|
||||
videoStreams, _ := rtspClient.GetVideoStreams()
|
||||
if len(audioStreams) > 0 {
|
||||
audioCodec = audioStreams[0].Name
|
||||
config.Capture.IPCamera.SampleRate = audioStreams[0].SampleRate
|
||||
config.Capture.IPCamera.Channels = audioStreams[0].Channels
|
||||
}
|
||||
if len(videoStreams) > 0 {
|
||||
videoCodec = videoStreams[0].Name
|
||||
}
|
||||
|
||||
// Check if continuous recording.
|
||||
if config.Capture.Continuous == "true" {
|
||||
|
||||
//var cws *cacheWriterSeeker
|
||||
var myMuxer *mp4.Movmuxer
|
||||
var mp4Video *video.MP4
|
||||
var videoTrack uint32
|
||||
var audioTrack uint32
|
||||
var name string
|
||||
@@ -90,15 +111,15 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// Do not do anything!
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): start recording")
|
||||
|
||||
now = time.Now().Unix()
|
||||
timestamp = now
|
||||
start := false
|
||||
|
||||
// If continuous record the full length
|
||||
recordingPeriod = maxRecordingPeriod
|
||||
postRecording = maxRecordingPeriod
|
||||
// Recording file name
|
||||
fullName := ""
|
||||
|
||||
var startRecording int64 = 0 // start recording timestamp in milliseconds
|
||||
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
@@ -114,20 +135,21 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
|
||||
nextPkt, cursorError = recordingCursor.ReadPacket()
|
||||
|
||||
now := time.Now().Unix()
|
||||
now := time.Now().UnixMilli()
|
||||
|
||||
if start && // If already recording and current frame is a keyframe and we should stop recording
|
||||
nextPkt.IsKeyFrame && (timestamp+recordingPeriod-now <= 0 || now-startRecording >= maxRecordingPeriod) {
|
||||
nextPkt.IsKeyFrame && (startRecording+postRecording-now <= 0 || now-startRecording > maxRecordingPeriod-500) {
|
||||
|
||||
// Write the last packet
|
||||
ttime := convertPTS(pkt.TimeLegacy)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
// Write the last packet
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
// Write the last packet
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
@@ -136,21 +158,44 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
}
|
||||
|
||||
// This will write the trailer a well.
|
||||
if err := myMuxer.WriteTrailer(); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
|
||||
// Close mp4
|
||||
mp4Video.Close(&config)
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): recording finished: file save: " + name)
|
||||
|
||||
// Cleanup muxer
|
||||
start = false
|
||||
file.Close()
|
||||
file = nil
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
// Update the name with the duration in milliseconds.
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
"0-0-0-0" + "_" + // region coordinates, we
|
||||
"-1" + "_" + // token
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -197,7 +242,6 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
|
||||
start = true
|
||||
timestamp = now
|
||||
|
||||
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
|
||||
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
|
||||
@@ -208,13 +252,17 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// - Number of changes
|
||||
// - Token
|
||||
|
||||
startRecording = time.Now().Unix() // we mark the current time when the record started.ss
|
||||
s := strconv.FormatInt(startRecording, 10) + "_" +
|
||||
"6" + "-" +
|
||||
"967003" + "_" +
|
||||
config.Name + "_" +
|
||||
"200-200-400-400" + "_0_" +
|
||||
"769"
|
||||
startRecording = pkt.CurrentTime
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" + // start timestamp in seconds
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" + // length of milliseconds
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" + // milliseconds
|
||||
config.Name + "_" + // device name
|
||||
"0-0-0-0" + "_" + // region coordinates, we will not use this for continuous recording
|
||||
"0" + "_" + // token
|
||||
"0" + "_" //+ // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
@@ -222,53 +270,61 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// Running...
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): recording started")
|
||||
|
||||
file, err = os.Create(fullName)
|
||||
if err == nil {
|
||||
//cws = newCacheWriterSeeker(4096)
|
||||
myMuxer, _ = mp4.CreateMp4Muxer(file)
|
||||
// We choose between H264 and H265
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
widthOption := mp4.WithVideoWidth(uint32(width))
|
||||
heightOption := mp4.WithVideoHeight(uint32(height))
|
||||
if pkt.Codec == "H264" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264, widthOption, heightOption)
|
||||
} else if pkt.Codec == "H265" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265, widthOption, heightOption)
|
||||
}
|
||||
// For an MP4 container, AAC is the only audio codec supported.
|
||||
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
|
||||
} else {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
// Get width and height from the camera.
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
|
||||
// Get SPS and PPS NALUs from the camera.
|
||||
spsNALUS := configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
ppsNALUS := configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
vpsNALUS := configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
|
||||
// Create a video file, and set the dimensions.
|
||||
mp4Video = video.NewMP4(fullName, spsNALUS, ppsNALUS, vpsNALUS)
|
||||
mp4Video.SetWidth(width)
|
||||
mp4Video.SetHeight(height)
|
||||
|
||||
if videoCodec == "H264" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H264")
|
||||
} else if videoCodec == "H265" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H265")
|
||||
}
|
||||
if audioCodec == "AAC" {
|
||||
audioTrack = mp4Video.AddAudioTrack("AAC")
|
||||
} else if audioCodec == "PCM_MULAW" {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
|
||||
ttime := convertPTS(pkt.TimeLegacy)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
// TODO: transcode to AAC, some work to do..
|
||||
// We might need to use ffmpeg to transcode the audio to AAC.
|
||||
// For now we will skip the audio track.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
}
|
||||
|
||||
recordingStatus = "started"
|
||||
|
||||
} else if start {
|
||||
ttime := convertPTS(pkt.TimeLegacy)
|
||||
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
// New method using new mp4 library
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
@@ -277,7 +333,6 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pkt = nextPkt
|
||||
}
|
||||
|
||||
@@ -285,21 +340,43 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// If this happens we need to check to properly close the recording.
|
||||
if cursorError != nil {
|
||||
if recordingStatus == "started" {
|
||||
// This will write the trailer a well.
|
||||
if err := myMuxer.WriteTrailer(); err != nil {
|
||||
log.Log.Error(err.Error())
|
||||
}
|
||||
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): Recording finished: file save: " + name)
|
||||
|
||||
// Cleanup muxer
|
||||
start = false
|
||||
file.Close()
|
||||
file = nil
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
// Update the name with the duration in milliseconds.
|
||||
startRecordingSeconds := startRecording / 1000 // convert to seconds
|
||||
startRecordingMilliseconds := startRecording % 1000 // convert to milliseconds
|
||||
s := strconv.FormatInt(startRecordingSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(startRecordingMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(startRecordingMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
"0-0-0-0" + "_" + // region coordinates, we
|
||||
"-1" + "_" + // token
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(continuous): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -337,33 +414,44 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Start motion based recording ")
|
||||
|
||||
var lastDuration int64
|
||||
var lastRecordingTime int64
|
||||
var lastRecordingTime int64 = 0 // last recording timestamp in milliseconds
|
||||
var displayTime int64 = 0 // display time in milliseconds
|
||||
|
||||
//var cws *cacheWriterSeeker
|
||||
var myMuxer *mp4.Movmuxer
|
||||
var videoTrack uint32
|
||||
var audioTrack uint32
|
||||
|
||||
for motion := range communication.HandleMotion {
|
||||
|
||||
timestamp = time.Now().Unix()
|
||||
startRecording = time.Now().Unix() // we mark the current time when the record started.
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var nextPkt packets.Packet
|
||||
recordingCursor := queue.Oldest() // Start from the latest packet in the queue)
|
||||
|
||||
// If we have prerecording we will substract the number of seconds.
|
||||
// Taking into account FPS = GOP size (Keyfram interval)
|
||||
if config.Capture.PreRecording > 0 {
|
||||
now := time.Now().UnixMilli()
|
||||
motionTimestamp := now
|
||||
|
||||
// Might be that recordings are coming short after each other.
|
||||
// Therefore we do some math with the current time and the last recording time.
|
||||
start := false
|
||||
|
||||
timeBetweenNowAndLastRecording := startRecording - lastRecordingTime
|
||||
if timeBetweenNowAndLastRecording > int64(config.Capture.PreRecording) {
|
||||
startRecording = startRecording - int64(config.Capture.PreRecording) + 1
|
||||
} else {
|
||||
startRecording = startRecording - timeBetweenNowAndLastRecording
|
||||
}
|
||||
if cursorError == nil {
|
||||
pkt, cursorError = recordingCursor.ReadPacket()
|
||||
}
|
||||
|
||||
displayTime = pkt.CurrentTime
|
||||
startRecording := pkt.CurrentTime
|
||||
|
||||
// We have more packets in the queue (which might still be older than where we close the previous recording).
|
||||
// In that case we will use the last recording time to determine the start time of the recording, otherwise
|
||||
// we will have duplicate frames in the recording.
|
||||
if startRecording < lastRecordingTime {
|
||||
displayTime = lastRecordingTime
|
||||
startRecording = lastRecordingTime
|
||||
}
|
||||
|
||||
// If startRecording is 0, we will continue as it might be we are in a state of restarting the agent.
|
||||
if startRecording == 0 {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): startRecording is 0, we will continue as it might be we are in a state of restarting the agent.")
|
||||
continue
|
||||
}
|
||||
|
||||
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
|
||||
@@ -375,47 +463,56 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// - Number of changes
|
||||
// - Token
|
||||
|
||||
s := strconv.FormatInt(startRecording, 10) + "_" +
|
||||
"6" + "-" +
|
||||
"967003" + "_" +
|
||||
config.Name + "_" +
|
||||
"200-200-400-400" + "_" +
|
||||
strconv.Itoa(numberOfChanges) + "_" +
|
||||
"769"
|
||||
displayTimeSeconds := displayTime / 1000 // convert to seconds
|
||||
displayTimeMilliseconds := displayTime % 1000 // convert to milliseconds
|
||||
motionRectangleString := "0-0-0-0"
|
||||
if motion.Rectangle.X != 0 || motion.Rectangle.Y != 0 ||
|
||||
motion.Rectangle.Width != 0 || motion.Rectangle.Height != 0 {
|
||||
motionRectangleString = strconv.Itoa(motion.Rectangle.X) + "-" + strconv.Itoa(motion.Rectangle.Y) + "-" +
|
||||
strconv.Itoa(motion.Rectangle.Width) + "-" + strconv.Itoa(motion.Rectangle.Height)
|
||||
}
|
||||
|
||||
// Get the number of changes from the motion detection.
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
|
||||
s := strconv.FormatInt(displayTimeSeconds, 10) + "_" + // start timestamp in seconds
|
||||
strconv.Itoa(len(strconv.FormatInt(displayTimeMilliseconds, 10))) + "-" + // length of milliseconds
|
||||
strconv.FormatInt(displayTimeMilliseconds, 10) + "_" + // milliseconds
|
||||
config.Name + "_" + // device name
|
||||
motionRectangleString + "_" + // region coordinates, we will not use this for continuous recording
|
||||
strconv.Itoa(numberOfChanges) + "_" + // number of changes
|
||||
"0" // + "_" + // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
name := s + ".mp4"
|
||||
fullName := configDirectory + "/data/recordings/" + name
|
||||
|
||||
// Running...
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started")
|
||||
file, _ = os.Create(fullName)
|
||||
myMuxer, _ = mp4.CreateMp4Muxer(file)
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started (" + name + ")" + " at " + strconv.FormatInt(displayTimeSeconds, 10) + " unix")
|
||||
|
||||
// Check which video codec we need to use.
|
||||
videoSteams, _ := rtspClient.GetVideoStreams()
|
||||
for _, stream := range videoSteams {
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
widthOption := mp4.WithVideoWidth(uint32(width))
|
||||
heightOption := mp4.WithVideoHeight(uint32(height))
|
||||
if stream.Name == "H264" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264, widthOption, heightOption)
|
||||
} else if stream.Name == "H265" {
|
||||
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265, widthOption, heightOption)
|
||||
}
|
||||
// Get width and height from the camera.
|
||||
width := configuration.Config.Capture.IPCamera.Width
|
||||
height := configuration.Config.Capture.IPCamera.Height
|
||||
|
||||
// Get SPS and PPS NALUs from the camera.
|
||||
spsNALUS := configuration.Config.Capture.IPCamera.SPSNALUs
|
||||
ppsNALUS := configuration.Config.Capture.IPCamera.PPSNALUs
|
||||
vpsNALUS := configuration.Config.Capture.IPCamera.VPSNALUs
|
||||
|
||||
// Create a video file, and set the dimensions.
|
||||
mp4Video := video.NewMP4(fullName, spsNALUS, ppsNALUS, vpsNALUS)
|
||||
mp4Video.SetWidth(width)
|
||||
mp4Video.SetHeight(height)
|
||||
|
||||
if videoCodec == "H264" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H264")
|
||||
} else if videoCodec == "H265" {
|
||||
videoTrack = mp4Video.AddVideoTrack("H265")
|
||||
}
|
||||
// For an MP4 container, AAC is the only audio codec supported.
|
||||
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
|
||||
start := false
|
||||
|
||||
// Get as much packets we need.
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var nextPkt packets.Packet
|
||||
recordingCursor := queue.DelayedGopCount(int(config.Capture.PreRecording + 1))
|
||||
|
||||
if cursorError == nil {
|
||||
pkt, cursorError = recordingCursor.ReadPacket()
|
||||
if audioCodec == "AAC" {
|
||||
audioTrack = mp4Video.AddAudioTrack("AAC")
|
||||
} else if audioCodec == "PCM_MULAW" {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
|
||||
for cursorError == nil {
|
||||
@@ -425,68 +522,91 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + cursorError.Error())
|
||||
}
|
||||
|
||||
now := time.Now().Unix()
|
||||
now = time.Now().UnixMilli()
|
||||
select {
|
||||
case motion := <-communication.HandleMotion:
|
||||
timestamp = now
|
||||
motionTimestamp = now
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): motion detected while recording. Expanding recording.")
|
||||
numberOfChanges = motion.NumberOfChanges
|
||||
numberOfChanges := motion.NumberOfChanges
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Received message with recording data, detected changes to save: " + strconv.Itoa(numberOfChanges))
|
||||
default:
|
||||
}
|
||||
|
||||
if (timestamp+recordingPeriod-now < 0 || now-startRecording > maxRecordingPeriod) && nextPkt.IsKeyFrame {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(timestamp, 10) + ", recordingPeriod: " + strconv.FormatInt(recordingPeriod, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
|
||||
if (motionTimestamp+postRecording-now < 0 || now-startRecording > maxRecordingPeriod-500) && nextPkt.IsKeyFrame {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): timestamp+postRecording-now < 0 - " + strconv.FormatInt(motionTimestamp+postRecording-now, 10) + " < 0")
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): now-startRecording > maxRecordingPeriod-500 - " + strconv.FormatInt(now-startRecording, 10) + " > " + strconv.FormatInt(maxRecordingPeriod-500, 10))
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(motionTimestamp, 10) + ", postRecording: " + strconv.FormatInt(postRecording, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
|
||||
break
|
||||
}
|
||||
if pkt.IsKeyFrame && !start && pkt.Time >= lastDuration {
|
||||
if pkt.IsKeyFrame && !start && pkt.CurrentTime >= startRecording {
|
||||
// We start the recording if we have a keyframe and the last duration is 0 or less than the current packet time.
|
||||
// It could be start we start from the beginning of the recording.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): write frames")
|
||||
start = true
|
||||
}
|
||||
if start {
|
||||
ttime := convertPTS(pkt.TimeLegacy)
|
||||
pts := convertPTS(pkt.TimeLegacy)
|
||||
if pkt.IsVideo {
|
||||
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): add video sample")
|
||||
if err := mp4Video.AddSampleToTrack(videoTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): add audio sample")
|
||||
if pkt.Codec == "AAC" {
|
||||
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
|
||||
if err := mp4Video.AddSampleToTrack(audioTrack, pkt.IsKeyFrame, pkt.Data, pts); err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
}
|
||||
} else if pkt.Codec == "PCM_MULAW" {
|
||||
// TODO: transcode to AAC, some work to do..
|
||||
// We might need to use ffmpeg to transcode the audio to AAC.
|
||||
// For now we will skip the audio track.
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): no AAC audio codec detected, skipping audio track.")
|
||||
}
|
||||
}
|
||||
|
||||
// We will sync to file every keyframe.
|
||||
if pkt.IsKeyFrame {
|
||||
err := file.Sync()
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
|
||||
} else {
|
||||
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): synced file " + name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pkt = nextPkt
|
||||
}
|
||||
|
||||
// This will write the trailer a well.
|
||||
myMuxer.WriteTrailer()
|
||||
// Update the last duration and last recording time.
|
||||
// This is used to determine if we need to start a new recording.
|
||||
lastRecordingTime = pkt.CurrentTime
|
||||
|
||||
// This will close the recording and write the last packet.
|
||||
mp4Video.Close(&config)
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): file save: " + name)
|
||||
|
||||
lastDuration = pkt.Time
|
||||
lastRecordingTime = time.Now().Unix()
|
||||
file.Close()
|
||||
file = nil
|
||||
// Update the name of the recording with the duration.
|
||||
// We will update the name of the recording with the duration in milliseconds.
|
||||
if mp4Video.VideoTotalDuration > 0 {
|
||||
duration := mp4Video.VideoTotalDuration
|
||||
|
||||
// Check if need to convert to fragmented using bento
|
||||
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
|
||||
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
|
||||
// Update the name with the duration in milliseconds.
|
||||
s := strconv.FormatInt(displayTimeSeconds, 10) + "_" +
|
||||
strconv.Itoa(len(strconv.FormatInt(displayTimeMilliseconds, 10))) + "-" +
|
||||
strconv.FormatInt(displayTimeMilliseconds, 10) + "_" +
|
||||
config.Name + "_" +
|
||||
motionRectangleString + "_" +
|
||||
strconv.Itoa(numberOfChanges) + "_" + // number of changes
|
||||
strconv.FormatInt(int64(duration), 10) // + "_" + // duration of recording in milliseconds
|
||||
//utils.VERSION // version of the agent
|
||||
|
||||
oldName := name
|
||||
name = s + ".mp4"
|
||||
fullName = configDirectory + "/data/recordings/" + name
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): renamed file from: " + oldName + " to: " + name)
|
||||
|
||||
// Rename the file to the new name.
|
||||
err := os.Rename(
|
||||
configDirectory+"/data/recordings/"+oldName,
|
||||
configDirectory+"/data/recordings/"+s+".mp4")
|
||||
|
||||
if err != nil {
|
||||
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error renaming file: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("capture.main.HandleRecordStream(motiondetection): no video data recorded, not renaming file.")
|
||||
}
|
||||
|
||||
// Check if we need to encrypt the recording.
|
||||
@@ -534,6 +654,10 @@ func HandleRecordStream(queue *packets.Queue, configDirectory string, configurat
|
||||
// @Success 200 {object} models.APIResponse
|
||||
func VerifyCamera(c *gin.Context) {
|
||||
|
||||
// Start OpenTelemetry tracing
|
||||
ctxVerifyCamera, span := tracer.Start(context.Background(), "VerifyCamera", trace.WithSpanKind(trace.SpanKindServer))
|
||||
defer span.End()
|
||||
|
||||
var cameraStreams models.CameraStreams
|
||||
err := c.BindJSON(&cameraStreams)
|
||||
|
||||
@@ -559,12 +683,11 @@ func VerifyCamera(c *gin.Context) {
|
||||
Url: rtspUrl,
|
||||
}
|
||||
|
||||
err := rtspClient.Connect(ctx)
|
||||
err := rtspClient.Connect(ctx, ctxVerifyCamera)
|
||||
if err == nil {
|
||||
|
||||
// Get the streams from the rtsp client.
|
||||
streams, _ := rtspClient.GetStreams()
|
||||
|
||||
videoIdx := -1
|
||||
audioIdx := -1
|
||||
for i, stream := range streams {
|
||||
@@ -575,7 +698,7 @@ func VerifyCamera(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
err := rtspClient.Close()
|
||||
err := rtspClient.Close(ctxVerifyCamera)
|
||||
if err == nil {
|
||||
if videoIdx > -1 {
|
||||
c.JSON(200, models.APIResponse{
|
||||
@@ -634,7 +757,8 @@ func Base64Image(captureDevice *Capture, communication *models.Communication) st
|
||||
var img image.YCbCr
|
||||
img, err = (*rtspClient).DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
imageResized, _ := utils.ResizeImage(&img, 100000)
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
encodedImage = base64.StdEncoding.EncodeToString(bytes)
|
||||
break
|
||||
} else {
|
||||
@@ -695,6 +819,6 @@ func convertPTS(v time.Duration) uint64 {
|
||||
return uint64(v.Milliseconds())
|
||||
}
|
||||
|
||||
func convertPTS2(v int64) uint64 {
|
||||
/*func convertPTS2(v int64) uint64 {
|
||||
return uint64(v) / 100
|
||||
}
|
||||
}*/
|
||||
|
||||
@@ -131,7 +131,7 @@ func HandleUpload(configDirectory string, configuration *models.Configuration, c
|
||||
log.Log.Error("HandleUpload: " + err.Error())
|
||||
}
|
||||
} else {
|
||||
delay = 20 * time.Second // slow down
|
||||
delay = 5 * time.Second // slow down
|
||||
if err != nil {
|
||||
log.Log.Error("HandleUpload: " + err.Error())
|
||||
}
|
||||
@@ -672,6 +672,7 @@ func HandleLiveStreamSD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
// Check if we need to enable the live stream
|
||||
if config.Capture.Liveview != "false" {
|
||||
|
||||
deviceId := config.Key
|
||||
hubKey := ""
|
||||
if config.Cloud == "s3" && config.S3 != nil && config.S3.Publickey != "" {
|
||||
hubKey = config.S3.Publickey
|
||||
@@ -705,25 +706,79 @@ func HandleLiveStreamSD(livestreamCursor *packets.QueueCursor, configuration *mo
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): Sending base64 encoded images to MQTT.")
|
||||
img, err := rtspClient.DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
encoded := base64.StdEncoding.EncodeToString(bytes)
|
||||
imageResized, _ := utils.ResizeImage(&img, 100000)
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = encoded
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
chunking := config.Capture.LiveviewChunking
|
||||
|
||||
if chunking == "true" {
|
||||
|
||||
// Split encoded image into chunks of 2kb
|
||||
// This is to prevent the MQTT message to be too large.
|
||||
// By default, bytes are not encoded to base64 here; you are splitting the raw JPEG/PNG bytes.
|
||||
// However, in MQTT and web contexts, binary data may not be handled well, so base64 is often used.
|
||||
// To avoid base64 encoding, just send the raw []byte chunks as you do here.
|
||||
// If you want to avoid base64, make sure the receiver can handle binary payloads.
|
||||
|
||||
chunkSize := 25 * 1024 // 25KB chunks
|
||||
var chunks [][]byte
|
||||
for i := 0; i < len(bytes); i += chunkSize {
|
||||
end := i + chunkSize
|
||||
if end > len(bytes) {
|
||||
end = len(bytes)
|
||||
}
|
||||
chunk := bytes[i:end]
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
log.Log.Infof("cloud.HandleLiveStreamSD(): Sending %d chunks of size %d bytes.", len(chunks), chunkSize)
|
||||
|
||||
timestamp := time.Now().Unix()
|
||||
for i, chunk := range chunks {
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["id"] = timestamp
|
||||
valueMap["chunk"] = chunk
|
||||
valueMap["chunkIndex"] = i
|
||||
valueMap["chunkSize"] = chunkSize
|
||||
valueMap["chunkCount"] = len(chunks)
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Version: "v1.0.0",
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: deviceId,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey+"/"+deviceId, 1, false, payload)
|
||||
log.Log.Infof("cloud.HandleLiveStreamSD(): sent chunk %d/%d to MQTT topic kerberos/hub/%s/%s", i+1, len(chunks), hubKey, deviceId)
|
||||
time.Sleep(33 * time.Millisecond) // Sleep to avoid flooding the MQTT broker with messages
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
valueMap["image"] = bytes
|
||||
message := models.Message{
|
||||
Payload: models.Payload{
|
||||
Action: "receive-sd-stream",
|
||||
DeviceId: configuration.Config.Key,
|
||||
Value: valueMap,
|
||||
},
|
||||
}
|
||||
payload, err := models.PackageMQTTMessage(configuration, message)
|
||||
if err == nil {
|
||||
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
|
||||
} else {
|
||||
log.Log.Info("cloud.HandleLiveStreamSD(): something went wrong while sending acknowledge config to hub: " + string(payload))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
time.Sleep(1000 * time.Millisecond) // Sleep to avoid flooding the MQTT broker with messages
|
||||
}
|
||||
|
||||
} else {
|
||||
@@ -810,7 +865,8 @@ func HandleRealtimeProcessing(processingCursor *packets.QueueCursor, configurati
|
||||
log.Log.Info("cloud.RealtimeProcessing(): Sending base64 encoded images to MQTT.")
|
||||
img, err := rtspClient.DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
imageResized, _ := utils.ResizeImage(&img, 100000)
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
encoded := base64.StdEncoding.EncodeToString(bytes)
|
||||
|
||||
valueMap := make(map[string]interface{})
|
||||
|
||||
@@ -3,14 +3,20 @@ package cloud
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
)
|
||||
|
||||
// We will count the number of retries we have done.
|
||||
// If we have done more than "kstorageRetryPolicy" retries, we will stop, and start sending to the secondary storage.
|
||||
var kstorageRetryCount = 0
|
||||
var kstorageRetryTimeout = time.Now().Unix()
|
||||
|
||||
func UploadKerberosVault(configuration *models.Configuration, fileName string) (bool, bool, error) {
|
||||
|
||||
config := configuration.Config
|
||||
@@ -19,7 +25,7 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
config.KStorage.SecretAccessKey == "" ||
|
||||
config.KStorage.Directory == "" ||
|
||||
config.KStorage.URI == "" {
|
||||
err := "UploadKerberosVault: Kerberos Vault not properly configured."
|
||||
err := "UploadKerberosVault: Kerberos Vault not properly configured"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
@@ -42,7 +48,7 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
defer file.Close()
|
||||
}
|
||||
if err != nil {
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore."
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
@@ -52,76 +58,95 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
publicKey = config.HubKey
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorage.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorage.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorage.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
|
||||
// We need to check if we are in a retry timeout.
|
||||
if kstorageRetryTimeout <= time.Now().Unix() {
|
||||
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
req.Header.Set("Content-Type", "video/mp4")
|
||||
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
|
||||
req.Header.Set("X-Kerberos-Storage-AccessKey", config.KStorage.AccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-SecretAccessKey", config.KStorage.SecretAccessKey)
|
||||
req.Header.Set("X-Kerberos-Storage-Provider", config.KStorage.Provider)
|
||||
req.Header.Set("X-Kerberos-Storage-FileName", fileName)
|
||||
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
|
||||
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
|
||||
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
var client *http.Client
|
||||
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
}
|
||||
client = &http.Client{Transport: tr}
|
||||
} else {
|
||||
client = &http.Client{}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
resp, err := client.Do(req)
|
||||
if resp != nil {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
kstorageRetryCount = 0
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
// We increase the retry count, and set the timeout.
|
||||
// If we have reached the retry policy, we set the timeout.
|
||||
// This means we will not retry for the next 5 minutes.
|
||||
if kstorageRetryCount < config.KStorage.MaxRetries {
|
||||
kstorageRetryCount = (kstorageRetryCount + 1)
|
||||
}
|
||||
if kstorageRetryCount == config.KStorage.MaxRetries {
|
||||
kstorageRetryTimeout = time.Now().Add(time.Duration(config.KStorage.Timeout) * time.Second).Unix()
|
||||
}
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + err.Error())
|
||||
}
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed, " + err.Error())
|
||||
}
|
||||
|
||||
// We might need to check if we can upload to our secondary storage.
|
||||
|
||||
if config.KStorageSecondary.AccessKey == "" ||
|
||||
config.KStorageSecondary.SecretAccessKey == "" ||
|
||||
config.KStorageSecondary.Directory == "" ||
|
||||
config.KStorageSecondary.URI == "" {
|
||||
log.Log.Info("UploadKerberosVault: Secondary Kerberos Vault not properly configured.")
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Secondary Kerberos Vault not properly configured.")
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Uploading to Secondary Kerberos Vault (" + config.KStorageSecondary.URI + ")")
|
||||
|
||||
if kstorageRetryCount < config.KStorage.MaxRetries {
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Do not upload to secondary storage, we are still in retry policy.")
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Uploading to Secondary Kerberos Vault (" + config.KStorageSecondary.URI + ")")
|
||||
|
||||
file, err = os.OpenFile(fullname, os.O_RDWR, 0755)
|
||||
if file != nil {
|
||||
defer file.Close()
|
||||
}
|
||||
if err != nil {
|
||||
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore."
|
||||
err := "UploadKerberosVault (Secondary): Upload Failed, file doesn't exists anymore"
|
||||
log.Log.Info(err)
|
||||
return false, false, errors.New(err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", config.KStorageSecondary.URI+"/storage", file)
|
||||
if err != nil {
|
||||
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorageSecondary.URI + "/storage: " + err.Error()
|
||||
errorMessage := "UploadKerberosVault (Secondary): error reading request, " + config.KStorageSecondary.URI + "/storage: " + err.Error()
|
||||
log.Log.Error(errorMessage)
|
||||
return false, true, errors.New(errorMessage)
|
||||
}
|
||||
@@ -152,13 +177,13 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string) (
|
||||
|
||||
if err == nil {
|
||||
if resp != nil {
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
if resp.StatusCode == 200 {
|
||||
log.Log.Info("UploadKerberosVault: Upload Finished to secondary, " + resp.Status + ", " + string(body))
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Upload Finished to secondary, " + resp.Status + ", " + string(body))
|
||||
return true, true, nil
|
||||
} else {
|
||||
log.Log.Info("UploadKerberosVault: Upload Failed to secondary, " + resp.Status + ", " + string(body))
|
||||
log.Log.Info("UploadKerberosVault (Secondary): Upload Failed to secondary, " + resp.Status + ", " + string(body))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
mqtt "github.com/eclipse/paho.mqtt.golang"
|
||||
"github.com/gin-gonic/gin"
|
||||
"go.opentelemetry.io/otel"
|
||||
|
||||
"github.com/kerberos-io/agent/machinery/src/capture"
|
||||
"github.com/kerberos-io/agent/machinery/src/cloud"
|
||||
@@ -23,9 +24,15 @@ import (
|
||||
"github.com/tevino/abool"
|
||||
)
|
||||
|
||||
func Bootstrap(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
var tracer = otel.Tracer("github.com/kerberos-io/agent/machinery/src/components")
|
||||
|
||||
func Bootstrap(ctx context.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
|
||||
|
||||
log.Log.Debug("components.Kerberos.Bootstrap(): bootstrapping the kerberos agent.")
|
||||
|
||||
bootstrapContext := context.Background()
|
||||
_, span := tracer.Start(bootstrapContext, "Bootstrap")
|
||||
|
||||
// We will keep track of the Kerberos Agent up time
|
||||
// This is send to Kerberos Hub in a heartbeat.
|
||||
uptimeStart := time.Now()
|
||||
@@ -78,6 +85,8 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
// Configure a MQTT client which helps for a bi-directional communication
|
||||
mqttClient := routers.ConfigureMQTT(configDirectory, configuration, communication)
|
||||
|
||||
span.End()
|
||||
|
||||
// Run the agent and fire up all the other
|
||||
// goroutines which do image capture, motion detection, onvif, etc.
|
||||
for {
|
||||
@@ -114,6 +123,9 @@ func Bootstrap(configDirectory string, configuration *models.Configuration, comm
|
||||
|
||||
func RunAgent(configDirectory string, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, uptimeStart time.Time, cameraSettings *models.Camera, captureDevice *capture.Capture) string {
|
||||
|
||||
ctx := context.Background()
|
||||
ctxRunAgent, span := tracer.Start(ctx, "RunAgent")
|
||||
|
||||
log.Log.Info("components.Kerberos.RunAgent(): Creating camera and processing threads.")
|
||||
config := configuration.Config
|
||||
|
||||
@@ -124,10 +136,10 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
rtspUrl := config.Capture.IPCamera.RTSP
|
||||
rtspClient := captureDevice.SetMainClient(rtspUrl)
|
||||
if rtspUrl != "" {
|
||||
err := rtspClient.Connect(context.Background())
|
||||
err := rtspClient.Connect(ctx, ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP stream: " + err.Error())
|
||||
rtspClient.Close()
|
||||
rtspClient.Close(ctxRunAgent)
|
||||
rtspClient = nil
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
@@ -145,7 +157,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
videoStreams, err := rtspClient.GetVideoStreams()
|
||||
if err != nil || len(videoStreams) == 0 {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): no video stream found, might be the wrong codec (we only support H264 for the moment)")
|
||||
rtspClient.Close()
|
||||
rtspClient.Close(ctxRunAgent)
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
@@ -161,6 +173,12 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
configuration.Config.Capture.IPCamera.Width = width
|
||||
configuration.Config.Capture.IPCamera.Height = height
|
||||
|
||||
// Set the SPS and PPS values in the configuration.
|
||||
configuration.Config.Capture.IPCamera.SPSNALUs = [][]byte{videoStream.SPS}
|
||||
configuration.Config.Capture.IPCamera.PPSNALUs = [][]byte{videoStream.PPS}
|
||||
configuration.Config.Capture.IPCamera.VPSNALUs = [][]byte{videoStream.VPS}
|
||||
|
||||
// Define queues for the main and sub stream.
|
||||
var queue *packets.Queue
|
||||
var subQueue *packets.Queue
|
||||
|
||||
@@ -182,7 +200,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
rtspSubClient := captureDevice.SetSubClient(subRtspUrl)
|
||||
captureDevice.RTSPSubClient = rtspSubClient
|
||||
|
||||
err := rtspSubClient.Connect(context.Background())
|
||||
err := rtspSubClient.Connect(ctx, ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP sub stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -194,7 +212,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
videoSubStreams, err = rtspSubClient.GetVideoStreams()
|
||||
if err != nil || len(videoSubStreams) == 0 {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): no video sub stream found, might be the wrong codec (we only support H264 for the moment)")
|
||||
rtspSubClient.Close()
|
||||
rtspSubClient.Close(ctxRunAgent)
|
||||
time.Sleep(time.Second * 3)
|
||||
return status
|
||||
}
|
||||
@@ -217,28 +235,28 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
// Set the maximum GOP count, this is used to determine the pre-recording time.
|
||||
log.Log.Info("components.Kerberos.RunAgent(): SetMaxGopCount was set with: " + strconv.Itoa(int(config.Capture.PreRecording)+1))
|
||||
queue.SetMaxGopCount(int(config.Capture.PreRecording) + 1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
|
||||
queue.SetMaxGopCount(1) // We will adjust this later on, when we have the GOP size.
|
||||
queue.WriteHeader(videoStreams)
|
||||
go rtspClient.Start(context.Background(), "main", queue, configuration, communication)
|
||||
go rtspClient.Start(ctx, "main", queue, configuration, communication)
|
||||
|
||||
// Main stream is connected and ready to go.
|
||||
communication.MainStreamConnected = true
|
||||
|
||||
// Try to create backchannel
|
||||
rtspBackChannelClient := captureDevice.SetBackChannelClient(rtspUrl)
|
||||
err = rtspBackChannelClient.ConnectBackChannel(context.Background())
|
||||
err = rtspBackChannelClient.ConnectBackChannel(ctx, ctxRunAgent)
|
||||
if err == nil {
|
||||
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP backchannel stream: " + rtspUrl)
|
||||
go rtspBackChannelClient.StartBackChannel(context.Background())
|
||||
go rtspBackChannelClient.StartBackChannel(ctx, ctxRunAgent)
|
||||
}
|
||||
|
||||
rtspSubClient := captureDevice.RTSPSubClient
|
||||
if subStreamEnabled && rtspSubClient != nil {
|
||||
subQueue = packets.NewQueue()
|
||||
communication.SubQueue = subQueue
|
||||
subQueue.SetMaxGopCount(3) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
|
||||
subQueue.SetMaxGopCount(1) // GOP time frame is set to 1 for motion detection and livestreaming.
|
||||
subQueue.WriteHeader(videoSubStreams)
|
||||
go rtspSubClient.Start(context.Background(), "sub", subQueue, configuration, communication)
|
||||
go rtspSubClient.Start(ctx, "sub", subQueue, configuration, communication)
|
||||
|
||||
// Sub stream is connected and ready to go.
|
||||
communication.SubStreamConnected = true
|
||||
@@ -301,6 +319,9 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
// If we reach this point, we have a working RTSP connection.
|
||||
communication.CameraConnected = true
|
||||
|
||||
// Otel end span
|
||||
span.End()
|
||||
|
||||
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
// This will go into a blocking state, once this channel is triggered
|
||||
// the agent will cleanup and restart.
|
||||
@@ -344,7 +365,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
|
||||
time.Sleep(time.Second * 3)
|
||||
|
||||
err = rtspClient.Close()
|
||||
err = rtspClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -356,7 +377,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
communication.Queue = nil
|
||||
|
||||
if subStreamEnabled {
|
||||
err = rtspSubClient.Close()
|
||||
err = rtspSubClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP sub stream: " + err.Error())
|
||||
time.Sleep(time.Second * 3)
|
||||
@@ -367,7 +388,7 @@ func RunAgent(configDirectory string, configuration *models.Configuration, commu
|
||||
communication.SubQueue = nil
|
||||
}
|
||||
|
||||
err = rtspBackChannelClient.Close()
|
||||
err = rtspBackChannelClient.Close(ctxRunAgent)
|
||||
if err != nil {
|
||||
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP backchannel stream: " + err.Error())
|
||||
}
|
||||
@@ -677,7 +698,8 @@ func GetSnapshotRaw(c *gin.Context, captureDevice *capture.Capture, configuratio
|
||||
image := capture.JpegImage(captureDevice, communication)
|
||||
|
||||
// encode image to jpeg
|
||||
bytes, _ := utils.ImageToBytes(&image)
|
||||
imageResized, _ := utils.ResizeImage(&image, 100000)
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
|
||||
// Return image/jpeg
|
||||
c.Data(200, "image/jpeg", bytes)
|
||||
|
||||
@@ -21,6 +21,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
|
||||
var isPixelChangeThresholdReached = false
|
||||
var changesToReturn = 0
|
||||
var motionRectangle models.MotionRectangle
|
||||
|
||||
pixelThreshold := config.Capture.PixelChangeThreshold
|
||||
// Might not be set in the config file, so set it to 150
|
||||
@@ -132,7 +133,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
if detectMotion {
|
||||
|
||||
// Remember additional information about the result of findmotion
|
||||
isPixelChangeThresholdReached, changesToReturn = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
|
||||
isPixelChangeThresholdReached, changesToReturn, motionRectangle = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
|
||||
if isPixelChangeThresholdReached {
|
||||
|
||||
// If offline mode is disabled, send a message to the hub
|
||||
@@ -164,6 +165,7 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
dataToPass := models.MotionDataPartial{
|
||||
Timestamp: time.Now().Unix(),
|
||||
NumberOfChanges: changesToReturn,
|
||||
Rectangle: motionRectangle,
|
||||
}
|
||||
communication.HandleMotion <- dataToPass //Save data to the channel
|
||||
}
|
||||
@@ -185,24 +187,58 @@ func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Conf
|
||||
log.Log.Debug("computervision.main.ProcessMotion(): stop the motion detection.")
|
||||
}
|
||||
|
||||
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int) {
|
||||
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int, motionRectangle models.MotionRectangle) {
|
||||
image1 := imageArray[0]
|
||||
image2 := imageArray[1]
|
||||
image3 := imageArray[2]
|
||||
threshold := 60
|
||||
changes := AbsDiffBitwiseAndThreshold(image1, image2, image3, threshold, coordinatesToCheck)
|
||||
return changes > pixelChangeThreshold, changes
|
||||
changes, motionRectangle := AbsDiffBitwiseAndThreshold(image1, image2, image3, threshold, coordinatesToCheck)
|
||||
return changes > pixelChangeThreshold, changes, motionRectangle
|
||||
}
|
||||
|
||||
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) int {
|
||||
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) (int, models.MotionRectangle) {
|
||||
changes := 0
|
||||
var pixelList [][]int
|
||||
for i := 0; i < len(coordinatesToCheck); i++ {
|
||||
pixel := coordinatesToCheck[i]
|
||||
diff := int(img3.Pix[pixel]) - int(img1.Pix[pixel])
|
||||
diff2 := int(img3.Pix[pixel]) - int(img2.Pix[pixel])
|
||||
if (diff > threshold || diff < -threshold) && (diff2 > threshold || diff2 < -threshold) {
|
||||
changes++
|
||||
// Store the pixel coordinates where the change is detected
|
||||
pixelList = append(pixelList, []int{pixel % img1.Bounds().Dx(), pixel / img1.Bounds().Dx()})
|
||||
}
|
||||
}
|
||||
return changes
|
||||
|
||||
// Calculate rectangle of pixelList (startX, startY, endX, endY)
|
||||
var motionRectangle models.MotionRectangle
|
||||
if len(pixelList) > 0 {
|
||||
startX := pixelList[0][0]
|
||||
startY := pixelList[0][1]
|
||||
endX := startX
|
||||
endY := startY
|
||||
for _, pixel := range pixelList {
|
||||
if pixel[0] < startX {
|
||||
startX = pixel[0]
|
||||
}
|
||||
if pixel[1] < startY {
|
||||
startY = pixel[1]
|
||||
}
|
||||
if pixel[0] > endX {
|
||||
endX = pixel[0]
|
||||
}
|
||||
if pixel[1] > endY {
|
||||
endY = pixel[1]
|
||||
}
|
||||
}
|
||||
log.Log.Debugf("Rectangle of changes detected: startX: %d, startY: %d, endX: %d, endY: %d", startX, startY, endX, endY)
|
||||
motionRectangle = models.MotionRectangle{
|
||||
X: startX,
|
||||
Y: startY,
|
||||
Width: endX - startX,
|
||||
Height: endY - startY,
|
||||
}
|
||||
log.Log.Debugf("Motion rectangle: %+v", motionRectangle)
|
||||
}
|
||||
return changes, motionRectangle
|
||||
}
|
||||
|
||||
@@ -392,6 +392,11 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.MQTTPassword = value
|
||||
break
|
||||
|
||||
/* MQTT chunking of low-resolution images into multiple messages */
|
||||
case "AGENT_CAPTURE_LIVEVIEW_CHUNKING":
|
||||
configuration.Config.Capture.LiveviewChunking = value
|
||||
break
|
||||
|
||||
/* Real-time streaming of keyframes to a MQTT topic */
|
||||
case "AGENT_REALTIME_PROCESSING":
|
||||
configuration.Config.RealtimeProcessing = value
|
||||
@@ -463,6 +468,20 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
configuration.Config.KStorage.Directory = value
|
||||
break
|
||||
|
||||
/* Retry policy and timeout */
|
||||
case "AGENT_KERBEROSVAULT_MAX_RETRIES":
|
||||
maxRetries, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
configuration.Config.KStorage.MaxRetries = maxRetries
|
||||
}
|
||||
break
|
||||
case "AGENT_KERBEROSVAULT_TIMEOUT":
|
||||
timeout, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
configuration.Config.KStorage.Timeout = timeout
|
||||
}
|
||||
break
|
||||
|
||||
/* When storing in a secondary Vault */
|
||||
case "AGENT_KERBEROSVAULT_SECONDARY_URI":
|
||||
configuration.Config.KStorageSecondary.URI = value
|
||||
@@ -505,9 +524,26 @@ func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
|
||||
case "AGENT_ENCRYPTION_SYMMETRIC_KEY":
|
||||
configuration.Config.Encryption.SymmetricKey = value
|
||||
break
|
||||
|
||||
/* When signing is enabled */
|
||||
case "AGENT_SIGNING":
|
||||
configuration.Config.Signing.Enabled = value
|
||||
break
|
||||
case "AGENT_SIGNING_PRIVATE_KEY":
|
||||
signingPrivateKey := strings.ReplaceAll(value, "\\n", "\n")
|
||||
configuration.Config.Signing.PrivateKey = signingPrivateKey
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Signing is a new feature, so if empty we set default values.
|
||||
if configuration.Config.Signing == nil || configuration.Config.Signing.PrivateKey == "" {
|
||||
configuration.Config.Signing = &models.Signing{
|
||||
Enabled: "true",
|
||||
PrivateKey: "-----BEGIN PRIVATE KEY-----\nMIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDoSxjyw08lRxF4Yoqmcaewjq3XjB55dMy4tlN5MGLdr8aAPuNR9Mwh3jlh1bDpwQXNgZkHDV/q9bpdPGGi7SQo2xw+rDuo5Y1f3wdzz+iuCTPbzoGFalE+1PZlU5TEtUtlbt7MRc4pxTaLP3u0P3EtW3KnzcUarcJWZJYxzv7gqVNCA/47BN+1ptqjwz3LAlah5yaftEvVjkaANOsafUswbS4VT44XfSlbKgebORCKDuNgQiyhuV5gU+J0TOaqRWwwMAWV0UoScyJLfhHRBCrUwrCUTwqH9jfkB7pgRFsYoZJd4MKMeHJjFSum+QXCBqInSnwu8c2kJChiLMWqJ+mhpTdfUAmSkeUSStfbbcavIPbDABvMgzOcmYMIVXXe57twU0xdu3AqWLtc9kw1BkUgZblM9pSSpYrIDheEyMs2/hiLgXsIaM0nVQtqwrA7rbeEGuPblzA6hvHgwN9K6HaBqdlGSlpYZ0v3SWIMwmxRB+kIojlyuggm8Qa4mqL97GFDGl6gOBGlNUFTBUVEa3EaJ7NJpGobRGsh/9dXzcW4aYmT9WxlzTlIKksI1ro6KdRfuVWfEs4AnG8bVEJmofK8EUrueB9IdXlcJZB49xolnOZPFohtMe/0U7evQOQP3sZnX+KotCsE7OXJvL09oF58JKoqmK9lPp0+pFBU4g6NjQIDAQABAoICAA+RSWph1t+q5R3nxUxFTYMrhv5IjQe2mDxJpF3B409zolC9OHxgGUisobTY3pBqs0DtKbxUeH2A0ehUH/axEosWHcz3cmIbgxHE9kdlJ9B3Lmss6j/uw+PWutu1sgm5phaIFIvuNNRWhPB6yXUwU4sLRat1+Z9vTmIQiKdtLIrtJz/n2VDvrJxn1N+yAsE20fnrksFKyZuxVsJaZPiX/t5Yv1/z0LjFjVoL7GUA5/Si7csN4ftqEhUrkNr2BvcZlTyffrF4lZCXrtl76RNUaxhqIu3H0gFbV2UfBpuckkfAhNRpXJ4iFSxm4nQbk4ojV8+l21RFOBeDN2Z7Ocu6auP5MnzpopR66vmDCmPoid498VGgDzFQEVkOar8WAa4v9h85QgLKrth6FunmaWJUT6OggQD3yY58GSwp5+ARMETMBP2x6Eld+PGgqoJvPT1+l/e9gOw7/SJ+Wz6hRXZAm/eiXMppHtB7sfea5rscNanPjJkK9NvPM0MX9cq/iA6QjXuETkMbubjo+Cxk3ydZiIQmWQDAx/OgxTyHbeRCVhLPcAphX0clykCuHZpI9Mvvj643/LoE0mjTByWJXf/WuGJA8ElHkjSdokVJ7jumz8OZZHfq0+V7+la2opsObeQANHW5MLWrnHlRVzTGV0IRZDXh7h1ptUJ4ubdvw/GJ2NeTAoIBAQD0lXXdjYKWC4uZ4YlgydP8b1CGda9cBV5RcPt7q9Ya1R2E4ieYyohmzltopvdaOXdsTZzhtdzOzKF+2qNcbBKhBTleYZ8GN5RKbo7HwXWpzfCTjseKHOD/QPwvBKXzLVWNtXn1NrLR79Rv0wbkYF6DtoqpEPf5kMs4bx79yW+mz8FUgdEeMjKphx6Jd5RYlTUxS64K6bnK7gjHNCF2cwdxsh4B6EB649GKeNz4JXi+oQBmOcX5ncXnkJrbju+IjtCkQ40HINVNdX7XeEaaw6KGaImVjw61toPUuDaioYUojufayoyXaUJnDbHQ2tNekEpq5iwnenZCbUKWmSeRe7dLAoIBAQDzIscYujsrmPxiTj2prhG0v36NRNP99mShnnJGowiIs+UBS0EMdOmBFa2sC9uFs/VnreQNYPDJdfr7O5VK9kfbH/PSiiKJ+wVebfdAlWkJYH27JN2Kl2l/OsvRVelNvF3BWIYF46qzGxIM0axaz3T2ZAJ9SrUgeAYhak6uyM4fbexEWX
xDgPGu6C0jB6IAzmHJnnh+j5+4ZXqjVyUxBYtUsWXF/TXomVcT9jxj7aUmS2/Us0XTVOVNpALqqYcekrzsX/wX0OEi5HkivYXHcNaDHx3NuUf6KdYof5DwPUM76qe+5/kWlSIHP3M6rIFK3pYFUnkHn2E8jNWcO97Aio+HAoIBAA+bcff/TbPxbKkXIUMR3fsfx02tONFwbkJYKVQM9Q6lRsrx+4Dee7HDvUWCUgpp3FsG4NnuVvbDTBLiNMZzBwVLZgvFwvYMmePeBjJs/+sj/xQLamQ/z4O6S91cOJK589mlGPEy2lpXKYExQCFWnPFetp5vPMOqH62sOZgMQJmubDHOTt/UaDM1Mhenj8nPS6OnpqV/oKF4awr7Ip+CW5k/unZ4sZSl8PsbF06mZXwUngfn6+Av1y8dpSQZjONz6ZBx1w/7YmEc/EkXnbnGfhqBlTX7+P5TdTofvyzFjc+2vsjRYANRbjFRSGWBcTd5kaYcpfim8eDvQ+6EO2gnMt0CggEAH2ln1Y8B5AEQ4lZ/avOdP//ZhsDUrqPtnl/NHckkahzrwj4JumVEYbP+SxMBGoYEd4+kvgG/OhfvBBRPlm65G9tF8fZ8vdzbdba5UfO7rUV1GP+LS8OCErjy6imySaPDbR5Vul8Oh7NAor1YCidxUf/bvnovanF3QUvtvHEfCDp4YuA4yLPZBaLjaforePUw9w5tPNSravRZYs74dBvmQ1vj7S9ojpN5B5AxfyuNwaPPX+iFZec69MvywISEe3Ozysof1Kfc3lgsOkvIA9tVK32SqSh93xkWnQbWH+OaUxxe7bAko0FDMzKEXZk53wVg1nEwR8bUljEPy+6EOdXs8wKCAQEAsEOWYMY5m7HkeG2XTTvX7ECmmdGl/c4ZDVwzB4IPxqUG7XfLmtsON8YoKOEUpJoc4ANafLXzmU+esUGbH4Ph22IWgP9jzws7jxaN/Zoku64qrSjgEZFTRIpKyhFk/ImWbS9laBW4l+m0tqTTRqoE0QEJf/2uv/04q65zrA70X9z2+KTrAtqOiRQPWl/IxRe9U4OEeGL+oD+YlXKCDsnJ3rwUIOZgJx0HWZg7K35DKwqs1nVi56FBdljiTRKAjVLRedjgDCSfGS1yUZ3krHzpaPt1qgnT3rdtYcIdbYDr66V2/gEEaz6XMGHuTk/ewjzUJxq9UTVeXOCbkRPXgVJg1w==\n-----END PRIVATE KEY-----",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func SaveConfig(configDirectory string, config models.Config, configuration *models.Configuration, communication *models.Communication) error {
|
||||
|
||||
@@ -118,6 +118,16 @@ func (self *Logging) Info(sentence string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Infof(format string, args ...interface{}) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
gologging.Infof(format, args...)
|
||||
case "logrus":
|
||||
logrus.Infof(format, args...)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Warning(sentence string) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
@@ -138,6 +148,16 @@ func (self *Logging) Debug(sentence string) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Debugf(format string, args ...interface{}) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
gologging.Debugf(format, args...)
|
||||
case "logrus":
|
||||
logrus.Debugf(format, args...)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (self *Logging) Error(sentence string) {
|
||||
switch self.Logger {
|
||||
case "go-logging":
|
||||
|
||||
@@ -46,6 +46,7 @@ type Config struct {
|
||||
HubSite string `json:"hub_site" bson:"hub_site"`
|
||||
ConditionURI string `json:"condition_uri" bson:"condition_uri"`
|
||||
Encryption *Encryption `json:"encryption,omitempty" bson:"encryption,omitempty"`
|
||||
Signing *Signing `json:"signing,omitempty" bson:"signing,omitempty"`
|
||||
RealtimeProcessing string `json:"realtimeprocessing,omitempty" bson:"realtimeprocessing,omitempty"`
|
||||
RealtimeProcessingTopic string `json:"realtimeprocessing_topic" bson:"realtimeprocessing_topic"`
|
||||
}
|
||||
@@ -61,9 +62,11 @@ type Capture struct {
|
||||
Snapshots string `json:"snapshots,omitempty"`
|
||||
Motion string `json:"motion,omitempty"`
|
||||
Liveview string `json:"liveview,omitempty"`
|
||||
LiveviewChunking string `json:"liveview_chunking,omitempty" bson:"liveview_chunking,omitempty"`
|
||||
Continuous string `json:"continuous,omitempty"`
|
||||
PostRecording int64 `json:"postrecording"`
|
||||
PreRecording int64 `json:"prerecording"`
|
||||
GopSize int `json:"gopsize,omitempty" bson:"gopsize,omitempty"` // GOP size in seconds, used for pre-recording
|
||||
MaxLengthRecording int64 `json:"maxlengthrecording"`
|
||||
TranscodingWebRTC string `json:"transcodingwebrtc"`
|
||||
TranscodingResolution int64 `json:"transcodingresolution"`
|
||||
@@ -76,18 +79,23 @@ type Capture struct {
|
||||
// IPCamera configuration, such as the RTSP url of the IPCamera and the FPS.
|
||||
// Also includes ONVIF integration
|
||||
type IPCamera struct {
|
||||
RTSP string `json:"rtsp"`
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
FPS string `json:"fps"`
|
||||
SubRTSP string `json:"sub_rtsp"`
|
||||
SubWidth int `json:"sub_width"`
|
||||
SubHeight int `json:"sub_height"`
|
||||
SubFPS string `json:"sub_fps"`
|
||||
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
|
||||
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
|
||||
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
|
||||
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
|
||||
RTSP string `json:"rtsp"`
|
||||
Width int `json:"width"`
|
||||
Height int `json:"height"`
|
||||
FPS string `json:"fps"`
|
||||
SubRTSP string `json:"sub_rtsp"`
|
||||
SubWidth int `json:"sub_width"`
|
||||
SubHeight int `json:"sub_height"`
|
||||
SubFPS string `json:"sub_fps"`
|
||||
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
|
||||
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
|
||||
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
|
||||
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
|
||||
SPSNALUs [][]byte `json:"sps_nalus,omitempty" bson:"sps_nalus,omitempty"`
|
||||
PPSNALUs [][]byte `json:"pps_nalus,omitempty" bson:"pps_nalus,omitempty"`
|
||||
VPSNALUs [][]byte `json:"vps_nalus,omitempty" bson:"vps_nalus,omitempty"`
|
||||
SampleRate int `json:"sample_rate,omitempty" bson:"sample_rate,omitempty"`
|
||||
Channels int `json:"channels,omitempty" bson:"channels,omitempty"`
|
||||
}
|
||||
|
||||
// USBCamera configuration, such as the device path (/dev/video*)
|
||||
@@ -159,6 +167,8 @@ type KStorage struct {
|
||||
SecretAccessKey string `json:"secret_access_key,omitempty" bson:"secret_access_key,omitempty"`
|
||||
Provider string `json:"provider,omitempty" bson:"provider,omitempty"`
|
||||
Directory string `json:"directory,omitempty" bson:"directory,omitempty"`
|
||||
MaxRetries int `json:"max_retries,omitempty" bson:"max_retries,omitempty"`
|
||||
Timeout int `json:"timeout,omitempty" bson:"timeout,omitempty"`
|
||||
}
|
||||
|
||||
// Dropbox integration
|
||||
@@ -175,3 +185,9 @@ type Encryption struct {
|
||||
PrivateKey string `json:"private_key" bson:"private_key"`
|
||||
SymmetricKey string `json:"symmetric_key" bson:"symmetric_key"`
|
||||
}
|
||||
|
||||
// Signing
|
||||
type Signing struct {
|
||||
Enabled string `json:"enabled" bson:"enabled"`
|
||||
PrivateKey string `json:"private_key" bson:"private_key"`
|
||||
}
|
||||
|
||||
@@ -132,6 +132,7 @@ type Message struct {
|
||||
// The payload structure which is used to send over
|
||||
// and receive messages from the MQTT broker
|
||||
type Payload struct {
|
||||
Version string `json:"version"` // Version of the message, e.g. "1.0"
|
||||
Action string `json:"action"`
|
||||
DeviceId string `json:"device_id"`
|
||||
Signature string `json:"signature"`
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
package models
|
||||
|
||||
type MotionDataPartial struct {
|
||||
Timestamp int64 `json:"timestamp" bson:"timestamp"`
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Timestamp int64 `json:"timestamp" bson:"timestamp"`
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Rectangle MotionRectangle `json:"rectangle" bson:"rectangle"`
|
||||
}
|
||||
|
||||
type MotionDataFull struct {
|
||||
@@ -14,3 +15,10 @@ type MotionDataFull struct {
|
||||
NumberOfChanges int `json:"numberOfChanges" bson:"numberOfChanges"`
|
||||
Token int `json:"token" bson:"token"`
|
||||
}
|
||||
|
||||
type MotionRectangle struct {
|
||||
X int `json:"x" bson:"x"`
|
||||
Y int `json:"y" bson:"y"`
|
||||
Width int `json:"width" bson:"width"`
|
||||
Height int `json:"height" bson:"height"`
|
||||
}
|
||||
|
||||
@@ -17,5 +17,7 @@ type Packet struct {
|
||||
CompositionTime int64 // packet presentation time minus decode time for H264 B-Frame
|
||||
Time int64 // packet decode time
|
||||
TimeLegacy time.Duration
|
||||
CurrentTime int64 // current time in milliseconds (UNIX timestamp)
|
||||
Data []byte // packet data
|
||||
Gopsize int // size of the GOP
|
||||
}
|
||||
|
||||
@@ -45,6 +45,11 @@ func (self *Queue) SetMaxGopCount(n int) {
|
||||
return
|
||||
}
|
||||
|
||||
func (self *Queue) GetMaxGopCount() int {
|
||||
n := self.maxgopcount
|
||||
return n
|
||||
}
|
||||
|
||||
func (self *Queue) WriteHeader(streams []Stream) error {
|
||||
self.lock.Lock()
|
||||
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package packets
|
||||
|
||||
type Stream struct {
|
||||
// The ID of the stream.
|
||||
Index int `json:"index" bson:"index"`
|
||||
|
||||
// The name of the stream.
|
||||
Name string
|
||||
|
||||
@@ -39,4 +42,13 @@ type Stream struct {
|
||||
|
||||
// IsBackChannel is true if this stream is a back channel.
|
||||
IsBackChannel bool
|
||||
|
||||
// SampleRate is the sample rate of the audio stream.
|
||||
SampleRate int
|
||||
|
||||
// Channels is the number of audio channels.
|
||||
Channels int
|
||||
|
||||
// GopSize is the size of the GOP (Group of Pictures).
|
||||
GopSize int
|
||||
}
|
||||
|
||||
@@ -159,7 +159,8 @@ logreader:
|
||||
var img image.YCbCr
|
||||
img, err = (*rtspClient).DecodePacket(pkt)
|
||||
if err == nil {
|
||||
bytes, _ := utils.ImageToBytes(&img)
|
||||
imageResized, _ := utils.ResizeImage(&img, 100000)
|
||||
bytes, _ := utils.ImageToBytes(imageResized)
|
||||
encodedImage = base64.StdEncoding.EncodeToString(bytes)
|
||||
} else {
|
||||
continue
|
||||
|
||||
@@ -21,9 +21,11 @@ import (
|
||||
"github.com/kerberos-io/agent/machinery/src/encryption"
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
|
||||
"github.com/nfnt/resize"
|
||||
)
|
||||
|
||||
const VERSION = "3.3.5"
|
||||
const VERSION = "3.5.0"
|
||||
|
||||
const letterBytes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
|
||||
@@ -401,9 +403,21 @@ func Decrypt(directoryOrFile string, symmetricKey []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
func ImageToBytes(img image.Image) ([]byte, error) {
|
||||
func ImageToBytes(img *image.Image) ([]byte, error) {
|
||||
buffer := new(bytes.Buffer)
|
||||
w := bufio.NewWriter(buffer)
|
||||
err := jpeg.Encode(w, img, &jpeg.Options{Quality: 15})
|
||||
err := jpeg.Encode(w, *img, &jpeg.Options{Quality: 35})
|
||||
log.Log.Debug("ImageToBytes() - buffer size: " + strconv.Itoa(buffer.Len()))
|
||||
return buffer.Bytes(), err
|
||||
}
|
||||
|
||||
func ResizeImage(img image.Image, maxSize uint64) (*image.Image, error) {
|
||||
if img == nil {
|
||||
return nil, errors.New("image is nil")
|
||||
}
|
||||
|
||||
// resize to width 640 using Lanczos resampling
|
||||
// and preserve aspect ratio
|
||||
m := resize.Resize(640, 0, img, resize.Lanczos3)
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
992
machinery/src/video/mp4.go
Normal file
992
machinery/src/video/mp4.go
Normal file
@@ -0,0 +1,992 @@
|
||||
package video
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
mp4ff "github.com/Eyevinn/mp4ff/mp4"
|
||||
"github.com/kerberos-io/agent/machinery/src/encryption"
|
||||
"github.com/kerberos-io/agent/machinery/src/log"
|
||||
"github.com/kerberos-io/agent/machinery/src/models"
|
||||
"github.com/kerberos-io/agent/machinery/src/utils"
|
||||
)
|
||||
|
||||
var LastPTS uint64 = 0 // Last PTS for the current segment
|
||||
|
||||
type MP4 struct {
|
||||
// FileName is the name of the file
|
||||
FileName string
|
||||
width int
|
||||
height int
|
||||
Segments []*mp4ff.MediaSegment // List of media segments
|
||||
Segment *mp4ff.MediaSegment
|
||||
MultiTrackFragment *mp4ff.Fragment
|
||||
TrackIDs []uint32
|
||||
FileWriter *os.File
|
||||
Writer *bufio.Writer
|
||||
SegmentCount int
|
||||
SampleCount int
|
||||
StartPTS uint64
|
||||
VideoTotalDuration uint64
|
||||
AudioTotalDuration uint64
|
||||
AudioPTS uint64
|
||||
Start bool
|
||||
SPSNALUs [][]byte // SPS NALUs for H264
|
||||
PPSNALUs [][]byte // PPS NALUs for H264
|
||||
VPSNALUs [][]byte // VPS NALUs for H264
|
||||
FreeBoxSize int64
|
||||
MoofBoxes int64 // Number of moof boxes in the file
|
||||
MoofBoxSizes []int64 // Sizes of each moof box
|
||||
StartTime uint64 // Start time of the MP4 file
|
||||
VideoTrackName string // Name of the video track
|
||||
VideoTrack int // Track ID for the video track
|
||||
AudioTrackName string // Name of the audio track
|
||||
AudioTrack int // Track ID for the audio track
|
||||
VideoFullSample *mp4ff.FullSample // Full sample for video track
|
||||
AudioFullSample *mp4ff.FullSample // Full sample for audio track
|
||||
LastAudioSampleDTS uint64 // Last PTS for audio sample
|
||||
LastVideoSampleDTS uint64 // Last PTS for video sample
|
||||
SampleType string // Type of the sample (e.g., "video", "audio", "subtitle")
|
||||
}
|
||||
|
||||
// NewMP4 creates a new MP4 object
|
||||
func NewMP4(fileName string, spsNALUs [][]byte, ppsNALUs [][]byte, vpsNALUs [][]byte) *MP4 {
|
||||
|
||||
init := mp4ff.NewMP4Init()
|
||||
|
||||
// Add a free box to the init segment
|
||||
// Prepend a free box to the init segment with a size of 1000
|
||||
freeBoxSize := 2048
|
||||
free := mp4ff.NewFreeBox(make([]byte, freeBoxSize))
|
||||
init.AddChild(free)
|
||||
|
||||
// Create a writer
|
||||
ofd, err := os.Create(fileName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create a buffered writer
|
||||
bufferedWriter := bufio.NewWriterSize(ofd, 64*1024) // 64KB buffer
|
||||
|
||||
// We will write the empty init segment to the file
|
||||
// so we can overwrite it later with the actual init segment.
|
||||
err = init.Encode(bufferedWriter)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &MP4{
|
||||
FileName: fileName,
|
||||
StartTime: uint64(time.Now().Unix()),
|
||||
FreeBoxSize: int64(freeBoxSize),
|
||||
FileWriter: ofd,
|
||||
Writer: bufferedWriter,
|
||||
SPSNALUs: spsNALUs,
|
||||
PPSNALUs: ppsNALUs,
|
||||
VPSNALUs: vpsNALUs,
|
||||
}
|
||||
}
|
||||
|
||||
// SetWidth sets the width of the video
|
||||
func (mp4 *MP4) SetWidth(width int) {
|
||||
// Set the width of the video
|
||||
mp4.width = width
|
||||
}
|
||||
|
||||
// SetHeight sets the height of the video
|
||||
func (mp4 *MP4) SetHeight(height int) {
|
||||
// Set the height of the video
|
||||
mp4.height = height
|
||||
}
|
||||
|
||||
// AddVideoTrack
|
||||
// Add a video track to the MP4 file
|
||||
func (mp4 *MP4) AddVideoTrack(codec string) uint32 {
|
||||
nextTrack := uint32(len(mp4.TrackIDs) + 1)
|
||||
mp4.VideoTrack = int(nextTrack)
|
||||
mp4.TrackIDs = append(mp4.TrackIDs, nextTrack)
|
||||
mp4.VideoTrackName = codec
|
||||
return nextTrack
|
||||
}
|
||||
|
||||
// AddAudioTrack
|
||||
// Add an audio track to the MP4 file
|
||||
func (mp4 *MP4) AddAudioTrack(codec string) uint32 {
|
||||
nextTrack := uint32(len(mp4.TrackIDs) + 1)
|
||||
mp4.AudioTrack = int(nextTrack)
|
||||
mp4.TrackIDs = append(mp4.TrackIDs, nextTrack)
|
||||
mp4.AudioTrackName = codec
|
||||
return nextTrack
|
||||
}
|
||||
|
||||
// AddMediaSegment is an empty placeholder: media segments are currently
// created inside AddSampleToTrack when a video keyframe arrives, so segNr
// is ignored. Kept for interface stability — confirm before removing.
func (mp4 *MP4) AddMediaSegment(segNr int) {
}
|
||||
|
||||
func (mp4 *MP4) AddSampleToTrack(trackID uint32, isKeyframe bool, data []byte, pts uint64) error {
|
||||
|
||||
if isKeyframe {
|
||||
|
||||
// Write the segment to the file
|
||||
if mp4.Start {
|
||||
mp4.MoofBoxes = mp4.MoofBoxes + 1
|
||||
mp4.MoofBoxSizes = append(mp4.MoofBoxSizes, int64(mp4.Segment.Size()))
|
||||
err := mp4.Segment.Encode(mp4.Writer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mp4.Segments = append(mp4.Segments, mp4.Segment)
|
||||
}
|
||||
|
||||
mp4.Start = true
|
||||
|
||||
// Increment the segment count
|
||||
mp4.SegmentCount = mp4.SegmentCount + 1
|
||||
|
||||
// Create a new media segment
|
||||
seg := mp4ff.NewMediaSegment()
|
||||
|
||||
// Create a video fragment
|
||||
multiTrackFragment, err := mp4ff.CreateMultiTrackFragment(uint32(mp4.SegmentCount), mp4.TrackIDs) // Assuming 1 for video track and 2 for audio track
|
||||
if err != nil {
|
||||
}
|
||||
mp4.MultiTrackFragment = multiTrackFragment
|
||||
seg.AddFragment(multiTrackFragment)
|
||||
|
||||
// Set to MP4 struct
|
||||
mp4.Segment = seg
|
||||
|
||||
// Set the start PTS for the next segment
|
||||
mp4.StartPTS = pts
|
||||
}
|
||||
|
||||
if mp4.Start {
|
||||
|
||||
if trackID == uint32(mp4.VideoTrack) {
|
||||
|
||||
var lengthPrefixed []byte
|
||||
var err error
|
||||
if mp4.VideoTrackName == "H264" || mp4.VideoTrackName == "AVC1" { // Convert Annex B to length-prefixed NAL units if H264
|
||||
lengthPrefixed, err = annexBToLengthPrefixed(data)
|
||||
} else if mp4.VideoTrackName == "H265" || mp4.VideoTrackName == "HVC1" { // Convert H265 Annex B to length-prefixed NAL units
|
||||
lengthPrefixed, err = annexBToLengthPrefixed(data)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if mp4.VideoFullSample != nil {
|
||||
duration := pts - mp4.VideoFullSample.DecodeTime
|
||||
log.Log.Debug("Adding sample to track " + fmt.Sprintf("%d, PTS: %d, Duration: %d, size: %d, Keyframe: %t", trackID, pts, duration, len(lengthPrefixed), isKeyframe))
|
||||
|
||||
mp4.LastVideoSampleDTS = duration
|
||||
//fmt.Printf("Adding sample to track %d, PTS: %d, Duration: %d, size: %d, Keyframe: %t\n", trackID, pts, duration, len(mp4.VideoFullSample.Data), isKeyframe)
|
||||
mp4.VideoTotalDuration += duration
|
||||
mp4.VideoFullSample.DecodeTime = mp4.VideoTotalDuration - duration
|
||||
mp4.VideoFullSample.Sample.Dur = uint32(duration)
|
||||
err := mp4.MultiTrackFragment.AddFullSampleToTrack(*mp4.VideoFullSample, trackID)
|
||||
if err != nil {
|
||||
//log.Printf("Error adding sample to track %d: %v", trackID, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Set the sample data
|
||||
var fullSample mp4ff.FullSample
|
||||
flags := uint32(33554432)
|
||||
if !isKeyframe {
|
||||
flags = uint32(16842752)
|
||||
}
|
||||
fullSample.DecodeTime = pts
|
||||
fullSample.Data = lengthPrefixed
|
||||
fullSample.Sample = mp4ff.Sample{
|
||||
Size: uint32(len(fullSample.Data)),
|
||||
Flags: flags,
|
||||
CompositionTimeOffset: 0, // No composition time offset for video
|
||||
}
|
||||
mp4.VideoFullSample = &fullSample
|
||||
mp4.SampleType = "video"
|
||||
}
|
||||
} else if trackID == uint32(mp4.AudioTrack) {
|
||||
if mp4.AudioFullSample != nil {
|
||||
SplitAACFrame(mp4.AudioFullSample.Data, func(started bool, aac []byte) {
|
||||
sampleToAdd := *mp4.AudioFullSample
|
||||
dts := pts - mp4.AudioFullSample.DecodeTime
|
||||
if pts < mp4.AudioFullSample.DecodeTime {
|
||||
//log.Printf("Warning: PTS %d is less than previous sample's DecodeTime %d, resetting AudioFullSample", pts, mp4.AudioFullSample.DecodeTime)
|
||||
dts = 1
|
||||
}
|
||||
if started {
|
||||
dts = 1
|
||||
}
|
||||
mp4.LastAudioSampleDTS = dts
|
||||
//fmt.Printf("Adding sample to track %d, PTS: %d, Duration: %d, size: %d\n", trackID, pts, dts, len(aac[7:]))
|
||||
mp4.AudioTotalDuration += dts
|
||||
mp4.AudioPTS += dts
|
||||
sampleToAdd.Data = aac[7:] // Remove the ADTS header (first 7 bytes)
|
||||
sampleToAdd.DecodeTime = mp4.AudioPTS - dts
|
||||
sampleToAdd.Sample.Dur = uint32(dts)
|
||||
sampleToAdd.Sample.Size = uint32(len(aac[7:]))
|
||||
err := mp4.MultiTrackFragment.AddFullSampleToTrack(sampleToAdd, trackID)
|
||||
if err != nil {
|
||||
log.Log.Error("mp4.AddSampleToTrack(): error adding sample to track " + fmt.Sprintf("%d: %v", trackID, err))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Set the sample data
|
||||
//flags := uint32(33554432)
|
||||
var fullSample mp4ff.FullSample
|
||||
fullSample.DecodeTime = pts
|
||||
fullSample.Data = data
|
||||
fullSample.Sample = mp4ff.Sample{
|
||||
Size: uint32(len(fullSample.Data)),
|
||||
Flags: 0,
|
||||
CompositionTimeOffset: 0, // No composition time offset for audio
|
||||
}
|
||||
mp4.AudioFullSample = &fullSample
|
||||
mp4.SampleType = "audio"
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close finalizes the MP4 file:
//  1. encodes the last in-progress media segment,
//  2. flushes the buffered writer (the file handle is closed on return),
//  3. builds the real ftyp/moov init segment (video/audio/subtitle tracks),
//  4. signs a fingerprint of the file with the configured private key and
//     embeds the signature as a UUID box inside moov,
//  5. overwrites the free box reserved by NewMP4 at the start of the file
//     with the init segment, padding the remainder with a new free box.
func (mp4 *MP4) Close(config *models.Config) {

	// Add the last sample to the track, we will predict the duration based on the last sample.
	// We do not insert the last sample as we might corrupt playback (we do not know the next PTS accurately).
	// In theory it means we lose the last sample — a few milliseconds of data loss — but that is better than corrupting playback.
	// We could solve this with a delayed packet reader that looks for the next (closest) PTS, but that would cost a lot of memory and CPU.

	/*duration := uint64(0)
	trackID := uint32(1)
	if mp4.SampleType == "video" {
		duration = mp4.LastVideoSampleDTS
		trackID = uint32(mp4.VideoTrack)
	} else if mp4.SampleType == "audio" {
		duration = 21 //mp4.LastAudioSampleDTS

	} else {
		log.Println("mp4.Close(): unknown sample type, cannot calculate duration")
	}

	if duration > 0 {
		mp4.VideoTotalDuration += duration
		mp4.VideoFullSample.DecodeTime = mp4.VideoTotalDuration - duration
		mp4.VideoFullSample.Sample.Dur = uint32(duration)
		err := mp4.MultiTrackFragment.AddFullSampleToTrack(*mp4.VideoFullSample, trackID)
		if err != nil {
		}
		mp4.Segments = append(mp4.Segments, mp4.Segment)
	}*/

	// Logged but not fatal: the header is still written below.
	if mp4.VideoTotalDuration == 0 && mp4.AudioTotalDuration == 0 {
		log.Log.Error("mp4.Close(): no video or audio samples added, cannot create MP4 file")
	}

	// Encode the last (still open) segment.
	if mp4.Segment != nil {
		err := mp4.Segment.Encode(mp4.Writer)
		if err != nil {
			panic(err)
		}
	}

	// Flush buffered data so the WriteAt patches below see a complete file.
	mp4.Writer.Flush()
	defer mp4.FileWriter.Close()

	// All moof/mdat boxes are now on disk. Generate the ftyp and moov boxes
	// and overwrite the free box reserved by NewMP4 (FreeBoxSize payload
	// bytes plus an 8-byte box header) at the start of the file.
	init := mp4ff.NewMP4Init()

	// Create a new ftyp box.
	majorBrand := "isom"
	minorVersion := uint32(512)
	compatibleBrands := []string{"iso2", "avc1", "hvc1", "mp41"}
	ftyp := mp4ff.NewFtyp(majorBrand, minorVersion, compatibleBrands)
	init.AddChild(ftyp)

	// Create a new moov box.
	moov := mp4ff.NewMoovBox()
	init.AddChild(moov)

	// Movie header: creation/modification time and total duration.
	// Both timescales are 1000 units/second, matching AddSampleToTrack's PTS units.
	videoTimescale := uint32(1000)
	audioTimescale := uint32(1000)
	mvhd := &mp4ff.MvhdBox{
		Version:          0,
		Flags:            0,
		CreationTime:     mp4.StartTime,
		ModificationTime: mp4.StartTime,
		Timescale:        videoTimescale,
		Duration:         mp4.VideoTotalDuration,
	}
	init.Moov.AddChild(mvhd)

	// mvex/mehd advertises the total fragmented duration.
	mvex := mp4ff.NewMvexBox()
	mvex.AddChild(&mp4ff.MehdBox{FragmentDuration: int64(mp4.VideoTotalDuration)})
	init.Moov.AddChild(mvex)

	// Add the video track with the matching codec descriptor.
	if mp4.VideoTrackName == "H264" || mp4.VideoTrackName == "AVC1" {
		init.AddEmptyTrack(videoTimescale, "video", "und")
		includePS := true
		err := init.Moov.Traks[0].SetAVCDescriptor("avc1", mp4.SPSNALUs, mp4.PPSNALUs, includePS)
		if err != nil {
			// Deliberately best-effort: a failed descriptor leaves the track bare.
			//panic(err)
		}
		init.Moov.Traks[0].Tkhd.Duration = mp4.VideoTotalDuration
		init.Moov.Traks[0].Mdia.Hdlr.Name = "agent " + utils.VERSION
		//init.Moov.Traks[0].Mdia.Mdhd.Duration = mp4.VideoTotalDuration
	} else if mp4.VideoTrackName == "H265" || mp4.VideoTrackName == "HVC1" {
		init.AddEmptyTrack(videoTimescale, "video", "und")
		includePS := true
		err := init.Moov.Traks[0].SetHEVCDescriptor("hvc1", mp4.VPSNALUs, mp4.SPSNALUs, mp4.PPSNALUs, [][]byte{}, includePS)
		if err != nil {
			//panic(err)
		}
		init.Moov.Traks[0].Tkhd.Duration = mp4.VideoTotalDuration
		init.Moov.Traks[0].Mdia.Hdlr.Name = "agent " + utils.VERSION
		//init.Moov.Traks[0].Mdia.Mdhd.Duration = mp4.VideoTotalDuration
	}

	// Add the audio track if available (assumed to be Traks[1]).
	if mp4.AudioTrackName == "AAC" || mp4.AudioTrackName == "MP4A" {
		// Add an audio track to the moov box.
		init.AddEmptyTrack(audioTimescale, "audio", "und")

		// Use the configured sample rate, otherwise default to 48000.
		audioSampleRate := 48000
		if config.Capture.IPCamera.SampleRate > 0 {
			audioSampleRate = config.Capture.IPCamera.SampleRate
		}
		// Set the audio descriptor (29 = object type passed to mp4ff).
		err := init.Moov.Traks[1].SetAACDescriptor(29, audioSampleRate)
		if err != nil {
			//panic(err)
		}
		init.Moov.Traks[1].Tkhd.Duration = mp4.AudioTotalDuration
		init.Moov.Traks[1].Mdia.Hdlr.Name = "agent " + utils.VERSION
		//init.Moov.Traks[1].Mdia.Mdhd.Duration = mp4.AudioTotalDuration
	}

	// Add a subtitle track if available (assumed to be Traks[2]).
	// NOTE(review): this checks VideoTrackName, not a subtitle-specific field — confirm intended.
	if mp4.VideoTrackName == "VTT" || mp4.VideoTrackName == "WebVTT" {
		// Add a subtitle track to the moov box.
		init.AddEmptyTrack(videoTimescale, "subtitle", "und")
		// Set the subtitle descriptor.
		err := init.Moov.Traks[2].SetWvttDescriptor("")
		if err != nil {
			//log.Log.Error("mp4.Close(): error setting VTT descriptor: " + err.Error())
			//return
		}
		init.Moov.Traks[2].Mdia.Hdlr.Name = "agent " + utils.VERSION
	}

	// Build a fingerprint so the file's integrity can be verified later.
	// The fingerprint concatenates, separated by "_":
	//   - Moov.Mvhd.CreationTime (the time the file was created)
	//   - Moov.Mvhd.Duration (the total duration of the video)
	//   - Moov.Trak.Hdlr.Name (the handler name: agent and version)
	//   - the number of moof boxes in the file
	//   - the size of each moof box, in order
	// The resulting string is signed with the configured private key and
	// embedded as a UUID box.
	fingerprint := fmt.Sprintf("%d", init.Moov.Mvhd.CreationTime) + "_" +
		fmt.Sprintf("%d", init.Moov.Mvhd.Duration) + "_" +
		init.Moov.Trak.Mdia.Hdlr.Name + "_" +
		fmt.Sprintf("%d", mp4.MoofBoxes) + "_" // Number of moof boxes

	for i, size := range mp4.MoofBoxSizes {
		fingerprint += fmt.Sprintf("%d", size)
		if i < len(mp4.MoofBoxSizes)-1 {
			fingerprint += "_"
		}
	}
	// Remove trailing underscore if present (happens when MoofBoxSizes is empty).
	if len(fingerprint) > 0 && fingerprint[len(fingerprint)-1] == '_' {
		fingerprint = fingerprint[:len(fingerprint)-1]
	}

	// Load the private key from the configuration.
	// NOTE(review): ioutil.ReadAll is deprecated; io.ReadAll is the modern equivalent.
	privateKey := config.Signing.PrivateKey
	r := strings.NewReader(privateKey)
	pemBytes, _ := ioutil.ReadAll(r)
	block, _ := pem.Decode(pemBytes)

	if block == nil {
		// No (valid) PEM key configured: skip signing, still write the header.
		//log.Log.Error("mp4.Close(): error decoding PEM block containing private key")
		//return
	} else {
		// Parse private key.
		b := block.Bytes
		key, err := x509.ParsePKCS8PrivateKey(b)
		if err != nil {
			//log.Log.Error("mp4.Close(): error parsing private key: " + err.Error())
			//return
		} else {
			// Convert key to *rsa.PrivateKey.
			// NOTE(review): the type-assertion failure is ignored; a non-RSA
			// key yields rsaKey == nil here — confirm SignWithPrivateKey
			// tolerates nil.
			rsaKey, _ := key.(*rsa.PrivateKey)
			fingerprintBytes := []byte(fingerprint)
			signature, err := encryption.SignWithPrivateKey(fingerprintBytes, rsaKey)
			if err == nil && len(signature) > 0 {
				// Store the signature in a custom UUID box inside moov.
				uuid := &mp4ff.UUIDBox{}
				uuid.SetUUID("6b0c1f8e-3d2a-4f5b-9c7d-8f1e2b3c4d5e")
				uuid.UnknownPayload = signature
				init.Moov.AddChild(uuid)
			} else {
				//log.Log.Error("mp4.Close(): error signing fingerprint: " + err.Error())
			}
		}
	}

	// We could also add a SIDX (segment index) box for seeking/streaming;
	// the implementation below is kept for reference but disabled.
	/*sidx := &mp4ff.SidxBox{
		Version:                  0,
		Flags:                    0,
		ReferenceID:              0,
		Timescale:                videoTimescale,
		EarliestPresentationTime: 0,
		FirstOffset:              0,
		SidxRefs:                 make([]mp4ff.SidxRef, 0),
	}
	referenceTrak := init.Moov.Trak
	trex, ok := init.Moov.Mvex.GetTrex(referenceTrak.Tkhd.TrackID)
	if !ok {
		// We have an issue.
	}

	segDatas, err := findSegmentData(mp4.Segments, referenceTrak, trex)
	if err != nil {
		// We have an issue.
	}
	fillSidx(sidx, referenceTrak, segDatas, true)

	// Add the SIDX box to the moov box
	init.AddChild(sidx)*/

	// Encode the init segment into a memory buffer.
	buffer := bytes.NewBuffer(make([]byte, 0))
	init.Encode(buffer)

	// Overwrite the start of the file (the reserved free box) with the
	// init segment. The init segment is assumed to fit within the reserved
	// region (FreeBoxSize payload + 8-byte box header).
	if _, err := mp4.FileWriter.WriteAt(buffer.Bytes(), 0); err != nil {
		panic(err)
	}

	// Fill the remainder of the reserved region with a new free box so the
	// following moof boxes stay at their original offsets. This works out
	// exactly because the new free box's own 8-byte header replaces the
	// original free box's header: buffer.Len() + (remainingSize + 8) ==
	// FreeBoxSize + 8.
	remainingSize := mp4.FreeBoxSize - int64(buffer.Len())
	if remainingSize > 0 {
		newFreeBox := mp4ff.NewFreeBox(make([]byte, remainingSize))
		var freeBuf bytes.Buffer
		if err := newFreeBox.Encode(&freeBuf); err != nil {
			panic(err)
		}
		if _, err := mp4.FileWriter.WriteAt(freeBuf.Bytes(), int64(buffer.Len())); err != nil {
			panic(err)
		}
	}
}
|
||||
|
||||
// segData captures per-segment metadata needed to build a sidx
// (segment index) box; populated by findSegmentData.
type segData struct {
	startPos         uint64 // byte offset of the segment within the file
	presentationTime uint64 // baseDecodeTime plus the first sample's composition offset
	baseDecodeTime   uint64 // tfdt base media decode time of the first fragment
	dur              uint32 // summed sample durations of the reference track
	size             uint32 // encoded size of the segment in bytes
}
|
||||
|
||||
func fillSidx(sidx *mp4ff.SidxBox, refTrak *mp4ff.TrakBox, segDatas []segData, nonZeroEPT bool) {
|
||||
ept := uint64(0)
|
||||
if nonZeroEPT {
|
||||
ept = segDatas[0].presentationTime
|
||||
}
|
||||
sidx.Version = 1
|
||||
sidx.Timescale = refTrak.Mdia.Mdhd.Timescale
|
||||
sidx.ReferenceID = 1
|
||||
sidx.EarliestPresentationTime = ept
|
||||
sidx.FirstOffset = 0
|
||||
sidx.SidxRefs = make([]mp4ff.SidxRef, 0, len(segDatas))
|
||||
|
||||
for _, segData := range segDatas {
|
||||
size := segData.size
|
||||
sidx.SidxRefs = append(sidx.SidxRefs, mp4ff.SidxRef{
|
||||
ReferencedSize: size,
|
||||
SubSegmentDuration: segData.dur,
|
||||
StartsWithSAP: 1,
|
||||
SAPType: 1,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// findSegmentData returns a slice of segment media data using a reference track.
|
||||
func findSegmentData(segs []*mp4ff.MediaSegment, refTrak *mp4ff.TrakBox, trex *mp4ff.TrexBox) ([]segData, error) {
|
||||
segDatas := make([]segData, 0, len(segs))
|
||||
for _, seg := range segs {
|
||||
var firstCompositionTimeOffest int64
|
||||
dur := uint32(0)
|
||||
var baseTime uint64
|
||||
for fIdx, frag := range seg.Fragments {
|
||||
for _, traf := range frag.Moof.Trafs {
|
||||
tfhd := traf.Tfhd
|
||||
if tfhd.TrackID == refTrak.Tkhd.TrackID { // Find track that gives sidx time values
|
||||
if fIdx == 0 {
|
||||
baseTime = traf.Tfdt.BaseMediaDecodeTime()
|
||||
}
|
||||
for i, trun := range traf.Truns {
|
||||
trun.AddSampleDefaultValues(tfhd, trex)
|
||||
samples := trun.GetSamples()
|
||||
for j, sample := range samples {
|
||||
if fIdx == 0 && i == 0 && j == 0 {
|
||||
firstCompositionTimeOffest = int64(sample.CompositionTimeOffset)
|
||||
}
|
||||
dur += sample.Dur
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sd := segData{
|
||||
startPos: seg.StartPos,
|
||||
presentationTime: uint64(int64(baseTime) + firstCompositionTimeOffest),
|
||||
baseDecodeTime: baseTime,
|
||||
dur: dur,
|
||||
size: uint32(seg.Size()),
|
||||
}
|
||||
segDatas = append(segDatas, sd)
|
||||
}
|
||||
return segDatas, nil
|
||||
}
|
||||
|
||||
// annexBToLengthPrefixed converts Annex B formatted H264 data (with start codes)
|
||||
// into length-prefixed NAL units (4-byte length before each NAL unit).
|
||||
func annexBToLengthPrefixed(data []byte) ([]byte, error) {
|
||||
var out bytes.Buffer
|
||||
|
||||
// Find start codes and split NAL units
|
||||
nalus := splitNALUs(data)
|
||||
if len(nalus) == 0 {
|
||||
return nil, fmt.Errorf("no NAL units found")
|
||||
}
|
||||
|
||||
for _, nalu := range nalus {
|
||||
// Remove Annex B start codes (0x000001 or 0x00000001) from the beginning of each NALU
|
||||
nalu = removeAnnexBStartCode(nalu)
|
||||
if len(nalu) == 0 {
|
||||
continue
|
||||
}
|
||||
// Write 4-byte big-endian length
|
||||
length := uint32(len(nalu))
|
||||
lenBytes := []byte{
|
||||
byte(length >> 24),
|
||||
byte(length >> 16),
|
||||
byte(length >> 8),
|
||||
byte(length),
|
||||
}
|
||||
out.Write(lenBytes)
|
||||
out.Write(nalu)
|
||||
}
|
||||
|
||||
return out.Bytes(), nil
|
||||
}
|
||||
|
||||
// removeAnnexBStartCode removes a leading Annex B start code from a NALU if present.
|
||||
func removeAnnexBStartCode(nalu []byte) []byte {
|
||||
if len(nalu) >= 4 && nalu[0] == 0x00 && nalu[1] == 0x00 {
|
||||
if nalu[2] == 0x01 {
|
||||
return nalu[3:]
|
||||
}
|
||||
if nalu[2] == 0x00 && nalu[3] == 0x01 {
|
||||
return nalu[4:]
|
||||
}
|
||||
}
|
||||
return nalu
|
||||
}
|
||||
|
||||
// splitNALUs splits Annex B data into raw NAL units without start codes.
|
||||
func splitNALUs(data []byte) [][]byte {
|
||||
var nalus [][]byte
|
||||
start := 0
|
||||
|
||||
for start < len(data) {
|
||||
// Find next start code (0x000001 or 0x00000001)
|
||||
i := findStartCode(data, start+3)
|
||||
if i < 0 {
|
||||
// Last NALU till end of data
|
||||
nalus = append(nalus, data[start:])
|
||||
break
|
||||
}
|
||||
// NAL unit is between start and i
|
||||
nalus = append(nalus, data[start:i])
|
||||
start = i
|
||||
}
|
||||
|
||||
return nalus
|
||||
}
|
||||
|
||||
// findStartCode returns the index of the next Annex B start code (0x000001
// or 0x00000001) at or after pos, or -1 if none is found.
func findStartCode(data []byte, pos int) int {
	// FIX: the loop bound must allow i+2 to reach the last byte; the
	// previous bound (i+3 < len) missed a 3-byte start code ending on the
	// final byte of data.
	for i := pos; i+2 < len(data); i++ {
		if data[i] == 0x00 && data[i+1] == 0x00 {
			if data[i+2] == 0x01 {
				return i
			}
			// 4-byte form needs one more byte of lookahead.
			if i+3 < len(data) && data[i+2] == 0x00 && data[i+3] == 0x01 {
				return i
			}
		}
	}
	return -1
}
|
||||
|
||||
// FindSyncword returns the index of the next ADTS syncword (12 set bits,
// i.e. 0xFF followed by a byte whose high nibble is 0xF) in aac at or
// after offset, or -1 when none is present.
func FindSyncword(aac []byte, offset int) int {
	for idx := offset; idx < len(aac)-1; idx++ {
		if aac[idx] != 0xFF {
			continue
		}
		if aac[idx+1]&0xF0 == 0xF0 {
			return idx
		}
	}
	return -1
}
|
||||
|
||||
// Table 31 – Profiles
|
||||
// index profile
|
||||
// 0 Main profile
|
||||
// 1 Low Complexity profile (LC)
|
||||
// 2 Scalable Sampling Rate profile (SSR)
|
||||
// 3 (reserved)
|
||||
|
||||
// START_CODE_TYPE is the byte length of an Annex B start code:
// 3 for 0x000001, 4 for 0x00000001.
type START_CODE_TYPE int

const (
	START_CODE_3 START_CODE_TYPE = 3 // 3-byte start code 0x000001
	START_CODE_4 START_CODE_TYPE = 4 // 4-byte start code 0x00000001
)
|
||||
|
||||
func FindStartCode(nalu []byte, offset int) (int, START_CODE_TYPE) {
|
||||
idx := bytes.Index(nalu[offset:], []byte{0x00, 0x00, 0x01})
|
||||
switch {
|
||||
case idx > 0:
|
||||
if nalu[offset+idx-1] == 0x00 {
|
||||
return offset + idx - 1, START_CODE_4
|
||||
}
|
||||
fallthrough
|
||||
case idx == 0:
|
||||
return offset + idx, START_CODE_3
|
||||
}
|
||||
return -1, START_CODE_3
|
||||
}
|
||||
|
||||
func SplitFrame(frames []byte, onFrame func(nalu []byte) bool) {
|
||||
beg, sc := FindStartCode(frames, 0)
|
||||
for beg >= 0 {
|
||||
end, sc2 := FindStartCode(frames, beg+int(sc))
|
||||
if end == -1 {
|
||||
if onFrame != nil {
|
||||
onFrame(frames[beg+int(sc):])
|
||||
}
|
||||
break
|
||||
}
|
||||
if onFrame != nil && onFrame(frames[beg+int(sc):end]) == false {
|
||||
break
|
||||
}
|
||||
beg = end
|
||||
sc = sc2
|
||||
}
|
||||
}
|
||||
|
||||
func SplitFrameWithStartCode(frames []byte, onFrame func(nalu []byte) bool) {
|
||||
beg, sc := FindStartCode(frames, 0)
|
||||
for beg >= 0 {
|
||||
end, sc2 := FindStartCode(frames, beg+int(sc))
|
||||
if end == -1 {
|
||||
if onFrame != nil && (beg+int(sc)) < len(frames) {
|
||||
onFrame(frames[beg:])
|
||||
}
|
||||
break
|
||||
}
|
||||
if onFrame != nil && (beg+int(sc)) < end && onFrame(frames[beg:end]) == false {
|
||||
break
|
||||
}
|
||||
beg = end
|
||||
sc = sc2
|
||||
}
|
||||
}
|
||||
|
||||
func SplitAACFrame(frames []byte, onFrame func(started bool, aac []byte)) {
|
||||
var adts ADTS_Frame_Header
|
||||
start := FindSyncword(frames, 0)
|
||||
started := false
|
||||
for start >= 0 {
|
||||
adts.Decode(frames[start:])
|
||||
onFrame(started, frames[start:start+int(adts.Variable_Header.Frame_length)])
|
||||
start = FindSyncword(frames, start+int(adts.Variable_Header.Frame_length))
|
||||
started = true
|
||||
}
|
||||
}
|
||||
|
||||
// AAC_PROFILE is the 2-bit ADTS profile field (see Table 31 above:
// 0 = Main, 1 = Low Complexity, 2 = Scalable Sampling Rate).
type AAC_PROFILE int

const (
	MAIN AAC_PROFILE = iota // Main profile
	LC                      // Low Complexity profile
	SSR                     // Scalable Sampling Rate profile
)
|
||||
|
||||
// AAC_SAMPLING_FREQUENCY enumerates the 4-bit sampling_frequency_index
// values; the corresponding rates in Hz are listed in AAC_Sampling_Idx.
type AAC_SAMPLING_FREQUENCY int

const (
	AAC_SAMPLE_96000 AAC_SAMPLING_FREQUENCY = iota
	AAC_SAMPLE_88200
	AAC_SAMPLE_64000
	AAC_SAMPLE_48000
	AAC_SAMPLE_44100
	AAC_SAMPLE_32000
	AAC_SAMPLE_24000
	AAC_SAMPLE_22050
	AAC_SAMPLE_16000
	AAC_SAMPLE_12000
	AAC_SAMPLE_11025
	AAC_SAMPLE_8000
	AAC_SAMPLE_7350
)
|
||||
|
||||
// AAC_Sampling_Idx maps an AAC sampling_frequency_index (0-12) to its
// sampling rate in Hz; mirrors the AAC_SAMPLE_* constants above.
var AAC_Sampling_Idx [13]int = [13]int{96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350}
|
||||
|
||||
// Table 4 – Syntax of adts_sequence()
|
||||
// adts_sequence() {
|
||||
// while (nextbits() == syncword) {
|
||||
// adts_frame();
|
||||
// }
|
||||
// }
|
||||
// Table 5 – Syntax of adts_frame()
|
||||
// adts_frame() {
|
||||
// adts_fixed_header();
|
||||
// adts_variable_header();
|
||||
// if (number_of_raw_data_blocks_in_frame == 0) {
|
||||
// adts_error_check();
|
||||
// raw_data_block();
|
||||
// }
|
||||
// else {
|
||||
// adts_header_error_check();
|
||||
// for (i = 0; i <= number_of_raw_data_blocks_in_frame;i++ {
|
||||
// raw_data_block();
|
||||
// adts_raw_data_block_error_check();
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// adts_fixed_header()
|
||||
// {
|
||||
// syncword; 12 bslbf
|
||||
// ID; 1 bslbf
|
||||
// layer; 2 uimsbf
|
||||
// protection_absent; 1 bslbf
|
||||
// profile; 2 uimsbf
|
||||
// sampling_frequency_index; 4 uimsbf
|
||||
// private_bit; 1 bslbf
|
||||
// channel_configuration; 3 uimsbf
|
||||
// original/copy; 1 bslbf
|
||||
// home; 1 bslbf
|
||||
// }
|
||||
|
||||
// ADTS_Fix_Header is the fixed part of an ADTS header (syncword excluded);
// field widths follow the adts_fixed_header() syntax table above.
type ADTS_Fix_Header struct {
	ID                       uint8 // 1 bit
	Layer                    uint8 // 2 bits
	Protection_absent        uint8 // 1 bit; 1 = no CRC (7-byte header), 0 = CRC present (9 bytes)
	Profile                  uint8 // 2 bits (Table 31: 0 Main, 1 LC, 2 SSR)
	Sampling_frequency_index uint8 // 4 bits; index into AAC_Sampling_Idx
	Private_bit              uint8 // 1 bit
	Channel_configuration    uint8 // 3 bits
	Originalorcopy           uint8 // 1 bit (original/copy)
	Home                     uint8 // 1 bit
}
|
||||
|
||||
// adts_variable_header() {
|
||||
// copyright_identification_bit; 1 bslbf
|
||||
// copyright_identification_start; 1 bslbf
|
||||
// frame_length; 13 bslbf
|
||||
// adts_buffer_fullness; 11 bslbf
|
||||
// number_of_raw_data_blocks_in_frame; 2 uimsfb
|
||||
// }
|
||||
|
||||
// ADTS_Variable_Header is the per-frame part of an ADTS header; field
// widths follow the adts_variable_header() syntax table above.
type ADTS_Variable_Header struct {
	Copyright_identification_bit       uint8  // 1 bit
	copyright_identification_start     uint8  // 1 bit
	Frame_length                       uint16 // 13 bits; full frame size in bytes, header included (see SplitAACFrame)
	Adts_buffer_fullness               uint16 // 11 bits
	Number_of_raw_data_blocks_in_frame uint8  // 2 bits
}
|
||||
|
||||
// ADTS_Frame_Header combines the fixed and variable parts of one ADTS
// frame header (7 bytes without CRC, 9 with).
type ADTS_Frame_Header struct {
	Fix_Header      ADTS_Fix_Header      // fields constant across a stream
	Variable_Header ADTS_Variable_Header // fields that change per frame
}
|
||||
|
||||
func NewAdtsFrameHeader() *ADTS_Frame_Header {
|
||||
return &ADTS_Frame_Header{
|
||||
Fix_Header: ADTS_Fix_Header{
|
||||
ID: 0,
|
||||
Layer: 0,
|
||||
Protection_absent: 1,
|
||||
Profile: uint8(MAIN),
|
||||
Sampling_frequency_index: uint8(AAC_SAMPLE_44100),
|
||||
Private_bit: 0,
|
||||
Channel_configuration: 0,
|
||||
Originalorcopy: 0,
|
||||
Home: 0,
|
||||
},
|
||||
|
||||
Variable_Header: ADTS_Variable_Header{
|
||||
copyright_identification_start: 0,
|
||||
Copyright_identification_bit: 0,
|
||||
Frame_length: 0,
|
||||
Adts_buffer_fullness: 0,
|
||||
Number_of_raw_data_blocks_in_frame: 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Decode parses a 7-byte ADTS header from the start of aac into frame.
// Callers must supply at least 7 bytes (shorter input panics on the
// bounds-check hint below); the syncword is assumed to have been located
// already, e.g. via FindSyncword.
func (frame *ADTS_Frame_Header) Decode(aac []byte) {
	_ = aac[6] // bounds-check hint: a full fixed+variable header is 7 bytes
	frame.Fix_Header.ID = aac[1] >> 3 // NOTE(review): also carries the syncword's low nibble; mask with &0x01 for the pure 1-bit ID
	frame.Fix_Header.Layer = aac[1] >> 1 & 0x03
	frame.Fix_Header.Protection_absent = aac[1] & 0x01
	frame.Fix_Header.Profile = aac[2] >> 6 & 0x03
	frame.Fix_Header.Sampling_frequency_index = aac[2] >> 2 & 0x0F
	frame.Fix_Header.Private_bit = aac[2] >> 1 & 0x01
	frame.Fix_Header.Channel_configuration = (aac[2] & 0x01 << 2) | (aac[3] >> 6) // 3 bits split across bytes 2 and 3
	frame.Fix_Header.Originalorcopy = aac[3] >> 5 & 0x01
	frame.Fix_Header.Home = aac[3] >> 4 & 0x01
	frame.Variable_Header.Copyright_identification_bit = aac[3] >> 3 & 0x01
	frame.Variable_Header.copyright_identification_start = aac[3] >> 2 & 0x01
	// 13-bit frame length: 2 bits from byte 3, 8 from byte 4, 3 from byte 5.
	frame.Variable_Header.Frame_length = (uint16(aac[3]&0x03) << 11) | (uint16(aac[4]) << 3) | (uint16(aac[5]>>5) & 0x07)
	// 11-bit buffer fullness: 5 bits from byte 5, 6 from byte 6.
	frame.Variable_Header.Adts_buffer_fullness = (uint16(aac[5]&0x1F) << 6) | uint16(aac[6]>>2)
	frame.Variable_Header.Number_of_raw_data_blocks_in_frame = aac[6] & 0x03
}
|
||||
|
||||
func (frame *ADTS_Frame_Header) Encode() []byte {
|
||||
var hdr []byte
|
||||
if frame.Fix_Header.Protection_absent == 1 {
|
||||
hdr = make([]byte, 7)
|
||||
} else {
|
||||
hdr = make([]byte, 9)
|
||||
}
|
||||
hdr[0] = 0xFF
|
||||
hdr[1] = 0xF0
|
||||
hdr[1] = hdr[1] | (frame.Fix_Header.ID << 3) | (frame.Fix_Header.Layer << 1) | frame.Fix_Header.Protection_absent
|
||||
hdr[2] = frame.Fix_Header.Profile<<6 | frame.Fix_Header.Sampling_frequency_index<<2 | frame.Fix_Header.Private_bit<<1 | frame.Fix_Header.Channel_configuration>>2
|
||||
hdr[3] = frame.Fix_Header.Channel_configuration<<6 | frame.Fix_Header.Originalorcopy<<5 | frame.Fix_Header.Home<<4
|
||||
hdr[3] = hdr[3] | frame.Variable_Header.copyright_identification_start<<3 | frame.Variable_Header.Copyright_identification_bit<<2 | byte(frame.Variable_Header.Frame_length<<11)
|
||||
hdr[4] = byte(frame.Variable_Header.Frame_length >> 3)
|
||||
hdr[5] = byte((frame.Variable_Header.Frame_length&0x07)<<5) | byte(frame.Variable_Header.Adts_buffer_fullness>>3)
|
||||
hdr[6] = byte(frame.Variable_Header.Adts_buffer_fullness&0x3F<<2) | frame.Variable_Header.Number_of_raw_data_blocks_in_frame
|
||||
return hdr
|
||||
}
|
||||
|
||||
func SampleToAACSampleIndex(sampling int) int {
|
||||
for i, v := range AAC_Sampling_Idx {
|
||||
if v == sampling {
|
||||
return i
|
||||
}
|
||||
}
|
||||
panic("not Found AAC Sample Index")
|
||||
}
|
||||
|
||||
// AACSampleIdxToSample maps a sampling_frequency_index back to its rate in
// Hz. Panics (index out of range) when idx is outside [0, 12].
func AACSampleIdxToSample(idx int) int {
	return AAC_Sampling_Idx[idx]
}
|
||||
|
||||
// +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
// | audio object type(5 bits) | sampling frequency index(4 bits) | channel configuration(4 bits) | GA framelength flag(1 bits) | GA Depends on core coder(1 bits) | GA Extension Flag(1 bits) |
|
||||
// +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
|
||||
|
||||
// AudioSpecificConfiguration models the 2-byte MPEG-4 AudioSpecificConfig;
// field widths follow the bit diagram above.
type AudioSpecificConfiguration struct {
	Audio_object_type        uint8 // 5 bits (ADTS profile + 1, see ConvertADTSToASC)
	Sample_freq_index        uint8 // 4 bits; index into AAC_Sampling_Idx
	Channel_configuration    uint8 // 4 bits
	GA_framelength_flag      uint8 // 1 bit
	GA_depends_on_core_coder uint8 // 1 bit
	GA_extension_flag        uint8 // 1 bit
}
|
||||
|
||||
func NewAudioSpecificConfiguration() *AudioSpecificConfiguration {
|
||||
return &AudioSpecificConfiguration{
|
||||
Audio_object_type: 0,
|
||||
Sample_freq_index: 0,
|
||||
Channel_configuration: 0,
|
||||
GA_framelength_flag: 0,
|
||||
GA_depends_on_core_coder: 0,
|
||||
GA_extension_flag: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Encode packs the configuration into the 2-byte AudioSpecificConfig
// layout shown above: 5-bit object type, 4-bit sampling index, 4-bit
// channel configuration, then the three 1-bit GA flags.
func (asc *AudioSpecificConfiguration) Encode() []byte {
	buf := make([]byte, 2)
	// byte 0: object type (5 bits) plus the top 3 bits of the sampling index
	buf[0] = (asc.Audio_object_type & 0x1f << 3) | (asc.Sample_freq_index & 0x0F >> 1)
	// byte 1: low sampling-index bit, channel configuration, GA flags
	buf[1] = (asc.Sample_freq_index & 0x0F << 7) | (asc.Channel_configuration & 0x0F << 3) | (asc.GA_framelength_flag & 0x01 << 2) | (asc.GA_depends_on_core_coder & 0x01 << 1) | (asc.GA_extension_flag & 0x01)
	return buf
}
|
||||
|
||||
// Decode parses a 2-byte AudioSpecificConfig from buf into asc; it is the
// inverse of Encode. Returns an error when buf holds fewer than 2 bytes.
func (asc *AudioSpecificConfiguration) Decode(buf []byte) error {

	if len(buf) < 2 {
		return errors.New("len of buf < 2 ")
	}

	asc.Audio_object_type = buf[0] >> 3
	// 4-bit sampling index: top 3 bits from byte 0, low bit from byte 1.
	asc.Sample_freq_index = (buf[0] & 0x07 << 1) | (buf[1] >> 7)
	asc.Channel_configuration = buf[1] >> 3 & 0x0F
	asc.GA_framelength_flag = buf[1] >> 2 & 0x01
	asc.GA_depends_on_core_coder = buf[1] >> 1 & 0x01
	asc.GA_extension_flag = buf[1] & 0x01
	return nil
}
|
||||
|
||||
func ConvertADTSToASC(frame []byte) (*AudioSpecificConfiguration, error) {
|
||||
if len(frame) < 7 {
|
||||
return nil, errors.New("len of frame < 7")
|
||||
}
|
||||
adts := NewAdtsFrameHeader()
|
||||
adts.Decode(frame)
|
||||
asc := NewAudioSpecificConfiguration()
|
||||
asc.Audio_object_type = adts.Fix_Header.Profile + 1
|
||||
asc.Channel_configuration = adts.Fix_Header.Channel_configuration
|
||||
asc.Sample_freq_index = adts.Fix_Header.Sampling_frequency_index
|
||||
return asc, nil
|
||||
}
|
||||
|
||||
func ConvertASCToADTS(asc []byte, aacbytes int) (*ADTS_Frame_Header, error) {
|
||||
aac_asc := NewAudioSpecificConfiguration()
|
||||
err := aac_asc.Decode(asc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
aac_adts := NewAdtsFrameHeader()
|
||||
aac_adts.Fix_Header.Profile = aac_asc.Audio_object_type - 1
|
||||
aac_adts.Fix_Header.Channel_configuration = aac_asc.Channel_configuration
|
||||
aac_adts.Fix_Header.Sampling_frequency_index = aac_asc.Sample_freq_index
|
||||
aac_adts.Fix_Header.Protection_absent = 1
|
||||
aac_adts.Variable_Header.Adts_buffer_fullness = 0x3F
|
||||
aac_adts.Variable_Header.Frame_length = uint16(aacbytes)
|
||||
return aac_adts, nil
|
||||
}
|
||||
@@ -281,7 +281,8 @@ func InitializeWebRTCConnection(configuration *models.Configuration, communicati
|
||||
candateBinary, err := json.Marshal(candateJSON)
|
||||
if err == nil {
|
||||
valueMap["candidate"] = string(candateBinary)
|
||||
valueMap["sdp"] = []byte(base64.StdEncoding.EncodeToString([]byte(answer.SDP)))
|
||||
// SDP is not needed to be send..
|
||||
//valueMap["sdp"] = []byte(base64.StdEncoding.EncodeToString([]byte(answer.SDP)))
|
||||
valueMap["session_id"] = handshake.SessionID
|
||||
} else {
|
||||
log.Log.Info("webrtc.main.InitializeWebRTCConnection(): something went wrong while marshalling candidate: " + err.Error())
|
||||
@@ -394,8 +395,8 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
|
||||
var cursorError error
|
||||
var pkt packets.Packet
|
||||
var previousTimeVideo int64
|
||||
var previousTimeAudio int64
|
||||
var lastAudioSample *pionMedia.Sample = nil
|
||||
var lastVideoSample *pionMedia.Sample = nil
|
||||
|
||||
start := false
|
||||
receivedKeyFrame := false
|
||||
@@ -453,25 +454,27 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
|
||||
if pkt.IsVideo {
|
||||
|
||||
// Calculate the difference
|
||||
bufferDuration := pkt.Time - previousTimeVideo
|
||||
previousTimeVideo = pkt.Time
|
||||
|
||||
// Start at the first keyframe
|
||||
if pkt.IsKeyFrame {
|
||||
start = true
|
||||
}
|
||||
if start {
|
||||
bufferDurationCasted := time.Duration(bufferDuration) * time.Millisecond
|
||||
sample := pionMedia.Sample{Data: pkt.Data, Duration: bufferDurationCasted, PacketTimestamp: uint32(pkt.Time)}
|
||||
sample := pionMedia.Sample{Data: pkt.Data, PacketTimestamp: uint32(pkt.Time)}
|
||||
//sample = pionMedia.Sample{Data: pkt.Data, Duration: time.Second}
|
||||
if config.Capture.ForwardWebRTC == "true" {
|
||||
// We will send the video to a remote peer
|
||||
// TODO..
|
||||
} else {
|
||||
if err := videoTrack.WriteSample(sample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
if lastVideoSample != nil {
|
||||
duration := sample.PacketTimestamp - lastVideoSample.PacketTimestamp
|
||||
bufferDurationCasted := time.Duration(duration) * time.Millisecond
|
||||
lastVideoSample.Duration = bufferDurationCasted
|
||||
if err := videoTrack.WriteSample(*lastVideoSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
lastVideoSample = &sample
|
||||
}
|
||||
}
|
||||
} else if pkt.IsAudio {
|
||||
@@ -481,23 +484,27 @@ func WriteToTrack(livestreamCursor *packets.QueueCursor, configuration *models.C
|
||||
// If PCM_MULAW we can send it directly.
|
||||
|
||||
if hasAAC {
|
||||
// We will transcode the audio
|
||||
// We will transcode the audio from AAC to PCM_MULAW
|
||||
// Not sure how to do this yet, but we need to use a decoder
|
||||
// and then encode it to PCM_MULAW.
|
||||
// TODO..
|
||||
//d := fdkaac.NewAacDecoder()
|
||||
continue
|
||||
}
|
||||
|
||||
// Calculate the difference
|
||||
bufferDuration := pkt.Time - previousTimeAudio
|
||||
previousTimeAudio = pkt.Time
|
||||
|
||||
// We will send the audio
|
||||
bufferDurationCasted := time.Duration(bufferDuration) * time.Millisecond
|
||||
sample := pionMedia.Sample{Data: pkt.Data, Duration: bufferDurationCasted, PacketTimestamp: uint32(pkt.Time)}
|
||||
//sample = pionMedia.Sample{Data: pkt.Data, Duration: time.Second}
|
||||
if err := audioTrack.WriteSample(sample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
sample := pionMedia.Sample{Data: pkt.Data, PacketTimestamp: uint32(pkt.Time)}
|
||||
|
||||
if lastAudioSample != nil {
|
||||
duration := sample.PacketTimestamp - lastAudioSample.PacketTimestamp
|
||||
bufferDurationCasted := time.Duration(duration) * time.Millisecond
|
||||
lastAudioSample.Duration = bufferDurationCasted
|
||||
if err := audioTrack.WriteSample(*lastAudioSample); err != nil && err != io.ErrClosedPipe {
|
||||
log.Log.Error("webrtc.main.WriteToTrack(): something went wrong while writing sample: " + err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
lastAudioSample = &sample
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
},
|
||||
"navigation": {
|
||||
"profile": "Profile",
|
||||
"admin": "admin",
|
||||
"admin": "Admin",
|
||||
"management": "Management",
|
||||
"dashboard": "Dashboard",
|
||||
"recordings": "Recordings",
|
||||
@@ -32,11 +32,11 @@
|
||||
"latest_events": "Latest events",
|
||||
"configure_connection": "Configure connection",
|
||||
"no_events": "No events",
|
||||
"no_events_description": "No recordings where found, make sure your Kerberos Agent is properly configured.",
|
||||
"no_events_description": "No recordings were found, make sure your Agent is properly configured.",
|
||||
"motion_detected": "Motion was detected",
|
||||
"live_view": "Live view",
|
||||
"loading_live_view": "Loading live view",
|
||||
"loading_live_view_description": "Hold on we are loading your live view here. If you didn't configure your camera connection, update it on the settings pages.",
|
||||
"loading_live_view_description": "Hold on, we are loading your live view here. If you didn't configure your camera connection, update it on the settings pages.",
|
||||
"time": "Time",
|
||||
"description": "Description",
|
||||
"name": "Name"
|
||||
@@ -59,32 +59,32 @@
|
||||
"persistence": "Persistence"
|
||||
},
|
||||
"info": {
|
||||
"kerberos_hub_demo": "Have a look at our Kerberos Hub demo environment, to see Kerberos Hub in action!",
|
||||
"configuration_updated_success": "Your configuration have been updated successfully.",
|
||||
"kerberos_hub_demo": "Have a look at our Hub demo environment, to see Hub in action!",
|
||||
"configuration_updated_success": "Your configuration has been updated successfully.",
|
||||
"configuration_updated_error": "Something went wrong while saving.",
|
||||
"verify_hub": "Verifying your Kerberos Hub settings.",
|
||||
"verify_hub_success": "Kerberos Hub settings are successfully verified.",
|
||||
"verify_hub_error": "Something went wrong while verifying Kerberos Hub",
|
||||
"verify_hub": "Verifying your Hub settings.",
|
||||
"verify_hub_success": "Hub settings are successfully verified.",
|
||||
"verify_hub_error": "Something went wrong while verifying Hub.",
|
||||
"verify_persistence": "Verifying your persistence settings.",
|
||||
"verify_persistence_success": "Persistence settings are successfully verified.",
|
||||
"verify_persistence_error": "Something went wrong while verifying the persistence",
|
||||
"verify_persistence_error": "Something went wrong while verifying the persistence.",
|
||||
"verify_camera": "Verifying your camera settings.",
|
||||
"verify_camera_success": "Camera settings are successfully verified.",
|
||||
"verify_camera_error": "Something went wrong while verifying the camera settings",
|
||||
"verify_camera_error": "Something went wrong while verifying the camera settings.",
|
||||
"verify_onvif": "Verifying your ONVIF settings.",
|
||||
"verify_onvif_success": "ONVIF settings are successfully verified.",
|
||||
"verify_onvif_error": "Something went wrong while verifying the ONVIF settings"
|
||||
"verify_onvif_error": "Something went wrong while verifying the ONVIF settings."
|
||||
},
|
||||
"overview": {
|
||||
"general": "General",
|
||||
"description_general": "General settings for your Kerberos Agent",
|
||||
"description_general": "General settings for your Agent",
|
||||
"key": "Key",
|
||||
"camera_name": "Camera name",
|
||||
"camera_friendly_name": "Friendly name",
|
||||
"timezone": "Timezone",
|
||||
"select_timezone": "Select a timezone",
|
||||
"advanced_configuration": "Advanced configuration",
|
||||
"description_advanced_configuration": "Detailed configuration options to enable or disable specific parts of the Kerberos Agent",
|
||||
"description_advanced_configuration": "Detailed configuration options to enable or disable specific parts of the Agent",
|
||||
"offline_mode": "Offline mode",
|
||||
"description_offline_mode": "Disable all outgoing traffic",
|
||||
"encryption": "Encryption",
|
||||
@@ -101,9 +101,9 @@
|
||||
"camera": "Camera",
|
||||
"description_camera": "Camera settings are required to make a connection to your camera of choice.",
|
||||
"only_h264": "Currently only H264/H265 RTSP streams are supported.",
|
||||
"rtsp_url": "RTSP url",
|
||||
"rtsp_url": "RTSP URL",
|
||||
"rtsp_h264": "A H264/H265 RTSP connection to your camera.",
|
||||
"sub_rtsp_url": "Sub RTSP url (used for livestreaming)",
|
||||
"sub_rtsp_url": "Sub RTSP URL (used for livestreaming)",
|
||||
"sub_rtsp_h264": "A secondary RTSP connection to the low resolution of your camera.",
|
||||
"onvif": "ONVIF",
|
||||
"description_onvif": "Credentials to communicate with ONVIF capabilities. These are used for PTZ or other capabilities provided by the camera.",
|
||||
@@ -115,28 +115,28 @@
|
||||
},
|
||||
"recording": {
|
||||
"recording": "Recording",
|
||||
"description_recording": "Specify how you would like to make recordings. Having a continuous 24/7 setup or a motion based recording.",
|
||||
"description_recording": "Specify how you would like to make recordings. Having a continuous 24/7 setup or a motion-based recording.",
|
||||
"continuous_recording": "Continuous recording",
|
||||
"description_continuous_recording": "Make 24/7 or motion based recordings.",
|
||||
"max_duration": "max video duration (seconds)",
|
||||
"description_continuous_recording": "Make 24/7 or motion-based recordings.",
|
||||
"max_duration": "Max video duration (seconds)",
|
||||
"description_max_duration": "The maximum duration of a recording.",
|
||||
"pre_recording": "pre recording (key frames buffered)",
|
||||
"pre_recording": "Pre recording (key frames buffered)",
|
||||
"description_pre_recording": "Seconds before an event occurred.",
|
||||
"post_recording": "post recording (seconds)",
|
||||
"post_recording": "Post recording (seconds)",
|
||||
"description_post_recording": "Seconds after an event occurred.",
|
||||
"threshold": "Recording threshold (pixels)",
|
||||
"description_threshold": "The number of pixels changed to record",
|
||||
"description_threshold": "The number of pixels changed to record.",
|
||||
"autoclean": "Auto clean",
|
||||
"description_autoclean": "Specify if the Kerberos Agent can cleanup recordings when a specific storage capacity (MB) is reached. This will remove the oldest recordings when the capacity is reached.",
|
||||
"description_autoclean": "Specify if the Agent can clean up recordings when a specific storage capacity (MB) is reached. This will remove the oldest recordings when the capacity is reached.",
|
||||
"autoclean_enable": "Enable auto clean",
|
||||
"autoclean_description_enable": "Remove oldest recording when capacity reached.",
|
||||
"autoclean_max_directory_size": "Maximum directory size (MB)",
|
||||
"autoclean_description_max_directory_size": "The maximum MB's of recordings stored.",
|
||||
"autoclean_description_max_directory_size": "The maximum MBs of recordings stored.",
|
||||
"fragmentedrecordings": "Fragmented recordings",
|
||||
"description_fragmentedrecordings": "When recordings are fragmented they are suitable for an HLS stream. When turned on the MP4 container will look a bit different.",
|
||||
"description_fragmentedrecordings": "When recordings are fragmented they are suitable for an HLS stream. When turned on, the MP4 container will look a bit different.",
|
||||
"fragmentedrecordings_enable": "Enable fragmentation",
|
||||
"fragmentedrecordings_description_enable": "Fragmented recordings are required for HLS.",
|
||||
"fragmentedrecordings_duration": "fragment duration",
|
||||
"fragmentedrecordings_duration": "Fragment duration",
|
||||
"fragmentedrecordings_description_duration": "Duration of a single fragment."
|
||||
},
|
||||
"streaming": {
|
||||
@@ -149,16 +149,16 @@
|
||||
"force_turn": "Force TURN",
|
||||
"force_turn_description": "Force TURN usage, even when STUN is available.",
|
||||
"stun_turn_forward": "Forwarding and transcoding",
|
||||
"stun_turn_description_forward": "Optimisations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_description_forward": "Optimizations and enhancements for TURN/STUN communication.",
|
||||
"stun_turn_webrtc": "Forwarding to WebRTC broker",
|
||||
"stun_turn_description_webrtc": "Forward h264 stream through MQTT",
|
||||
"stun_turn_description_webrtc": "Forward H264 stream through MQTT",
|
||||
"stun_turn_transcode": "Transcode stream",
|
||||
"stun_turn_description_transcode": "Convert stream to a lower resolution",
|
||||
"stun_turn_downscale": "Downscale resolution (in % of original resolution)",
|
||||
"mqtt": "MQTT",
|
||||
"description_mqtt": "A MQTT broker is used to communicate from",
|
||||
"description2_mqtt": "to the Kerberos Agent, to achieve for example livestreaming or ONVIF (PTZ) capabilities.",
|
||||
"mqtt_brokeruri": "Broker Uri",
|
||||
"description_mqtt": "An MQTT broker is used to communicate from",
|
||||
"description2_mqtt": "to the Agent, to achieve for example livestreaming or ONVIF (PTZ) capabilities.",
|
||||
"mqtt_brokeruri": "Broker URI",
|
||||
"mqtt_username": "Username",
|
||||
"mqtt_password": "Password",
|
||||
"realtimeprocessing": "Realtime Processing",
|
||||
@@ -180,57 +180,61 @@
|
||||
"friday": "Friday",
|
||||
"saturday": "Saturday",
|
||||
"externalcondition": "External Condition",
|
||||
"description_externalcondition": "Depending on an external webservice recording can be enabled or disabled.",
|
||||
"description_externalcondition": "Depending on an external web service, recording can be enabled or disabled.",
|
||||
"regionofinterest": "Region Of Interest",
|
||||
"description_regionofinterest": "By defining one or more regions, motion will be tracked only in the regions you have defined."
|
||||
},
|
||||
"persistence": {
|
||||
"kerberoshub": "Kerberos Hub",
|
||||
"description_kerberoshub": "Kerberos Agents can send heartbeats to a central",
|
||||
"description2_kerberoshub": "installation. Heartbeats and other relevant information are synced to Kerberos Hub to show realtime information about your video landscape.",
|
||||
"kerberoshub": "Hub",
|
||||
"description_kerberoshub": "Agents can send heartbeats to a central",
|
||||
"description2_kerberoshub": "installation. Heartbeats and other relevant information are synced to Hub to show realtime information about your video landscape.",
|
||||
"persistence": "Persistence",
|
||||
"secondary_persistence": "Secondary Persistence",
|
||||
"description_secondary_persistence": "Recordings will be sent to secondary persistence if the primary persistence is unavailable or fails. This can be useful for failover purposes.",
|
||||
"saasoffering": "Kerberos Hub (SAAS offering)",
|
||||
"saasoffering": "Hub (SaaS offering)",
|
||||
"description_persistence": "Having the ability to store your recordings is the beginning of everything. You can choose between our",
|
||||
"description2_persistence": ", or a 3rd party provider",
|
||||
"select_persistence": "Select a persistence",
|
||||
"kerberoshub_encryption": "Encryption",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Kerberos Hub will encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Kerberos Hub Proxy URL",
|
||||
"kerberoshub_encryption_description": "All traffic from/to Hub will be encrypted using AES-256.",
|
||||
"kerberoshub_proxyurl": "Hub Proxy URL",
|
||||
"kerberoshub_description_proxyurl": "The Proxy endpoint for uploading your recordings.",
|
||||
"kerberoshub_apiurl": "Kerberos Hub API URL",
|
||||
"kerberoshub_apiurl": "Hub API URL",
|
||||
"kerberoshub_description_apiurl": "The API endpoint for uploading your recordings.",
|
||||
"kerberoshub_publickey": "Public key",
|
||||
"kerberoshub_description_publickey": "The public key granted to your Kerberos Hub account.",
|
||||
"kerberoshub_description_publickey": "The public key granted to your Hub account.",
|
||||
"kerberoshub_privatekey": "Private key",
|
||||
"kerberoshub_description_privatekey": "The private key granted to your Kerberos Hub account.",
|
||||
"kerberoshub_description_privatekey": "The private key granted to your Hub account.",
|
||||
"kerberoshub_site": "Site",
|
||||
"kerberoshub_description_site": "The site ID the Kerberos Agents are belonging to in Kerberos Hub.",
|
||||
"kerberoshub_description_site": "The site ID the Agents belong to in Hub.",
|
||||
"kerberoshub_region": "Region",
|
||||
"kerberoshub_description_region": "The region we are storing our recordings in.",
|
||||
"kerberoshub_bucket": "Bucket",
|
||||
"kerberoshub_description_bucket": "The bucket we are storing our recordings in.",
|
||||
"kerberoshub_username": "Username/Directory (should match Kerberos Hub username)",
|
||||
"kerberoshub_description_username": "The username of your Kerberos Hub account.",
|
||||
"kerberosvault_apiurl": "Kerberos Vault API URL",
|
||||
"kerberosvault_description_apiurl": "The Kerberos Vault API",
|
||||
"kerberoshub_username": "Username/Directory (should match Hub username)",
|
||||
"kerberoshub_description_username": "The username of your Hub account.",
|
||||
"kerberosvault_apiurl": "Vault API URL",
|
||||
"kerberosvault_description_apiurl": "The Vault API",
|
||||
"kerberosvault_provider": "Provider",
|
||||
"kerberosvault_description_provider": "The provider to which your recordings will be send.",
|
||||
"kerberosvault_directory": "Directory (should match Kerberos Hub username)",
|
||||
"kerberosvault_description_directory": "Sub directory the recordings will be stored in your provider.",
|
||||
"kerberosvault_description_provider": "The provider to which your recordings will be sent.",
|
||||
"kerberosvault_directory": "Directory (should match Hub username)",
|
||||
"kerberosvault_description_directory": "Subdirectory the recordings will be stored in your provider.",
|
||||
"kerberosvault_accesskey": "Access key",
|
||||
"kerberosvault_description_accesskey": "The access key of your Kerberos Vault account.",
|
||||
"kerberosvault_description_accesskey": "The access key of your Vault account.",
|
||||
"kerberosvault_secretkey": "Secret key",
|
||||
"kerberosvault_description_secretkey": "The secret key of your Kerberos Vault account.",
|
||||
"kerberosvault_description_secretkey": "The secret key of your Vault account.",
|
||||
"kerberosvault_maxretries": "Max retries",
|
||||
"kerberosvault_description_maxretries": "The maximum number of retries to upload a recording.",
|
||||
"kerberosvault_timeout": "Timeout",
|
||||
"kerberosvault_description_timeout": "If a timeout occurs, recordings will be sent directly to the secondary Vault.",
|
||||
"dropbox_directory": "Directory",
|
||||
"dropbox_description_directory": "The sub directory where the recordings will be stored in your Dropbox account.",
|
||||
"dropbox_description_directory": "The subdirectory where the recordings will be stored in your Dropbox account.",
|
||||
"dropbox_accesstoken": "Access token",
|
||||
"dropbox_description_accesstoken": "The access token of your Dropbox account/app.",
|
||||
"verify_connection": "Verify Connection",
|
||||
"remove_after_upload": "Once recordings are uploaded to some persistence, you might want to remove them from the local Kerberos Agent.",
|
||||
"remove_after_upload": "Once recordings are uploaded to some persistence, you might want to remove them from the local Agent.",
|
||||
"remove_after_upload_description": "Remove recordings after they are uploaded successfully.",
|
||||
"remove_after_upload_enabled": "Enabled delete on upload"
|
||||
"remove_after_upload_enabled": "Enable delete on upload"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2536,6 +2536,43 @@ class Settings extends React.Component {
|
||||
)
|
||||
}
|
||||
/>
|
||||
<Input
|
||||
noPadding
|
||||
label={t(
|
||||
'settings.persistence.kerberosvault_maxretries'
|
||||
)}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_maxretries'
|
||||
)}
|
||||
value={
|
||||
config.kstorage ? config.kstorage.max_retries : ''
|
||||
}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage',
|
||||
'max_retries',
|
||||
value,
|
||||
config.kstorage
|
||||
)
|
||||
}
|
||||
/>
|
||||
|
||||
<Input
|
||||
noPadding
|
||||
label={t('settings.persistence.kerberosvault_timeout')}
|
||||
placeholder={t(
|
||||
'settings.persistence.kerberosvault_description_timeout'
|
||||
)}
|
||||
value={config.kstorage ? config.kstorage.timeout : ''}
|
||||
onChange={(value) =>
|
||||
this.onUpdateField(
|
||||
'kstorage',
|
||||
'timeout',
|
||||
value,
|
||||
config.kstorage
|
||||
)
|
||||
}
|
||||
/>
|
||||
</>
|
||||
)}
|
||||
{config.cloud === this.DROPBOX && (
|
||||
|
||||
@@ -1715,10 +1715,10 @@
|
||||
"@jridgewell/resolve-uri" "^3.0.3"
|
||||
"@jridgewell/sourcemap-codec" "^1.4.10"
|
||||
|
||||
"@kerberos-io/ui@^1.71.0":
|
||||
version "1.71.0"
|
||||
resolved "https://registry.yarnpkg.com/@kerberos-io/ui/-/ui-1.71.0.tgz#06914c94e8b0982068d2099acf8158917a511bfc"
|
||||
integrity sha512-pHCTn/iQTcQEPoCK82eJHGRn6BgzW3wgV4C+mNqdKOtLTquxL+vh7molEgC66tl3DGf7HyjSNa8LuoxYbt9TEg==
|
||||
"@kerberos-io/ui@^1.76.0":
|
||||
version "1.77.0"
|
||||
resolved "https://registry.yarnpkg.com/@kerberos-io/ui/-/ui-1.77.0.tgz#b748b2a9abf793ff2a9ba64ee41f84debc0ca9dc"
|
||||
integrity sha512-CHh4jeLKwrYvJRL5PM3UEN4p2k1fqwMKgSF2U6IR4v0fE2FwPc/2Ry4zGk6pvLDFHbDpR9jUkHX+iNphvStoyQ==
|
||||
dependencies:
|
||||
"@emotion/react" "^11.10.4"
|
||||
"@emotion/styled" "^11.10.4"
|
||||
|
||||
Reference in New Issue
Block a user