Compare commits

...

632 Commits

Author SHA1 Message Date
Cedric Verstraeten
aac2150a3a [release] v3.1.1 2024-01-07 22:14:44 +01:00
Cedric Verstraeten
9b713637b9 change version number of ui 2024-01-07 21:44:32 +01:00
Cedric Verstraeten
699660d472 only make release when putting [release] 2024-01-07 21:41:32 +01:00
Cedric Verstraeten
751aa17534 feature: make hub encryption configurable + only send heartbeat to vault when credentials are set 2024-01-07 21:30:57 +01:00
Cedric Verstraeten
2681bd2fe3 hot fix: keep track of main and sub stream separately (one of them might block) 2024-01-07 20:20:51 +01:00
Cedric Verstraeten
93adb3dabc different order in action 2024-01-07 08:29:53 +01:00
Cedric Verstraeten
0e15e58a88 try once more different format 2024-01-07 08:26:34 +01:00
Cedric Verstraeten
ef2ea999df only run release to docker when containing [release] 2024-01-07 08:22:24 +01:00
Cedric Verstraeten
ca367611d7 Update docker-nightly.yml 2024-01-07 08:15:24 +01:00
Cedric Verstraeten
eb8f073856 Merge branch 'master' into develop 2024-01-03 22:03:00 +01:00
Cedric Verstraeten
3ae43eba16 hot fix: close client on verifying connection (will keep client open) 2024-01-03 22:02:44 +01:00
Cedric Verstraeten
9719a08eaa Merge branch 'master' into develop 2024-01-03 21:54:30 +01:00
Cedric Verstraeten
1e165cbeb8 hotfix: try to create pullpoint subscription if first time failed 2024-01-03 18:44:53 +01:00
Cedric Verstraeten
8be8cafd00 force release mode in GIN 2024-01-03 18:26:10 +01:00
Cedric Verstraeten
e74d2aadb5 Merge branch 'develop' 2024-01-03 18:16:23 +01:00
Cedric Verstraeten
9c97422f43 properly handle cameras without PTZ function 2024-01-03 18:12:02 +01:00
Cedric Verstraeten
deb0a3ff1f hotfix: position or zoom can be nil 2024-01-03 13:37:38 +01:00
Cedric Verstraeten
95ed1f0e97 move error to debug 2024-01-03 12:36:08 +01:00
Cedric Verstraeten
6a111dadd6 typo in readme (wrong formatting link) 2024-01-03 12:24:35 +01:00
Cedric Verstraeten
95b3623c04 change startup command (new flag method) 2024-01-03 12:19:18 +01:00
Cedric Verstraeten
326d62a640 snap was moved to dedicated repository to better control release: https://github.com/kerberos-io/snap-agent
the repository https://github.com/kerberos-io/snap-agent is linked to the snap build system and will generate new releases
2024-01-03 12:17:47 +01:00
Cedric Verstraeten
9d990650f3 hotfix: onvif endpoint changed 2024-01-03 10:19:04 +01:00
Cedric Verstraeten
4bc891b640 hotfix: move from warning to debug 2024-01-03 10:12:18 +01:00
Cedric Verstraeten
1f133afb89 Merge branch 'develop' 2024-01-03 09:57:51 +01:00
Cedric Verstraeten
8da34a6a1a hotfix: restart agent when nog rtsp url was defined 2024-01-03 09:56:56 +01:00
Cédric Verstraeten
57c49a8325 Update snapcraft.yaml 2024-01-02 22:16:41 +01:00
Cedric Verstraeten
f739d52505 Update docker-nightly.yml 2024-01-01 23:46:12 +01:00
Cedric Verstraeten
793022eb0f no longer support go '1.17', '1.18', '1.19', 2024-01-01 23:41:45 +01:00
Cedric Verstraeten
6b1fd739f4 add as safe directory 2024-01-01 23:38:50 +01:00
Cedric Verstraeten
4efa7048dc add runner user - setup as a workaround 2024-01-01 23:33:08 +01:00
Cedric Verstraeten
4931700d06 try checkout v4, you never know.. 2024-01-01 23:29:50 +01:00
Cedric Verstraeten
4bd49dbee1 run go build as specific user 2024-01-01 23:25:32 +01:00
Cedric Verstraeten
c278a66f0e make go versions as string, removes the 0 (weird issue though) 2024-01-01 23:18:55 +01:00
Cedric Verstraeten
d64e6b631c extending versions + base image 2024-01-01 23:16:50 +01:00
Cedric Verstraeten
fa91e84977 Merge branch 'port-to-gortsplib' into develop 2024-01-01 23:11:24 +01:00
Cedric Verstraeten
8c231d3b63 Merge branch 'master' into develop 2024-01-01 23:10:36 +01:00
Cedric Verstraeten
775c1b7051 show correct error message for failing onvif 2024-01-01 19:36:14 +01:00
Cedric Verstraeten
fb23815210 add support for H265 in UI 2024-01-01 19:31:58 +01:00
Cedric Verstraeten
5261c1cbfc debug condition 2023-12-31 15:46:25 +01:00
Cedric Verstraeten
f2aa3d9176 onvif is enabled, currently expects ptz, which is not the case 2023-12-30 22:07:45 +01:00
Cedric Verstraeten
113b02d665 Update Cloud.go 2023-12-30 09:18:46 +01:00
Cedric Verstraeten
957d2fd095 Update Cloud.go 2023-12-29 14:59:34 +01:00
Cedric Verstraeten
78e7fb595a make sure to set onvifEventsList = []byte("[]") 2023-12-29 11:37:32 +01:00
Cedric Verstraeten
b5415284e2 rename + add conceptual hidden function (not yet added) 2023-12-29 08:10:01 +01:00
Cedric Verstraeten
e94a9a1000 update base image 2023-12-28 16:33:39 +01:00
Cedric Verstraeten
60bb9a521c Update README.md 2023-12-28 11:32:46 +01:00
Cedric Verstraeten
3ac34a366f Update README.md 2023-12-28 11:29:33 +01:00
Cedric Verstraeten
77449a29e7 add h264 and h265 discussion 2023-12-28 11:24:36 +01:00
Cedric Verstraeten
242ff48ab6 add more description error with onvif invalid credentials + send capabilitites as part of onvif/login or verify 2023-12-28 10:55:11 +01:00
Cedric Verstraeten
b71dbddc1a add support for snapshots (raw + base64) #130
also tweaked the logging as bit more
2023-12-28 10:24:15 +01:00
Cedric Verstraeten
6407f3da3d recover from failled pullpoint subscription 2023-12-28 08:22:37 +01:00
Cedric Verstraeten
776571c7b3 improve logging 2023-12-27 14:30:12 +01:00
Cedric Verstraeten
2df35a1999 add remote trigger relay output (mqtt endpoint) + rename a few methods 2023-12-27 10:39:12 +01:00
Cedric Verstraeten
b1ab6bf522 improve logging + updated readme 2023-12-27 10:25:03 +01:00
Cedric Verstraeten
e7fd0bd8a3 add logging output variable (json or text) + improve logging 2023-12-27 10:06:55 +01:00
Cedric Verstraeten
4f5597c441 remove unnecessary prints 2023-12-25 23:10:04 +01:00
Cedric Verstraeten
400457af9f upgrade onvif to 14 2023-12-25 21:37:35 +01:00
Cedric Verstraeten
c48e3a5683 Update go.mod 2023-12-25 21:01:52 +01:00
Cedric Verstraeten
67064879e4 input/output methods 2023-12-25 20:55:51 +01:00
Cedric Verstraeten
698b9c6b54 cleanup comments + add ouputs 2023-12-15 15:07:25 +01:00
Cedric Verstraeten
0e8a89c4c3 add onvif inputs function 2023-12-12 23:34:04 +01:00
Cedric Verstraeten
b0bcf73b52 add condition uri implementation, wrapped condition class so it's easier to extend 2023-12-12 17:30:41 +01:00
Cedric Verstraeten
15a51e7987 align logging 2023-12-12 09:52:35 +01:00
Cedric Verstraeten
b5f5567bcf cleanup names of files (still need more cleanup)+ rework discover method + separated conditions in separate package 2023-12-12 09:15:54 +01:00
Cedric Verstraeten
9151b38e7f document more swagger endpoints + cleanup source 2023-12-11 21:02:01 +01:00
Cedric Verstraeten
898b3a52c2 update loggin + add new swagger endpoints 2023-12-11 20:32:03 +01:00
Cedric Verstraeten
be6eb6165c get keyframe and decode on requesting config (required for factory) 2023-12-10 23:13:42 +01:00
Cedric Verstraeten
e95f545bf4 upgrade deps + fix nil error 2023-12-09 23:02:18 +01:00
Cedric Verstraeten
fd01fc640e get rid of snapshots + was blocking stream and corrupted recordings 2023-12-07 21:33:32 +01:00
Cedric Verstraeten
8cfcfe4643 upgrade onvif 2023-12-07 19:33:18 +01:00
Cedric Verstraeten
60d7b4b356 if we have no backchannel we'll skip the setup 2023-12-06 19:03:36 +01:00
Cedric Verstraeten
9b796c049d mem leak for http close (still one) + not closing some channels properly 2023-12-06 18:53:55 +01:00
Cedric Verstraeten
c8c9f6dff1 implement better logging, making logging levels configurable (WIP) 2023-12-05 23:05:59 +01:00
Cedric Verstraeten
8293d29ee8 make recording write directly to file + fix memory leaks with http on ONVIF API 2023-12-05 22:07:29 +01:00
Cedric Verstraeten
34a0d8f5c4 force TCP + ignore motion detection if no region is set 2023-12-05 08:30:00 +01:00
Cedric Verstraeten
0a195a0dfb Update Dockerfile 2023-12-04 14:47:53 +01:00
Cedric Verstraeten
c82ead31f2 decode using H265 2023-12-04 14:02:41 +01:00
Cedric Verstraeten
3ab4b5b54b OOPS: missing encryption at some points 2023-12-03 20:12:23 +01:00
Cedric Verstraeten
5765f7c4f6 additional checks for closed decoder + properly close recording when closed 2023-12-03 20:10:05 +01:00
Cedric Verstraeten
d1dd30577b get rid of VPS, fails to write in h265 (also upgrade dependencies) 2023-12-03 19:18:01 +01:00
Cedric Verstraeten
1145008c62 reference implementation for transcoding from MULAW to AAC 2023-12-03 09:53:20 +01:00
Cedric Verstraeten
3f1e01e665 dont panic on fail bachchannel 2023-12-03 08:14:56 +01:00
Cedric Verstraeten
ced9355b78 Run Backchannel on a seperate Gortsplib instance 2023-12-02 22:28:26 +01:00
Cedric Verstraeten
6e7ade036e add logging + fix private key pass through + fixed crash on websocket livestreaming 2023-12-02 21:30:07 +01:00
Cedric Verstraeten
976fbb65aa Update Kerberos.go 2023-12-02 15:41:36 +01:00
Cedric Verstraeten
ba7f870d4b wait a bit to close the motion channel, also close audio channel 2023-12-02 15:18:49 +01:00
Cedric Verstraeten
cb3dce5ffd closing 2023-12-02 13:07:52 +01:00
Cedric Verstraeten
b317a6a9db fix closing of rtspclient + integrate h265 support
now we can record in H265 and stream in H264 using webrtc or websocket
2023-12-02 12:34:28 +01:00
Cedric Verstraeten
e42f430bb8 add MPEG4 (AAC support), put ready for H265 2023-12-02 00:43:31 +01:00
Cedric Verstraeten
bd984ea1c7 works now, but needed to change size of paylod 2023-12-01 23:17:32 +01:00
Cedric Verstraeten
6798569b7f first try for the backchannel using gortsplib
getting error short buffer
2023-12-01 22:57:33 +01:00
Cedric Verstraeten
df3183ec1c add backchannel support 2023-12-01 22:18:06 +01:00
Cedric Verstraeten
25c35ba91b fix hull 2023-12-01 21:27:58 +01:00
Cedric Verstraeten
68b9c5f679 fix videostream for subclient 2023-12-01 20:24:35 +01:00
Cedric Verstraeten
9757bc9b18 Calculate width and height + add FPS 2023-12-01 19:47:31 +01:00
Cedric Verstraeten
1e4affbf5c dont write trailer do +1 prerecording reader 2023-12-01 15:05:39 +01:00
Cedric Verstraeten
22f4a7e08a fix closing of stream 2023-12-01 11:05:58 +01:00
Cedric Verstraeten
044e167dd2 add lock + motion detection 2023-12-01 08:34:09 +01:00
Cedric Verstraeten
bffd377461 add substream 2023-11-30 21:33:14 +01:00
Cedric Verstraeten
677c9e334b add decoder, fix livestream 2023-11-30 21:01:57 +01:00
Cedric Verstraeten
df38784a8d fixes 2023-11-30 17:34:03 +01:00
Cedric Verstraeten
dae2c1b5c4 fix keyframing 2023-11-30 17:17:10 +01:00
Cedric Verstraeten
fd6449b377 remove dtsextractor is blocks the stream 2023-11-30 14:50:09 +01:00
Cedric Verstraeten
cd09ed3321 fix 2023-11-30 14:33:12 +01:00
Cedric Verstraeten
e7dc9aa64d swap to joy4 2023-11-30 14:10:07 +01:00
Cedric Verstraeten
fec2587b6d Update Gortsplib.go 2023-11-30 13:49:46 +01:00
Cedric Verstraeten
7c285d36a1 isolate rtsp clients to be able to pass them through 2023-11-30 13:45:34 +01:00
Cedric Verstraeten
ed46cbe35a cleanup enable more features 2023-11-30 00:47:30 +01:00
Cedric Verstraeten
0a8f097c76 cleanup and fix for recording (wrong DTS value) + fix for recording using "old" joy library 2023-11-29 19:33:03 +01:00
Cedric Verstraeten
bce5d443d5 try new muxer 2023-11-29 17:18:51 +01:00
Cedric Verstraeten
19bf456bda adding fragmented mp4 (not working) trying to fix black screen on quicktime player mp4 2023-11-29 16:28:09 +01:00
Cedric Verstraeten
1359858e42 updates and cleanup 2023-11-29 15:01:36 +01:00
Cedric Verstraeten
55b1abe243 Add mp4 muxer, still some work to do 2023-11-29 10:21:58 +01:00
Cedric Verstraeten
c6428d8c5a Fix for WebRTC using new library had to encode nalu 2023-11-27 17:05:55 +01:00
Cedric Verstraeten
e241a03fc4 comment out unused code! 2023-11-26 17:30:05 +01:00
Cedric Verstraeten
ac2b99a3dd inherit from golibrtsp rtp.packet + fix the decoding for livestream + motion 2023-11-26 16:58:55 +01:00
Cedric Verstraeten
341a6a7fae refactoring the rtspclient to be able to swap out easily 2023-11-26 00:07:53 +01:00
Cedric Verstraeten
e74facfb7f fix: blocking state candidates 2023-11-23 22:21:56 +01:00
Cedric Verstraeten
54bc1989f9 fix: update locking webrtc 2023-11-23 21:17:39 +01:00
Cedric Verstraeten
94b71a0868 fix: enabling backchannel on the mainstream 2023-11-20 09:57:55 +01:00
Cedric Verstraeten
c071057eec hotfix: do fallback without backchannel if camera didnt support it, some cameras such as Dahua will fail on the header. 2023-11-20 09:35:41 +01:00
Cedric Verstraeten
e8a355d992 upgrade joy4: add setreaddeadline for RTSP connection 2023-11-19 21:40:08 +01:00
Cedric Verstraeten
ca84664071 hotfix: add locks to make sure candidates are not send to a closed candidate channel 2023-11-18 20:38:29 +01:00
Cedric Verstraeten
dd7fcb31b1 Add ONVIF backchannel functionality with G711 encoding 2023-11-17 16:28:03 +01:00
Cédric Verstraeten
324fffde6b Merge pull request #125 from Izzotop/feat/add-russian-language-support
Add Russian language
2023-11-14 21:22:33 +01:00
Izzotop
cd8347d20f Add Russian language 2023-11-09 16:03:40 +03:00
Cedric Verstraeten
efcbf52b06 Merge branch 'master' of https://github.com/kerberos-io/agent 2023-11-06 17:07:50 +01:00
Cedric Verstraeten
c33469a7b3 add --fix-missing to fix random broken builds (armv6 image) 2023-11-06 17:07:35 +01:00
Cédric Verstraeten
3717535f0b Merge pull request #121 from Chaitanya110703/patch-1
doc(README): remove typo
2023-11-06 16:54:28 +01:00
Cedric Verstraeten
8eb2de5e28 When Kerberos Vault is configured without Kerberos Hub, cameras do not show up in Kerberos Vault #123 2023-11-06 14:37:54 +01:00
Cedric Verstraeten
96f6bcb1dd test file in wrong directory 2023-11-03 13:18:00 +01:00
Cedric Verstraeten
860077a3eb turn off linting for: jsx-a11y/control-has-associated-label 2023-11-02 08:28:26 +01:00
Cedric Verstraeten
8be9343314 upgrade joy issues with audio codec (wrong FFMPEG version) 2023-11-02 08:15:32 +01:00
Cedric Verstraeten
dac04fbb57 upgrade joy for https://github.com/kerberos-io/agent/issues/105 2023-11-01 22:03:48 +01:00
Cedric Verstraeten
b9acf4c150 hot fix: factory needs to override encryption settings 2023-10-24 22:50:55 +02:00
Chaitanya110703
6608018f86 doc(README): remove typo 2023-10-24 21:25:45 +05:30
Cedric Verstraeten
552f5dbea6 hotfix: check if encryption is set for old agents 2023-10-24 17:52:09 +02:00
Cedric Verstraeten
2844a5a419 webrtc: disable relay allow other 2023-10-24 16:44:42 +02:00
Cedric Verstraeten
c4b9610f58 hotfix: mqtt webrtc - wrong session key 2023-10-24 16:22:15 +02:00
Cedric Verstraeten
6a44498730 hot fix: readd locks 2023-10-24 13:39:17 +02:00
Cedric Verstraeten
a2cebaf90b hot fix: wait for token in webrtc 2023-10-24 13:14:14 +02:00
Cedric Verstraeten
3f58f26dfd decrypt recordings through the UI automatically using the existing AES key, you can still use the decrypt action or openssl afterwards 2023-10-23 14:38:29 +02:00
Cedric Verstraeten
a8d5f56f1e hotfix - build error encryption key value 2023-10-23 11:07:54 +02:00
Cédric Verstraeten
1eb62d80c7 add encryption + end-to-end encryption to feature list 2023-10-23 10:59:13 +02:00
Cedric Verstraeten
e474a62dbc Add hindi #119 + allow recordings encryption + decryption tooling. 2023-10-23 10:56:36 +02:00
Cédric Verstraeten
f29b952001 Merge pull request #119 from fadkeabhi/feat#47-add-hindi-language-support
Added transaltions for hindi language
2023-10-22 22:15:07 +02:00
Cedric Verstraeten
38247ac9f6 Add italian to language selector #115 2023-10-22 20:00:10 +02:00
Cédric Verstraeten
580f17028a Merge pull request #115 from LeoSpyke/master
i18n: adds Italian locale
2023-10-22 19:56:55 +02:00
Cedric Verstraeten
48d933a561 backwards compatible when no encryption key was added in previous config 2023-10-20 14:35:09 +02:00
Cedric Verstraeten
0c70ab6158 Refactor MQTT endpoints + Introduce End-to-End encryption using RSA and AES keys + finetune PTZ 2023-10-20 13:31:02 +02:00
ABHISHEK FADAKE
839185dac8 Added transaltions for hindi language 2023-10-03 19:24:47 +05:30
LeoSpyke
ba6cdef9d5 i18n(it): translate persistence and bugfix 2023-09-15 08:17:12 +00:00
LeoSpyke
bedb3c0d7f Merge branch 'kerberos-io:master' into master 2023-09-14 12:47:46 +02:00
Leonardo Papini
2539255940 i18n: Italian translations 2023-09-14 12:47:28 +02:00
Cedric Verstraeten
24136f8b15 we didn't reset the main configuration, causing some config vars still to be set 2023-09-14 10:47:18 +02:00
Cedric Verstraeten
910bb3c079 merging timetable was giving issues 2023-09-14 10:13:50 +02:00
Cedric Verstraeten
47f4c19617 Update Config.go 2023-09-13 08:14:25 +02:00
Cedric Verstraeten
280a81809a Update Config.go 2023-09-12 22:38:26 +02:00
Cedric Verstraeten
59358acb30 add logging + empty friendly name 2023-09-12 15:17:56 +02:00
Cedric Verstraeten
ebd655ac73 Allow remote configuration through MQTT + restructure config method 2023-09-12 10:50:36 +02:00
Cedric Verstraeten
6325e37aae empty presets caused hub connection failing 2023-09-07 08:16:46 +02:00
Cedric Verstraeten
ecabc47847 integrate ondevice configurated presets 2023-08-30 14:12:07 +02:00
Cedric Verstraeten
31cc3d8939 Rely on continuous move will fix the PTZFunctions later 2023-08-29 14:53:48 +02:00
Cedric Verstraeten
c71cb71d08 We should reenable debugging, modifying to Info for now 2023-08-29 14:43:14 +02:00
Cedric Verstraeten
65a739ea75 logging PTZ functions 2023-08-29 14:30:25 +02:00
Cedric Verstraeten
410a62e9ef Some cameras do not support AbsoluteMovement, therefore we'll simulate it with ContinuousMove and a polling mechanism 2023-08-28 09:30:08 +02:00
Cedric Verstraeten
aa76dd1ec8 enable PTZ preset + introduce new MQTT messaging between Hub and Agent (introduction e2e encryption) 2023-08-25 09:05:53 +02:00
Cedric Verstraeten
384448d123 panic when no mongodb + remove files when no longer available + do not cleanup recordings by default, however cleanup when recordings have been uploaded 2023-07-31 08:49:34 +02:00
Cedric Verstraeten
414f74758c remove curly brackets 2023-07-26 19:22:19 +02:00
Cedric Verstraeten
25403ccdab dont restart if previously was not set! https://github.com/kerberos-io/agent/issues/110 2023-07-12 17:48:43 +02:00
Cedric Verstraeten
4c03132b83 Fail agent when no mongodb can be reached in Kerberos Factory deployment 2023-07-12 09:58:31 +02:00
Cedric Verstraeten
470f8f1cb6 some deployments might miss the variable, such as Kerberos Factory, we'll default these values to "true" 2023-07-11 21:57:07 +02:00
Cedric Verstraeten
5308376a67 add terraform deployment example 2023-07-04 21:04:16 +02:00
Cedric Verstraeten
2b112d29cf further detail snap deployment 2023-07-01 11:52:13 +02:00
Cedric Verstraeten
20d2517e74 add snapcraft 2023-07-01 07:42:12 +02:00
Cedric Verstraeten
12902e2482 disable snapcraft for the time being 2023-06-29 23:15:34 +02:00
Cedric Verstraeten
baca44beef Update docker.yml 2023-06-29 21:07:12 +02:00
Cedric Verstraeten
d7580744e2 Update docker.yml 2023-06-29 20:52:58 +02:00
Cedric Verstraeten
04f4bc9bf2 Update docker.yml 2023-06-29 20:47:56 +02:00
Cedric Verstraeten
d879174f4c add user to lxd group for snapcraft build 2023-06-29 20:40:28 +02:00
Cedric Verstraeten
5a1a62a723 Update docker.yml 2023-06-29 20:31:18 +02:00
Cedric Verstraeten
c519b01092 add snapcraft and try to snap the build 2023-06-29 20:23:11 +02:00
Cedric Verstraeten
c2ff7ff785 add missing custom config directory references 2023-06-29 20:10:16 +02:00
Cedric Verstraeten
44ec8c0534 try upgrading the dockerfile 2023-06-29 14:06:41 +02:00
Cedric Verstraeten
21c0e01137 add additional environment variables to tweak the internal agent "disable motion, disable liveview" 2023-06-29 12:28:44 +02:00
Cedric Verstraeten
f7ced6056d update to port 80 + allow frontend to take into account a custom config directory 2023-06-28 20:24:41 +02:00
Cedric Verstraeten
00917e3f88 add flag arguments instead of absolute arguments (we now support names)
added option to define the config location, can be different than the relative location of the agent binary
2023-06-28 19:28:07 +02:00
Cedric Verstraeten
bcfed04a07 add AGENT_TLS_INSECURE to enable Insecure TLS mode 2023-06-28 17:09:29 +02:00
Cedric Verstraeten
bf97bd72f1 add osusergo 2023-06-28 08:41:51 +02:00
Cedric Verstraeten
4b8b6bf66a fix balena links 2023-06-26 08:22:12 +02:00
Cedric Verstraeten
4b6c25bb85 documentation for balena apps 2023-06-25 23:25:42 +02:00
Cedric Verstraeten
729b38999e add deploy with balena 2023-06-25 22:14:23 +02:00
Cedric Verstraeten
4cbf0323f1 Reference separate balena repositories 2023-06-25 20:21:44 +02:00
Cedric Verstraeten
1f5cb8ca88 merge directories 2023-06-25 20:05:03 +02:00
Cedric Verstraeten
8be0a04502 add balena deployment (app + block) 2023-06-25 20:03:37 +02:00
Cedric Verstraeten
bdc0039a24 fix: might be empty if not set, so will never fire motion alert 2023-06-24 20:22:51 +02:00
Cedric Verstraeten
756b893ecd reference latest tags 2023-06-24 12:58:16 +02:00
Cedric Verstraeten
36323b076f Fix for Kerberos Vault persistence check 2023-06-23 21:13:13 +02:00
Cedric Verstraeten
95f43b6444 Fix for empty vault settings, throw error 2023-06-23 20:20:38 +02:00
Cedric Verstraeten
5c23a62ac3 New function to validate Kerberos Hub connectivity and subscription 2023-06-23 19:01:04 +02:00
Cedric Verstraeten
2b425a2ddd add test video for verification 2023-06-23 17:16:13 +02:00
Cedric Verstraeten
abeeb95204 make region editable through ENV + add new upload function to upload to Kerberos Hub 2023-06-23 16:14:01 +02:00
Cédric Verstraeten
6aed20c466 Align to correct region 2023-06-22 15:53:46 +02:00
Cedric Verstraeten
d2dd3dfa62 add outputconfiguration + change endpoint 2023-06-21 15:55:51 +02:00
Cedric Verstraeten
6672535544 fix glitch with loading livestream when hitting dashboard page first 2023-06-14 17:14:40 +02:00
Cedric Verstraeten
ed397b6ecc change environment box sizes 2023-06-14 16:59:26 +02:00
Cedric Verstraeten
530e4c654e set permissions to modify the env.js file on runtime 2023-06-14 16:49:50 +02:00
Cedric Verstraeten
913bd1ba12 add demo environment mode 2023-06-14 16:29:13 +02:00
Cedric Verstraeten
84e532be47 reloading of configuration right after updating config + optimise loading stream + fix for ip camera online and cloud online + onvif ptz checks for hub 2023-06-13 23:12:58 +02:00
Cedric Verstraeten
3341e99af1 timetable might be empty 2023-06-13 09:22:51 +02:00
Cedric Verstraeten
ced6e678ec Update Settings.jsx 2023-06-12 21:08:05 +02:00
Cedric Verstraeten
340a5d7ef6 Update Settings.jsx 2023-06-12 21:06:21 +02:00
Cedric Verstraeten
60e8edc876 add onvif verify option + improve streaming logic + reconnect websocket 2023-06-12 20:33:41 +02:00
Cedric Verstraeten
9cf9babd73 introduce camera connected variable + allow MQTT stream to be connected even when no camera attached 2023-06-09 14:44:09 +02:00
Cedric Verstraeten
229c246e1c adding ctx (support) + unblock when unsupported codec 2023-06-08 21:50:10 +02:00
Cedric Verstraeten
15d9bcda4f decoding issue caused new mongodb adapter to fail 2023-06-07 22:01:44 +02:00
Cedric Verstraeten
068063695e time table might be empty 2023-06-07 20:24:09 +02:00
Cedric Verstraeten
b1722844f3 fix config overriding 2023-06-07 20:19:50 +02:00
Cedric Verstraeten
eb5ab48d6c timetable might be empty 2023-06-07 18:47:48 +02:00
Cedric Verstraeten
b64f1039d7 hotfix: fix missing polygon from start 2023-06-07 18:22:18 +02:00
Cedric Verstraeten
6fcd6e53a1 hotfix: fix openconfig 2023-06-07 17:07:39 +02:00
Cedric Verstraeten
25537b5f02 add new mongodb adapter 2023-06-07 16:44:54 +02:00
Cédric Verstraeten
2fad541e06 Update README.md 2023-06-03 08:49:58 +02:00
Cédric Verstraeten
afefd32a1f Merge pull request #102 from lubikx/master
Alpine+go combination doesn't resolve DNS correctly under docker comp…
2023-05-30 09:07:52 +02:00
Lubor Nosek
89e01e065c Alpine+go combination doesn't resolve DNS correctly under docker compose environment 2023-05-30 08:30:24 +02:00
Cedric Verstraeten
02f3e6a1e2 Update README.md 2023-05-16 12:01:09 +02:00
Cedric Verstraeten
ec5a00f3df get rid of print! 2023-05-10 14:41:32 +02:00
Cedric Verstraeten
2860775954 unused var 2023-05-10 11:43:23 +02:00
Cedric Verstraeten
d2e8e04833 upgrade joy4, allow OPUS 2023-05-10 11:33:38 +02:00
Cedric Verstraeten
fad90390a6 disable transcoder, will free up some memory + add support for audio 2023-05-10 11:33:03 +02:00
Cedric Verstraeten
d6ba875473 add mods 2023-05-09 13:16:32 +02:00
Cedric Verstraeten
e9ea34c20f connect to Kerberos Hub if required, even if camera is not working 2023-05-09 13:14:03 +02:00
Cedric Verstraeten
99cc7d419f add .vscode launch file 2023-05-06 08:46:31 +02:00
Cedric Verstraeten
1a6dc27535 add vault uri example 2023-05-03 20:03:22 +02:00
Cedric Verstraeten
93f40a8d34 get rid of dependency 2023-04-20 15:28:27 +02:00
Cedric Verstraeten
d7f7de97b4 Merge branch 'develop' 2023-04-20 13:37:07 +02:00
Cedric Verstraeten
50babedcbf remove trailing comma 2023-04-20 10:51:35 +02:00
Cédric Verstraeten
4352d993ed Merge pull request #93 from AFLDT/patch-1
Adding Chinese support
2023-04-10 14:59:56 +02:00
xingZhe
1e144e1c60 Change the path of the translation file 2023-04-09 06:41:59 +00:00
Cedric Verstraeten
d4e37e0bae update readme + few fixes + add env for dropbox 2023-04-07 23:27:05 +02:00
Cédric Verstraeten
026bf93980 Merge pull request #96 from cedricve/develop
Persistence: adding dropbox support
2023-04-07 22:26:22 +02:00
Cedric Verstraeten
43c166666c fix codacy errors 2023-04-07 22:16:47 +02:00
Cedric Verstraeten
f3bda88f3e add comments 2023-04-07 22:06:43 +02:00
Cedric Verstraeten
b0af6b2e2b Add testfile for uploading + fixes ui + added translations 2023-04-07 21:42:35 +02:00
Cedric Verstraeten
2925e19b90 complete dropbox integration (add directory + env variables) 2023-04-07 20:21:16 +02:00
Cedric Verstraeten
e67b6a1800 initial work for adding dropbox support 2023-04-07 14:49:12 +02:00
Cédric Verstraeten
bbbed49887 Add capabilities + fix typo 2023-04-07 08:41:47 +02:00
xingZhe
65fd400d4d Adding Chinese support 2023-04-02 14:25:20 +08:00
Cedric Verstraeten
87f681cfe1 fix show language selector 2023-03-23 18:08:20 +01:00
Cedric Verstraeten
f935360fda add delay when failed uploading, send to different mqtt topic when no hub + fixes responsiveness 2023-03-23 16:19:01 +01:00
Cedric Verstraeten
71cd315142 disable MQTT message on motion + rework cloud upload (new option RemoveAfterUpload) + sidebar 2023-03-21 13:31:04 +01:00
Cedric Verstraeten
d9694ac1a3 Merge branch 'heads/develop' 2023-03-19 21:36:31 +01:00
Cedric Verstraeten
08f589586d allow timetable and region to be set through environment variables 2023-03-19 20:39:48 +01:00
Cedric Verstraeten
192f78ae78 renabled arm-v6 2023-03-17 15:00:16 +01:00
Cedric Verstraeten
ef20d4c0b1 fix arch 2023-03-17 14:59:58 +01:00
Cedric Verstraeten
af95c0f798 add japanese, disable armv6 build 2023-03-17 12:50:50 +01:00
Cedric Verstraeten
0e32a10ff5 Merge branch 'master' into develop 2023-03-17 12:43:03 +01:00
Cédric Verstraeten
e59c2b179d Merge pull request #83 from jeffersonGlemos/master
feat add pt full translation
2023-03-17 12:42:44 +01:00
Cedric Verstraeten
e5d03f19de Merge branch 'master' into develop 2023-03-16 23:01:03 +01:00
Cedric Verstraeten
58c3e73f6f stop uploading if no credentials 2023-03-16 22:42:37 +01:00
Jefferson Gonçalves Lemos
8ca2c44422 feat add pt full translation 2023-03-10 13:14:28 -04:00
Cedric Verstraeten
a2e584a225 default values for recording, snapshots, .. 2023-03-09 10:48:26 +01:00
Cedric Verstraeten
c4cda0afb0 fix build 2023-03-07 22:47:47 +01:00
Cédric Verstraeten
adbb923e92 align variables config.json 2023-03-07 21:30:37 +00:00
Cedric Verstraeten
f444ae4ad6 add img to readme codespance make public 2023-03-07 22:25:26 +01:00
Cédric Verstraeten
9fa9538320 Update README.md 2023-03-07 22:15:40 +01:00
Cédric Verstraeten
943e81000e Update Dockerfile 2023-03-07 20:15:00 +01:00
Cédric Verstraeten
b16d028293 Merge pull request #79 from kododake/develop
Added translation for Japanese.
2023-02-27 21:10:14 +01:00
かいりゅか
07646e483d Add files via upload 2023-02-24 19:35:06 +09:00
Cedric Verstraeten
36b93a34b4 check if QUEUE is not null ;) 2023-02-23 15:16:41 +01:00
Cedric Verstraeten
b0d2409524 stop the motion and livestreaming threads first 2023-02-23 14:58:48 +01:00
Cedric Verstraeten
be7a231950 reset configuration 2023-02-23 12:20:25 +01:00
Cedric Verstraeten
31a0b9efa4 enable the rest! 2023-02-21 22:54:33 +01:00
Cedric Verstraeten
d70a3ed343 do not reload decoder if same settings 2023-02-21 22:44:56 +01:00
Cedric Verstraeten
56cebb6451 manage decoder from higher level 2023-02-21 22:12:08 +01:00
Cedric Verstraeten
99f61bc5e8 enable decoder again ;) 2023-02-21 21:45:54 +01:00
Cedric Verstraeten
a5d02e3275 disable decoder, see what it's doing from mem consumption 2023-02-21 21:31:17 +01:00
Cedric Verstraeten
354ab7db05 add garbage collection 2023-02-21 20:49:44 +01:00
Cedric Verstraeten
dc817f8c26 keep snapshot in memory (no longer store on disk) 2023-02-21 12:54:28 +01:00
Cedric Verstraeten
a90097731c fix error 2023-02-21 12:13:29 +01:00
Cedric Verstraeten
5b3bbbb37e new approach to store snapshots! 2023-02-21 12:12:30 +01:00
Cedric Verstraeten
4a4aabd71c upgrade to joy v1.0.54 2023-02-19 22:04:39 +01:00
Cedric Verstraeten
b058c1e742 set pointers to nil 2023-02-18 22:14:33 +01:00
Cedric Verstraeten
7671b1c2c3 unsubscribe from mqtt subscriptions 2023-02-18 22:12:44 +01:00
Cedric Verstraeten
4cc8135e1a move StoreSnapshot to separate method 2023-02-17 20:03:15 +01:00
Cedric Verstraeten
3cb38099ea add process memory + boot time 2023-02-15 13:01:16 +01:00
Cedric Verstraeten
deb0308dc4 rename attributes 2023-02-15 07:02:16 +01:00
Cedric Verstraeten
24c729eea3 Update Cloud.go 2023-02-14 23:11:55 +01:00
Cedric Verstraeten
c59d511ea3 fix for macs and ips 2023-02-14 22:55:18 +01:00
Cedric Verstraeten
6f8745dc3a alignment of motion recordings, make sure there is no overlap between two sibling recordings 2023-02-14 16:59:00 +01:00
Cedric Verstraeten
65d3d649b9 disable liveview (hd/sd) through env 2023-02-14 11:26:50 +01:00
Cedric Verstraeten
b4a8028c04 better way of doing prerecording + matching timestamp with prerecord time 2023-02-14 08:57:55 +01:00
Cedric Verstraeten
9d7077813a use timescale 10000000 2023-02-11 21:32:36 +01:00
Cedric Verstraeten
2feda33808 joy4 v1.0.51 2023-02-11 20:52:36 +01:00
Cedric Verstraeten
ec42b9ea85 update timescale to 90000 2023-02-11 12:27:38 +01:00
Cedric Verstraeten
a2b4ee12ec align timescale to 90000 2023-02-11 12:02:04 +01:00
Cedric Verstraeten
a0f99a5167 add two new variables to disable snapshotting (encoding time) + disable motion 2023-02-07 13:55:38 +01:00
Cedric Verstraeten
9aff467afc allocate single frame for decoding + disable transcoding 2023-02-05 21:17:12 +01:00
Cedric Verstraeten
926f9ea32c ping Kerberos Vault if attached (was no longer working) 2023-02-03 20:25:00 +01:00
Cedric Verstraeten
43d12ee82f increase network timeout 2023-02-03 12:19:42 +01:00
Cedric Verstraeten
677c985b3d get rid of yarn.lock 2023-02-03 11:53:06 +01:00
Cedric Verstraeten
24a5ca04ca upgrade 2023-02-03 11:37:16 +01:00
Cedric Verstraeten
f81ae57395 do an update and upgrade 2023-02-03 09:28:08 +01:00
Cedric Verstraeten
7e6b69819e upgrade node build-ui image 2023-02-03 08:56:56 +01:00
Cedric Verstraeten
4aa8ce7513 align memory values 2023-02-02 22:12:47 +01:00
Cedric Verstraeten
30d59f2613 add systemd details, move to seperate deployment page 2023-02-02 13:14:36 +01:00
Cedric Verstraeten
be0277432b add documentation for static binary 2023-02-01 20:21:37 +01:00
Cedric Verstraeten
a3006f3b9f change output name 2023-02-01 19:32:31 +01:00
Cedric Verstraeten
d4e10083c4 introduce armv6 build + fix for mp4f bin + add release to heartbeat 2023-02-01 18:50:32 +01:00
Cedric Verstraeten
898a11868e another typo :F 2023-02-01 17:28:02 +01:00
Cedric Verstraeten
a098052ee8 another typo 2023-02-01 17:19:47 +01:00
Cedric Verstraeten
6f79086a27 do not move mp4fragment 2023-02-01 17:18:19 +01:00
Cedric Verstraeten
6ee07d4c12 moving www/ frontend + mp4fragmented in right directory 2023-02-01 17:17:36 +01:00
Cedric Verstraeten
a69869440f new way of stripping 2023-02-01 17:11:14 +01:00
Cedric Verstraeten
dcd4e08c6f typo (shame on me..) 2023-02-01 16:41:08 +01:00
Cedric Verstraeten
22e7a0fec9 first try for a new release.. let's see if it gets through !:) 2023-02-01 16:34:55 +01:00
Cedric Verstraeten
8daebd434d update base img 2023-02-01 15:36:15 +01:00
Cedric Verstraeten
8381cd1a5c another fix www is now in other stage 2023-02-01 12:12:11 +01:00
Cedric Verstraeten
051299f331 revert base image 2023-02-01 12:07:47 +01:00
Cedric Verstraeten
20f2c764fe error www not there 2023-02-01 12:04:31 +01:00
Cedric Verstraeten
07619c2bce multi stage for ui 2023-02-01 12:00:04 +01:00
Cedric Verstraeten
fb86efe715 new base image 👍 2023-02-01 10:35:19 +01:00
Cedric Verstraeten
4eafe6db84 new base image 2023-02-01 06:54:57 +01:00
Cedric Verstraeten
797f986d5d upgrade to node 14 2023-01-31 21:47:43 +01:00
Cedric Verstraeten
864a8ecffa other method 2023-01-31 21:37:37 +01:00
Cedric Verstraeten
7e48be06e3 other yarn 2023-01-31 19:12:09 +01:00
Cedric Verstraeten
27861aa0a5 try with prev image 2023-01-31 18:39:48 +01:00
Cedric Verstraeten
6d45cb7e6d enable armv6 2023-01-31 15:37:36 +01:00
Cedric Verstraeten
1964b893a5 upgrade base image (changed armv6 image) 2023-01-31 15:30:48 +01:00
Cedric Verstraeten
e30c8fe39c compile bento4 + add more system info 2023-01-31 12:25:36 +01:00
Cedric Verstraeten
b183561dc6 update go.sum 2023-01-30 18:48:38 +01:00
Cedric Verstraeten
caa4952e99 Update go.mod 2023-01-30 18:45:18 +01:00
Cedric Verstraeten
5e15b9ac75 Update main.go 2023-01-30 18:20:00 +01:00
Cedric Verstraeten
4fadd770f8 set max size to 300mb 2023-01-30 17:08:16 +01:00
Cedric Verstraeten
7ac2f71de1 Update main.go 2023-01-30 16:16:07 +01:00
Cedric Verstraeten
b49eef2d1c sync to file every keyframe + added a close method to clean up all pointers 2023-01-30 16:09:31 +01:00
Cedric Verstraeten
b8735aba01 sync file continuously 2023-01-30 15:49:12 +01:00
Cedric Verstraeten
c73584a743 cleaning up pointers + wipe memory and write to disc every 100 frames. 2023-01-30 15:37:57 +01:00
Cedric Verstraeten
ef4fc0a184 add gosum 2023-01-30 14:45:34 +01:00
Cedric Verstraeten
8cada20b1e try direct write to writer (instead of bufio) 2023-01-30 14:41:46 +01:00
Cedric Verstraeten
ba4ac215b5 reenable writing packets 2023-01-30 09:32:03 +01:00
Cedric Verstraeten
947ff473a0 get rid of dependencies + add AGENT_CAPTURE_RECORDING option 2023-01-30 09:19:28 +01:00
Cedric Verstraeten
14dc1a2fa3 disable dependencies 2023-01-30 09:11:21 +01:00
Cedric Verstraeten
df69cdc6a4 add possibility to disable recording completely 2023-01-30 08:59:05 +01:00
Cedric Verstraeten
dd54e08e10 Merge branch 'master' into develop 2023-01-30 08:47:08 +01:00
Cedric Verstraeten
7e3bb2359e Update go.yml 2023-01-30 08:21:43 +01:00
Cedric Verstraeten
5d00053b6f fix for checkout 2023-01-30 08:14:37 +01:00
Cedric Verstraeten
d573c2b829 upgrade to actions/checkout@v3.1.0 2023-01-30 08:07:25 +01:00
Cedric Verstraeten
eda9eb90d8 Merge branch 'master' into develop 2023-01-29 19:57:56 +01:00
Cedric Verstraeten
34849e8250 a test for mem 2023-01-29 19:53:34 +01:00
Cedric Verstraeten
d069a606d7 remove cleanup 2023-01-29 19:52:16 +01:00
Cedric Verstraeten
f0b042a2d1 we missed a packet 💯 2023-01-29 16:58:18 +01:00
Cedric Verstraeten
8a3ec65ec1 update recording logic to make sure no packets are lost 2023-01-29 16:47:59 +01:00
Cedric Verstraeten
e53715b9b0 get rid of flush 2023-01-29 15:40:38 +01:00
Cedric Verstraeten
9ef1339f8f upgrade to joy v1.0.46 2023-01-29 11:21:56 +01:00
Cedric Verstraeten
afaa51aac3 add an intermediate flush 2023-01-29 11:14:30 +01:00
Cedric Verstraeten
fb803df15e upgrade joy4 2023-01-28 23:00:58 +01:00
Cedric Verstraeten
de0c4fa657 align fragmentation time scale + upgrade to new joy4 2023-01-28 22:55:53 +01:00
Cedric Verstraeten
18484f407c width and height were swapped causing polygon not matching motion 👎 2023-01-27 00:59:34 +01:00
Cedric Verstraeten
f4a7bc5650 increase sensitivity 2023-01-26 19:05:27 +01:00
Cedric Verstraeten
9410289b4f add gcompat 2023-01-26 18:52:26 +01:00
Cedric Verstraeten
8eecae6f9a get rid of debian c+ 2023-01-26 18:45:50 +01:00
Cedric Verstraeten
63cf88b88d add libbsd 2023-01-26 18:43:44 +01:00
Cedric Verstraeten
cb9403cbf9 Update Dockerfile 2023-01-26 18:32:51 +01:00
Cedric Verstraeten
27da1dbc74 add a few more deps 2023-01-26 18:25:06 +01:00
Cedric Verstraeten
98d9185c20 add libgcc libstdc++ 2023-01-26 18:15:46 +01:00
Cedric Verstraeten
dec367a8f5 Add libc6-compat fpr mp4fragmenter 2023-01-26 16:19:16 +01:00
Cedric Verstraeten
8a3c1e8f08 fix thresholding for motion detection (should be configurable though) 2023-01-26 14:25:44 +01:00
Cedric Verstraeten
89c385ae2f use low-res in SD streaming 2023-01-26 09:03:49 +01:00
Cedric Verstraeten
e920cb2a6b increase encoding quality 2023-01-26 00:50:24 +01:00
Cedric Verstraeten
3f6204e2eb using substream for SD live is giving issues (to be checked) 2023-01-26 00:35:29 +01:00
Cedric Verstraeten
ae9d42e821 fix for pixeltreshold might be empty + mp4fragment fix ldd 2023-01-26 00:22:30 +01:00
Cedric Verstraeten
31400bd5e7 revert to older ffmpeg (something is going wrong in decoding) 2023-01-25 22:23:03 +01:00
Cedric Verstraeten
7973559502 fix for some onvif cameras that requires PT coordinates (als if zero) 2023-01-25 14:12:22 +01:00
Cedric Verstraeten
d4bfc8677a try new release (ffmpeg) only amd64 will work for now, come back later 2023-01-25 10:46:46 +01:00
Cedric Verstraeten
155f1c7cdf alternative way to cleanup (get rid of initializers) 2023-01-24 08:48:27 +01:00
Cedric Verstraeten
9643a79603 fix mem leak 2023-01-23 10:29:19 +01:00
Cedric Verstraeten
3cc67b485e add datadog for k8s profiling 2023-01-22 15:12:45 +01:00
Cedric Verstraeten
2175d76f94 fixing 2023-01-22 00:46:20 +01:00
Cedric Verstraeten
2a069f8881 fix motion detection + color stream 2023-01-21 23:41:42 +01:00
Cedric Verstraeten
c46765ee8f Update Dockerfile 2023-01-20 20:30:15 +01:00
Cedric Verstraeten
427fad262c fix some build errors 2023-01-20 08:14:38 +01:00
Cedric Verstraeten
a895b24425 several fixes and optimisations
fix static build + jpeg encoding using libjpeg turbo + motion optimisation + process motion on substream when available
2023-01-20 07:35:20 +01:00
Cedric Verstraeten
659ba3e67d armv5 not supported 2023-01-18 23:14:13 +01:00
Cedric Verstraeten
7e394e1c93 try armv5 and armv6 + get rid of bitwiseand and a absdiff 2023-01-18 23:11:01 +01:00
Cedric Verstraeten
886522039a remove bimg 2023-01-18 22:12:35 +01:00
Cedric Verstraeten
155f99597a do not resize (check performance) 2023-01-18 21:39:20 +01:00
Cedric Verstraeten
d67ac25e87 work directly on gray images 2023-01-18 21:03:07 +01:00
Cedric Verstraeten
1fdfddeb43 Update main.go 2023-01-18 18:25:56 +01:00
Cedric Verstraeten
770192ac57 add static build, get rid of dynamic libraries 2023-01-18 17:22:56 +01:00
Cedric Verstraeten
228cfd844a optimisation in motion detection 2023-01-18 09:25:13 +01:00
Cedric Verstraeten
4fbcfbc99d add resizing methods 2023-01-17 22:45:01 +01:00
Cedric Verstraeten
bfb72e8268 get rid of gocv 2023-01-17 19:50:08 +01:00
Cedric Verstraeten
e375c107ef get rid of opencv for encoding 2023-01-17 16:51:59 +01:00
Cedric Verstraeten
99ce644e5d upgrade dependencies 2023-01-17 15:29:41 +01:00
Cedric Verstraeten
05c2d4c583 make sure the stream is ready before we start sending stream over websocket (#48)
fix for https://github.com/kerberos-io/agent/issues/68
2023-01-07 08:39:00 +01:00
Cedric Verstraeten
2265eb88e7 Revert "try static build"
This reverts commit 926e392076.
2023-01-04 15:23:41 +01:00
Cedric Verstraeten
926e392076 try static build 2023-01-03 16:06:30 +01:00
Cédric Verstraeten
7c0cbd4771 Update README.md 2022-12-29 21:31:57 +01:00
Cedric Verstraeten
70d27dc502 add view demo 2022-12-29 21:31:28 +01:00
Cedric Verstraeten
fece388ae9 first release of sending live view (sd) over websockets 2022-12-29 21:16:33 +01:00
Cedric Verstraeten
5543be51da issue prod websocket 2022-12-28 12:59:13 +01:00
Cedric Verstraeten
cf28d69cce get rid of console.logs 2022-12-28 11:55:21 +01:00
Cedric Verstraeten
9ea4a96165 Merge branch 'master' of https://github.com/kerberos-io/agent 2022-12-27 22:54:24 +01:00
Cedric Verstraeten
7bd4934ee7 add websocket client 2022-12-27 22:54:22 +01:00
Cédric Verstraeten
fb5919af99 remove empty spaces 2022-12-27 20:15:10 +01:00
Cédric Verstraeten
8cf8a2e253 balena works with an USB camera 2022-12-27 20:13:18 +01:00
Cédric Verstraeten
8dfd15bc3a remove blank spaces 2022-12-27 20:11:56 +01:00
Cédric Verstraeten
05fab6c7f7 Update README.md 2022-12-27 20:11:21 +01:00
Cedric Verstraeten
9ec52d445d writing additional headers for video/mp4 2022-12-27 11:06:34 +01:00
Cédric Verstraeten
a68aaaa214 add --restart=always option 2022-12-27 10:25:16 +01:00
Cedric Verstraeten
97d2f8fb26 Fix for serving recordings (firefox bug https://github.com/kerberos-io/agent/issues/57) 2022-12-26 20:56:37 +01:00
Cedric Verstraeten
b2bc10865c push multi-arch images to separate repo + make sequential 2022-12-26 20:09:59 +01:00
Cedric Verstraeten
7b6ac61a71 upgrade kerberos-io/onvif 2022-12-26 20:01:34 +01:00
Cedric Verstraeten
27b2842481 reimplement onvif ptz (continuous) + added onvif swagger endpoints 2022-12-26 17:17:38 +01:00
Cedric Verstraeten
8507e84ea0 we'll wait a bit to stop the decoders 2022-12-20 21:49:12 +01:00
Cedric Verstraeten
66df22709f add extra check, might have an empty image 2022-12-20 21:24:14 +01:00
Cedric Verstraeten
c8c9a0fd6e add kerberos factory 2022-12-12 21:05:59 +01:00
Cedric Verstraeten
f6b95ce7f3 few more design tweaks 2022-12-12 20:47:26 +01:00
Cedric Verstraeten
6b31ddcbfc complete kubernetes docs + added openshift 2022-12-12 20:43:27 +01:00
Cedric Verstraeten
73892ee86f update kubernetes resource files 2022-12-09 20:53:41 +01:00
Cedric Verstraeten
1a0458de11 small typo 2022-12-09 18:42:04 +01:00
Cedric Verstraeten
42647172b9 fix 2022-12-09 18:35:02 +01:00
Cedric Verstraeten
e29577e1c5 simplify overview page 2022-12-09 18:34:14 +01:00
Cedric Verstraeten
9012bf2bf0 some more tweaks 2022-12-09 18:27:38 +01:00
Cedric Verstraeten
0fa1943701 add username and password env + add kubernetes deployment 2022-12-09 16:50:40 +01:00
Cedric Verstraeten
a0c1acfd85 add example env variables 2022-12-09 10:56:14 +01:00
Cedric Verstraeten
6413cc82c7 few more tweaks 2022-12-08 23:18:50 +01:00
Cedric Verstraeten
e57908c9b4 small updates 2022-12-08 23:02:57 +01:00
Cedric Verstraeten
c36007ab27 fix title heading 2022-12-08 16:21:32 +01:00
Cedric Verstraeten
928604e12b few more tweaks, agent can be deployed anywhere ;) 2022-12-08 16:19:43 +01:00
Cedric Verstraeten
e33ee6cfa6 add introduction for Docker and Docker Compose in the deployment file 2022-12-08 15:58:52 +01:00
Cedric Verstraeten
517b4b26f2 small fixes to layout! 2022-12-08 15:30:22 +01:00
Cedric Verstraeten
06b3d7e67f make an optional section in prereq 2022-12-08 13:54:56 +01:00
Cedric Verstraeten
508422d461 change introduction wordings 2022-12-08 13:53:45 +01:00
Cedric Verstraeten
528638f69c typo in reference 2022-12-08 13:51:39 +01:00
Cedric Verstraeten
70e7507a15 more tweaks for readme 2022-12-08 13:50:48 +01:00
Cedric Verstraeten
df019cf347 find better wording 2022-12-08 13:47:04 +01:00
Cedric Verstraeten
e88a786116 add small note about volume mounting 2022-12-08 13:42:45 +01:00
Cedric Verstraeten
7242212dd0 make a better reference in the introduction quick start 2022-12-08 13:11:09 +01:00
Cedric Verstraeten
e3dd5fd34b adding running and automating section 2022-12-08 13:09:28 +01:00
Cedric Verstraeten
2b90bdbc0b add documentation for docker compose 2022-12-08 13:01:03 +01:00
Cedric Verstraeten
cd4e9eed3d add references to env. list! 2022-12-08 12:29:58 +01:00
Cedric Verstraeten
0658f9afbb add example docker compose 2022-12-07 23:02:35 +01:00
Cedric Verstraeten
541630a65f fix link 2022-12-07 20:29:10 +01:00
Cedric Verstraeten
4e46f7f4bd small typo H264 2022-12-07 20:28:43 +01:00
Cedric Verstraeten
4da3c7a50c Add "is camera working" section 2022-12-07 20:18:57 +01:00
Cedric Verstraeten
47fb5b81ab initialise deployment directory 2022-12-07 17:34:38 +01:00
Cedric Verstraeten
b27b2f011b another try 2022-12-07 17:19:17 +01:00
Cedric Verstraeten
40ea660609 not wkring as expected 2022-12-07 17:17:17 +01:00
Cedric Verstraeten
d70314d4ab broken link in overview 2022-12-07 17:16:26 +01:00
Cedric Verstraeten
8867c1dde5 update overview 2022-12-07 17:14:48 +01:00
Cedric Verstraeten
a36d53a89b Merge branch 'master' of https://github.com/kerberos-io/agent 2022-12-07 16:45:45 +01:00
Cedric Verstraeten
f634b98552 add environment variables to simplify deployment docker compose and kubernetes 2022-12-07 16:45:28 +01:00
Cédric Verstraeten
3942eb5ec1 Update README.md 2022-11-29 21:51:33 +01:00
Cedric Verstraeten
f45413dab9 Merge branch 'master' of https://github.com/kerberos-io/agent 2022-11-28 16:55:08 +01:00
Cedric Verstraeten
5795a41f92 add note for USB and RPi camera 2022-11-28 16:54:57 +01:00
Cédric Verstraeten
ddc6491058 Update README.md 2022-11-24 13:49:22 +01:00
Cédric Verstraeten
9c940e18d7 Update README.md 2022-11-24 11:06:25 +01:00
Cedric Verstraeten
80b12e229a add boards 2022-11-24 09:33:43 +01:00
Cedric Verstraeten
b9a81a31c9 finetune documentation around agent / factory 2022-11-24 09:31:52 +01:00
Cedric Verstraeten
1bc063fdbb fix navigation 2022-11-24 09:23:13 +01:00
Cedric Verstraeten
98b897e4fe add overview 2022-11-24 09:19:14 +01:00
Cedric Verstraeten
312e501da4 add codespaces 2022-11-24 08:40:02 +01:00
Cédric Verstraeten
0d2e68af3f Update Dockerfile 2022-11-22 10:48:42 +01:00
Cédric Verstraeten
e16d933e56 change dev container base image ;) 2022-11-22 09:40:29 +00:00
Cédric Verstraeten
8ee5b42fde Update Dockerfile 2022-11-22 10:09:12 +01:00
Cédric Verstraeten
05447abe93 Update Dockerfile 2022-11-22 10:05:41 +01:00
Cédric Verstraeten
6c91f233e3 Update Dockerfile 2022-11-22 09:58:37 +01:00
Cédric Verstraeten
feaba1afb5 extract credentials added readme 2022-11-22 05:15:31 +00:00
Cedric Verstraeten
e70a66f7b4 organise into examples dir 2022-11-22 05:55:36 +01:00
Cédric Verstraeten
a78ccd023c add k8s + copilot extension 2022-11-21 22:51:44 +00:00
Cedric Verstraeten
87ac0932c3 add namespace + change task names 2022-11-21 10:16:09 +01:00
Cedric Verstraeten
8e1dcd7002 add k8s and openshift deployment (through ansible) examples 2022-11-21 09:06:02 +01:00
Cedric Verstraeten
1b96d01964 add template config on build, so we could restore at a later time 2022-11-20 20:06:57 +01:00
Cédric Verstraeten
c013308afe small updates to FAQ readme 2022-11-17 21:59:01 +00:00
Cédric Verstraeten
e28d133ef0 add yarn build command 2022-11-17 21:53:01 +00:00
Cédric Verstraeten
e8f03cd7d7 formatting issue + added devcontainer feature ansible! 2022-11-17 21:51:29 +00:00
Cédric Verstraeten
31f7eb5a9a add forwarded ports (ui and machinery) + add post installation commands get dependencies 2022-11-17 21:40:35 +00:00
Cédric Verstraeten
89c39aa853 make some modifications to show what needs to be uncommented for codespaces 2022-11-17 21:19:25 +00:00
Cédric Verstraeten
f349926c77 placeholder issue (recording) + formatting 2022-11-17 21:01:27 +00:00
Cedric Verstraeten
bf0ae4cb64 Revert "fixes"
This reverts commit fb666bb622.
2022-11-17 21:40:38 +01:00
Cédric Verstraeten
fb666bb622 fixes 2022-11-17 20:39:02 +00:00
Cédric Verstraeten
b69b52d292 add empty files 2022-11-17 20:37:08 +00:00
Cedric Verstraeten
d11180f26e create a separate docker file 2022-11-17 21:20:28 +01:00
Cedric Verstraeten
69461c8c7a Merge branch 'master' of https://github.com/kerberos-io/agent 2022-11-17 21:16:54 +01:00
Cedric Verstraeten
8a373adc2c add devcontainer.json 2022-11-17 21:16:45 +01:00
Cédric Verstraeten
eb3355da9a add discord server 2022-11-08 20:56:51 +01:00
Cédric Verstraeten
2cda3ad929 Merge pull request #55 from robdsource/master
Update french translation
2022-11-04 16:05:07 +01:00
robdsource
7a344a987e Update french translation 2022-11-04 15:36:05 +01:00
Cédric Verstraeten
3ce9adc95c Merge pull request #54 from TeeTeufel/master
Adding german translation
2022-10-20 21:56:39 +02:00
Cedric Verstraeten
e5c71bb6b2 Merge branch 'develop' 2022-10-20 21:32:12 +02:00
Cedric Verstraeten
a021a9e332 broke the pre recording when setting substream 2022-10-20 21:31:56 +02:00
Christian Pätsch
d3bd8031a7 Adding german translation 2022-10-20 21:27:58 +02:00
Cedric Verstraeten
bb18fff7d9 Merge branch 'develop' 2022-10-20 17:45:51 +02:00
Cedric Verstraeten
9ed2873adb fix recording pre 2022-10-20 17:37:25 +02:00
Cedric Verstraeten
f891ac375b change cursors 2022-10-20 15:16:15 +02:00
Cedric Verstraeten
630b34ca27 get rid of console.log 2022-10-17 13:31:06 +02:00
Cedric Verstraeten
ecc7514d2b low+high res streaming, ptz and ONVIF capabilities testing 2022-10-17 13:25:07 +02:00
Cédric Verstraeten
9e395bb8a7 Merge pull request #53 from robdsource/master
Add french translation
2022-10-11 19:35:14 +02:00
robdsource
85fe51f0f7 Add french translation 2022-10-08 21:16:54 +02:00
Cedric Verstraeten
cb247e0312 Merge branch 'develop' 2022-10-02 21:47:28 +02:00
Cedric Verstraeten
9f933ac9ed new way of computing the uptime 2022-09-29 17:16:49 +02:00
Cedric Verstraeten
b0455c6bda get rid of symbol linking 2022-09-24 21:08:09 +02:00
Cedric Verstraeten
bac247f15e get rid of armv5 for the moment + updated base image 2022-09-23 09:03:48 +02:00
Cedric Verstraeten
50823c8f8a add yarn through npm 2022-09-22 19:41:12 +02:00
Cedric Verstraeten
2b81efb2c5 fix node with nvm 2022-09-22 19:30:37 +02:00
Cedric Verstraeten
013e93be67 install nodejs throught apt-get 2022-09-22 19:17:21 +02:00
Cedric Verstraeten
934ea927b2 try to build armv6 2022-09-22 18:51:34 +02:00
Cedric Verstraeten
e54a4097a1 moved balena to other repo kerberos-io/balena-agent 2022-09-22 18:50:19 +02:00
Cedric Verstraeten
bc98b87dc2 add raspberrypi 2 + change default device 2022-09-22 17:10:48 +02:00
Cedric Verstraeten
1c7883b76a add intel nuc + orange pi + jetson 2022-09-22 14:47:48 +02:00
Cedric Verstraeten
3850a91737 add raspberrypi4 support balena 2022-09-22 14:28:56 +02:00
Cedric Verstraeten
6e8599efda Update balena.yaml 2022-09-22 12:16:09 +02:00
Cedric Verstraeten
1f66eceb3d Update docker-compose.yml 2022-09-22 10:28:08 +02:00
Cedric Verstraeten
5b551d128f Update balena.yaml 2022-09-22 10:07:06 +02:00
Cedric Verstraeten
77373dfd1d try to fix multi arch (old way -> kerberos-io/kerberos-balena) 2022-09-22 09:43:13 +02:00
Cedric Verstraeten
c4fdf7de01 adding docker-compose file 2022-09-22 08:58:40 +02:00
Cedric Verstraeten
89875358b1 trying a cross build on balena 2022-09-21 21:45:43 +02:00
Cedric Verstraeten
70bc2a3af1 add balena fleet build to workflow 2022-09-21 21:36:11 +02:00
Cedric Verstraeten
b228d808db adding balena button 2022-09-21 20:27:15 +02:00
Cedric Verstraeten
7fe3f5fb0f grant root access to bind agent to port 80 2022-09-21 19:25:21 +02:00
Cedric Verstraeten
4daf48a39c change to port 80 2022-09-21 19:12:50 +02:00
Cedric Verstraeten
20a1802bdd add a readme to balena 2022-09-21 16:52:56 +02:00
Cedric Verstraeten
b812b49e11 update logo reference balena 2022-09-21 16:48:43 +02:00
Cedric Verstraeten
30fb0b675b add balena config 2022-09-21 16:04:34 +02:00
Cedric Verstraeten
e6257f4ec6 Update config.yml 2022-09-20 22:04:20 +02:00
Cedric Verstraeten
38b28b965c Merge branch 'develop' 2022-09-20 21:45:09 +02:00
Cedric Verstraeten
16e4c68fb3 add data folder permission validation
agent will not boot up if not having any permissions
2022-09-20 21:37:35 +02:00
Cedric Verstraeten
784fe73a55 add verify connect method 2022-09-20 21:17:08 +02:00
Cedric Verstraeten
3a83ffc7b1 add verifycamera placeholder 2022-09-20 16:17:57 +02:00
Cedric Verstraeten
b6ef1249b3 fix Camera typo 2022-09-20 12:46:35 +02:00
Cedric Verstraeten
c8a863a88d Update translation.json 2022-09-20 12:44:20 +02:00
Cedric Verstraeten
da1f7ab574 add translations 2022-09-20 12:44:15 +02:00
Cédric Verstraeten
6938ecfca2 Merge pull request #46 from olokos/list_videos
Improve recorded videos dashboard counter
2022-09-11 22:35:48 +02:00
olokos
9461fe8874 Improve recorded videos dashboard counter
Implemented new `util` function, which gets count of only `.mp4` extension.
This solves the problem of folders being counted as recordings on the dashboard.
2022-09-09 23:42:30 +02:00
Cedric Verstraeten
a770825648 add more translations 2022-09-08 22:11:14 +02:00
Cedric Verstraeten
5d4a6472d9 Merge branch 'develop' 2022-09-08 16:51:37 +02:00
Cédric Verstraeten
4924bbb7d1 Merge pull request #43 from olokos/fix_pre3
Fix prerecording timing
2022-09-08 13:51:00 +02:00
olokos
56a2558ed8 Try removing GOP +1 2022-09-08 13:45:24 +02:00
olokos
0cb125720b Attempt to fix prerecording 2022-09-07 21:33:03 +02:00
olokos
0b12b7d118 Add logging for prerecording and MaxGopCount 2022-09-07 21:30:37 +02:00
Cédric Verstraeten
5f82a497ca Update main.go 2022-09-06 18:53:33 +02:00
Cedric Verstraeten
a67c84a670 add dutch translation + add placeholders + stop loading when empty media 2022-09-05 22:02:05 +02:00
Cedric Verstraeten
13a93d0432 add auto cleanup configuration 2022-09-05 14:05:46 +02:00
Cedric Verstraeten
9e18d927bf add latest tags 2022-09-05 13:31:43 +02:00
Cedric Verstraeten
44f4d2913c add some kerberos hub branding 2022-09-05 13:29:01 +02:00
Cedric Verstraeten
0caea6cf3c Update README.md 2022-09-05 13:02:36 +02:00
Cedric Verstraeten
026b343b76 Update README.md 2022-09-05 13:00:43 +02:00
Cedric Verstraeten
fea45cf349 Update README.md 2022-09-05 12:57:35 +02:00
Cedric Verstraeten
8a0481da03 fix video width + update base config 2022-09-05 12:43:23 +02:00
Cedric Verstraeten
a32ae05f69 Update config.json 2022-09-05 12:43:04 +02:00
Cedric Verstraeten
19fd33f26a add gif readme 2022-09-05 12:00:39 +02:00
Cedric Verstraeten
3d5c5b62cb language fix, add manual fallback in languageselector 2022-09-05 11:15:56 +02:00
Cedric Verstraeten
793bb28c55 add polish file 2022-09-05 08:25:34 +02:00
Cedric Verstraeten
10d6004f58 hard code 'en' for the time being 2022-09-05 08:24:27 +02:00
Cedric Verstraeten
59af5db610 load language only! 2022-09-04 21:57:40 +02:00
Cedric Verstraeten
d0457cfd7c Update LanguageSelect.jsx 2022-09-04 21:51:09 +02:00
Cedric Verstraeten
856fd8717a Update LanguageSelect.jsx 2022-09-04 21:50:58 +02:00
Cedric Verstraeten
169946dad0 set fallback 2022-09-04 21:50:12 +02:00
Cedric Verstraeten
f2e9e534ca change fallback 2022-09-04 21:46:37 +02:00
Cedric Verstraeten
6cb7906076 move translations not able to be resolved in container 2022-09-04 21:11:51 +02:00
Cedric Verstraeten
68f1de2dbf optimise and make more accurate pre recording 2022-09-04 21:05:18 +02:00
Cedric Verstraeten
a9f5e0d34e fix for language selector 2022-09-04 17:44:50 +02:00
Cedric Verstraeten
c1a958890f update translation 2022-09-04 16:19:17 +02:00
Cedric Verstraeten
8f4c7bbadc Update index.jsx 2022-09-03 22:01:49 +02:00
Cedric Verstraeten
38ee070598 change opacity + add first version of language selector 2022-09-03 22:00:15 +02:00
Cedric Verstraeten
235641cf72 change restart order (on config) + hide time settings when disabled 2022-09-03 20:07:26 +02:00
Cedric Verstraeten
ce4693cb0e add additional check 2022-09-02 15:27:59 +02:00
Thomas Quandalle
a08728d119 prerecording was not taken into account 2022-09-01 21:53:42 +02:00
Thomas Quandalle
d50a888bdf Update IPCamera.go 2022-09-01 21:12:32 +02:00
Thomas Quandalle
72fefba530 add recordings page 2022-09-01 19:54:42 +02:00
Thomas Quandalle
f5cb48c2e9 Update config.js 2022-08-31 20:17:56 +02:00
Thomas Quandalle
9477fe9585 fix for hostname 2022-08-31 19:50:19 +02:00
Thomas Quandalle
e1d468cfca hide specific settings when toggled off, small tweak for infobar 2022-08-31 16:58:23 +02:00
Thomas Quandalle
e67500a4fe add link to ui library + camerasonline 2022-08-31 16:37:59 +02:00
Thomas Quandalle
daed0ee33a when in offlineMode, mqttClient can nil + try something for disconnects 2022-08-31 16:25:44 +02:00
Thomas Quandalle
721c8b6c65 cleanup config + smaller bug fixes 2022-08-31 16:09:01 +02:00
Thomas Quandalle
fb7ef7aa97 update dashboard page, add breadcrumb buttons 2022-08-31 15:12:14 +02:00
Thomas Quandalle
cf40b68338 add links + recording threshold 2022-08-31 11:15:31 +02:00
Thomas Quandalle
cdca6d77cb fix heartbeat + add kerberos hub connection kpi 2022-08-31 10:06:56 +02:00
Thomas Quandalle
f2b1f82f59 Update Dashboard.jsx 2022-08-30 20:40:52 +02:00
Thomas Quandalle
963c330c00 Update Dashboard.jsx 2022-08-30 19:29:19 +02:00
Thomas Quandalle
10864fbfe4 move dashboard info to app.jsx 2022-08-30 19:26:03 +02:00
Thomas Quandalle
7423cb36ea fix errors here and there :) 2022-08-30 10:30:21 +02:00
Thomas Quandalle
efbf03858e fix live view + add total recordings + offline mode option 2022-08-29 22:33:55 +02:00
Thomas Quandalle
b37787dd02 fix for config url, live view need some tweaks 2022-08-29 15:19:51 +02:00
Thomas Quandalle
19a680cfd8 add MJPEG and live view on dashboard page (WIP)
snapshots are now stored in color instead of gray
2022-08-29 15:12:17 +02:00
Thomas Quandalle
3de747dbb9 update config 2022-08-27 21:25:33 +02:00
Thomas Quandalle
dbaa2ddcbe add webrtc extra logging 2022-08-27 21:19:48 +02:00
Thomas Quandalle
60be32dea3 Update authentication.js 2022-08-27 15:29:09 +02:00
Thomas Quandalle
6a561c6464 change env 2022-08-27 14:40:49 +02:00
Thomas Quandalle
e62ae2f770 fix for API url (port might be different) 2022-08-27 13:38:47 +02:00
Thomas Quandalle
2fb6625e9a fix prod build 2022-08-26 22:03:45 +02:00
Cédric Verstraeten
018a0baf0a Update README.md 2022-08-26 21:49:02 +02:00
Thomas Quandalle
6182594b6e enterprise agent depending on how it is running + github action workflow failed 2022-08-26 21:19:07 +02:00
Thomas Quandalle
e00b45037c add a random key when empty + add verification for hub and vault 2022-08-26 21:13:43 +02:00
Thomas Quandalle
a1c000e84f add a working settings page (Finally) 2022-08-26 18:01:18 +02:00
Thomas Quandalle
c1684bb743 add update config function - less strict linting 2022-08-22 22:48:41 +02:00
Thomas Quandalle
31b9f7022b remove console print - broke build 2022-08-22 12:26:17 +02:00
Thomas Quandalle
6b9f25ef57 a few fixes for settings page + add sub rtsp url (WIP) 2022-08-22 10:50:10 +02:00
Thomas Quandalle
24c3b97629 remove double reading from channel 2022-08-21 20:34:15 +02:00
Cédric Verstraeten
03f7201540 Merge pull request #39 from olokos/develop
Save num of changes to filename, update a lot of dependencies and fix compiling develop branch
2022-08-18 10:21:49 +02:00
olokos
9ffd9700db Revert channel size back to 1
Having the size of 2 gives no profits, but creates a side effect of 1kb recording files being saved, because of message channel being blocked awaiting for the other parameter, giving unexpected results
2022-08-17 23:33:20 +02:00
olokos
87f84e993a Eslint: Fix Unexpected unnamed function func-names
This is the proper and correct new way of using anonymous functions.

Reference: https://stackoverflow.com/questions/52735032/warning-unexpected-unnamed-function-func-names-under-eslint-rule
2022-08-17 21:12:01 +02:00
olokos
23b357e669 Fix build after 3debf62975
Because of missing imports and some undefined variables the latest code wouldn't compile.

I have fixed it.

The only thing I am not sure about is dispatchConfig: PropTypes.bool.isRequired
Not sure if it's truly bool or if this is valid, but it works finally again, but with proper log timezone!
2022-08-17 20:27:26 +02:00
olokos
4021c0dada No idea how those 2 got downgraded, but lets use the latest versions again 2022-08-17 11:50:02 +02:00
olokos
dac55e489a Merge remote-tracking branch 'upstream/develop' into develop 2022-08-17 11:44:31 +02:00
Thomas Quandalle
14e1584ca7 add timezone for logging 2022-08-16 22:59:51 +02:00
Thomas Quandalle
3dd61eea6e Merge branch 'master' into develop 2022-08-15 22:08:41 +02:00
Thomas Quandalle
3debf62975 update settings page 2022-08-15 22:08:08 +02:00
olokos
fb20aa3bf5 Split filename source line into multiple lines, improve logging message 2022-08-15 18:39:41 +02:00
olokos
e4ba5570a7 Use node 16 instead of 14 and use latest kerberos/base 2022-08-15 18:15:16 +02:00
olokos
592e03fc3b Update most of go and javascript dependencies
There were some build issues, also eslint was preventing build because of too old react-scripts, that was in conflict with another requirement for more recent eslint, so I've updated react-scripts, so newer eslint isn't a problem anymore.
2022-08-15 18:11:40 +02:00
Cédric Verstraeten
0b1fd776f4 Merge pull request #36 from olokos/master
Implement changesTreshold parameter and config.json
2022-08-15 08:17:40 +02:00
olokos
098bf0b75f Attempt to fix compilation 2022-08-15 02:52:04 +02:00
olokos
204359c6be MotionData: Implement saving changed pixels into filename
Attempt to fix saving changes detected into filename
models/MotionData.go is meant to be a model containing all of the data in regards to currently recorded video

Since I currently mostly need pixel changed saved in the filename properly, I've created 2 structs:

Partial= Timestamp + Changes working
Full= Everything - Not yet used but prepared for fully implementing the rest of data into the filename

Currently filename only saves proper timestamp and the rest is hardcoded, this is one step forwards to have it fully save a dynamic filename.
2022-08-15 02:15:39 +02:00
olokos
d0d8531537 Change changeTreshhold parameter to pixelChangeThreshold 2022-08-14 01:23:43 +02:00
olokos
1b73f5857d Implement changesTreshold parameter and config.json
In case there is no changesTreshold defined in config.json, then treshold will be 75, to stick with original hardcoded value.
2022-08-13 19:52:19 +02:00
Thomas Quandalle
5fbba44109 Merge branch 'develop' 2022-08-05 09:52:38 +02:00
Thomas Quandalle
99c68c220d add update commit before installing packages 2022-08-05 09:18:53 +02:00
Thomas Quandalle
4d7baecb32 fix hub key for motion detection alert 2022-08-05 09:06:17 +02:00
Thomas Quandalle
1853e4e33d Update main.go 2022-06-27 08:22:28 +02:00
Thomas Quandalle
eacd341cc7 refactor and rename to usbcamera-test 2022-06-27 07:59:15 +02:00
Thomas Quandalle
8b9d4f2eb6 disable window 2022-06-26 22:12:30 +02:00
Thomas Quandalle
fb8f7fc042 get rid of dependency 2022-06-26 21:36:11 +02:00
Thomas Quandalle
ae79b7c248 set x264 and fixed fps 2022-06-26 21:31:00 +02:00
Thomas Quandalle
00e62734c7 disable auto reconnect 2022-06-26 19:48:28 +02:00
Thomas Quandalle
a50f2e5435 add v4l-utils 2022-06-26 19:13:52 +02:00
Thomas Quandalle
3324ea8fa2 add user to video group 2022-06-26 18:08:09 +02:00
Thomas Quandalle
37f904c586 move it to a dedicated folder capture-test 2022-06-26 16:25:14 +02:00
Thomas Quandalle
4071bbfa98 add a webcam-test command to verify if a camera is working 2022-06-26 16:21:01 +02:00
152 changed files with 22828 additions and 5471 deletions

View File

@@ -5,11 +5,11 @@ version: 2
jobs:
machinery:
docker:
- image: kerberos/base:6e68480
- image: kerberos/base:0a50dc9
working_directory: /go/src/github.com/{{ORG_NAME}}/{{REPO_NAME}}
steps:
- checkout
- run: apt-get install -y --no-install-recommends libavcodec-dev libavformat-dev libswscale-dev
- run: apt-get update && apt-get install -y --no-install-recommends libavcodec-dev libavformat-dev libswscale-dev libjpeg62-turbo-dev
- run: cd machinery && go mod download
- run: cd machinery && go test -v ./...
- run: cd machinery && go vet

2
.devcontainer/Dockerfile Normal file
View File

@@ -0,0 +1,2 @@
FROM kerberos/devcontainer:0a50dc9
LABEL AUTHOR=Kerberos.io

View File

@@ -0,0 +1,33 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.245.2/containers/docker-existing-dockerfile
{
"name": "A Dockerfile containing FFmpeg, OpenCV, Go and Yarn",
// Sets the run context to one level up instead of the .devcontainer folder.
"context": "..",
// Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename.
"dockerFile": "./Dockerfile",
// Use 'forwardPorts' to make a list of ports inside the container available locally.
"forwardPorts": [
3000,
80
],
// Uncomment the next line to run commands after the container is created - for example installing curl.
"postCreateCommand": "cd ui && yarn install && yarn build && cd ../machinery && go mod download",
"features": {
"ghcr.io/devcontainers-contrib/features/ansible:1": {}
},
"customizations": {
"vscode": {
"extensions": [
"ms-kubernetes-tools.vscode-kubernetes-tools",
"GitHub.copilot"
]
}
},
// Uncomment when using a ptrace-based debugger like C++, Go, and Rust
// "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
// Uncomment to use the Docker CLI from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker.
// "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ],
// Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root.
// "remoteUser": "vscode"
}

View File

@@ -1,4 +1,4 @@
name: Docker Development build
name: Docker development build
on:
push:
@@ -6,6 +6,8 @@ on:
jobs:
build-amd64:
# If contains the keyword "#release" in the commit message.
if: ${{ !contains(github.event.head_commit.message, '#release') }}
runs-on: ubuntu-latest
strategy:
matrix:
@@ -28,10 +30,15 @@ jobs:
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
- name: Create new and append to manifest
run: docker buildx imagetools create -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
- name: Create new and append to latest manifest
run: docker buildx imagetools create -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
build-other:
# If contains the keyword "#release" in the commit message.
if: ${{ !contains(github.event.head_commit.message, '#release') }}
runs-on: ubuntu-latest
strategy:
matrix:
#architecture: [arm64, arm/v7, arm/v6]
architecture: [arm64, arm/v7]
steps:
- name: Login to DockerHub
@@ -50,4 +57,6 @@ jobs:
- name: Run Buildx
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
- name: Create new and append to manifest
run: docker buildx imagetools create --append -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
run: docker buildx imagetools create --append -t kerberos/agent-dev:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
- name: Create new and append to manifest latest
run: docker buildx imagetools create --append -t kerberos/agent-dev:latest kerberos/agent-dev:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)

View File

@@ -1,4 +1,4 @@
name: Docker Nightly build
name: Docker nightly build
on:
# Triggers the workflow every day at 9PM (CET).
@@ -7,6 +7,8 @@ on:
jobs:
build-amd64:
# If contains the keyword "[release]" in the commit message.
if: "contains(github.event.head_commit.message, '[release]')"
runs-on: ubuntu-latest
strategy:
matrix:
@@ -18,7 +20,7 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@v3
run: git clone https://github.com/kerberos-io/agent && cd agent
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -26,14 +28,16 @@ jobs:
- name: Available platforms
run: echo ${{ steps.buildx.outputs.platforms }}
- name: Run Buildx
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
run: cd agent && docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
- name: Create new and append to manifest
run: docker buildx imagetools create -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
run: cd agent && docker buildx imagetools create -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
build-other:
# If contains the keyword "[release]" in the commit message.
if: "contains(github.event.head_commit.message, '[release]')"
runs-on: ubuntu-latest
strategy:
matrix:
architecture: [arm64, arm/v7]
architecture: [arm64, arm/v7, arm/v6]
steps:
- name: Login to DockerHub
uses: docker/login-action@v2
@@ -41,7 +45,7 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@v3
run: git clone https://github.com/kerberos-io/agent && cd agent
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -49,6 +53,6 @@ jobs:
- name: Available platforms
run: echo ${{ steps.buildx.outputs.platforms }}
- name: Run Buildx
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
run: cd agent && docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
- name: Create new and append to manifest
run: docker buildx imagetools create --append -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
run: cd agent && docker buildx imagetools create --append -t kerberos/agent-nightly:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent-nightly:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)

View File

@@ -1,38 +1,85 @@
name: Docker Production build
name: Docker master build
on:
push:
# If pushed to master branch.
branches: [ master ]
env:
REPO: kerberos/agent
jobs:
build-amd64:
# If contains the keyword "[release]" in the commit message.
if: "contains(github.event.head_commit.message, '[release]')"
runs-on: ubuntu-latest
permissions:
contents: write
strategy:
matrix:
architecture: [amd64]
steps:
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Available platforms
run: echo ${{ steps.buildx.outputs.platforms }}
- name: Run Buildx
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
- name: Create new and append to manifest
run: docker buildx imagetools create -t kerberos/agent:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
- name: Login to DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@v3
- uses: benjlevesque/short-sha@v2.1
id: short-sha
with:
length: 7
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Available platforms
run: echo ${{ steps.buildx.outputs.platforms }}
- name: Run Buildx
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}} --push .
- name: Create new and append to manifest
run: docker buildx imagetools create -t $REPO:${{ steps.short-sha.outputs.sha }} $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
- name: Create new and append to manifest latest
run: docker buildx imagetools create -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
- name: Run Buildx with output
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{steps.short-sha.outputs.sha}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
- name: Strip binary
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
# We'll make a GitHub release and push the build (tar) as an artifact
- uses: rickstaa/action-create-tag@v1
with:
tag: ${{ steps.short-sha.outputs.sha }}
message: "Release ${{ steps.short-sha.outputs.sha }}"
- name: Create a release
uses: ncipollo/release-action@v1
with:
latest: true
name: ${{ steps.short-sha.outputs.sha }}
tag: ${{ steps.short-sha.outputs.sha }}
artifacts: "agent-${{matrix.architecture}}.tar"
# Taken from GoReleaser's own release workflow.
# The available Snapcraft Action has some bugs described in the issue below.
# The mkdirs are a hack for https://github.com/goreleaser/goreleaser/issues/1715.
#- name: Setup Snapcraft
# run: |
# sudo apt-get update
# sudo apt-get -yq --no-install-suggests --no-install-recommends install snapcraft
# mkdir -p $HOME/.cache/snapcraft/download
# mkdir -p $HOME/.cache/snapcraft/stage-packages
#- name: Use Snapcraft
# run: tar -xf agent-${{matrix.architecture}}.tar && snapcraft
build-other:
# If contains the keyword "[release]" in the commit message.
if: "contains(github.event.head_commit.message, '[release]')"
runs-on: ubuntu-latest
permissions:
contents: write
needs: build-amd64
strategy:
matrix:
architecture: [arm64, arm/v7]
architecture: [arm64, arm-v7, arm-v6]
#architecture: [arm64, arm-v7]
steps:
- name: Login to DockerHub
uses: docker/login-action@v2
@@ -41,6 +88,10 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Checkout
uses: actions/checkout@v3
- uses: benjlevesque/short-sha@v2.1
id: short-sha
with:
length: 7
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
@@ -48,6 +99,21 @@ jobs:
- name: Available platforms
run: echo ${{ steps.buildx.outputs.platforms }}
- name: Run Buildx
run: docker buildx build --platform linux/${{matrix.architecture}} -t kerberos/agent:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7) --push .
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}} --push .
- name: Create new and append to manifest
run: docker buildx imagetools create --append -t kerberos/agent:$(echo $GITHUB_SHA | cut -c1-7) kerberos/agent:arch-$(echo ${{matrix.architecture}} | tr / -)-$(echo $GITHUB_SHA | cut -c1-7)
run: docker buildx imagetools create --append -t $REPO:${{ steps.short-sha.outputs.sha }} $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
- name: Create new and append to manifest latest
run: docker buildx imagetools create --append -t $REPO:latest $REPO-arch:arch-${{matrix.architecture}}-${{steps.short-sha.outputs.sha}}
- name: Run Buildx with output
run: docker buildx build --platform linux/$(echo ${{matrix.architecture}} | tr - /) -t $REPO-arch:arch-$(echo ${{matrix.architecture}} | tr / -)-${{steps.short-sha.outputs.sha}} --output type=tar,dest=output-${{matrix.architecture}}.tar .
- name: Strip binary
run: mkdir -p output/ && tar -xf output-${{matrix.architecture}}.tar -C output && rm output-${{matrix.architecture}}.tar && cd output/ && tar -cf ../agent-${{matrix.architecture}}.tar -C home/agent . && rm -rf output
- name: Create a release
uses: ncipollo/release-action@v1
with:
latest: true
allowUpdates: true
name: ${{ steps.short-sha.outputs.sha }}
tag: ${{ steps.short-sha.outputs.sha }}
artifacts: "agent-${{matrix.architecture}}.tar"

View File

@@ -7,16 +7,17 @@ on:
branches: [ develop, master ]
jobs:
build:
name: Build
runs-on: ubuntu-latest
container:
image: kerberos/base:6e68480
image: kerberos/base:0a50dc9
strategy:
matrix:
go-version: [1.17, 1.18]
#No longer supported Go versions.
#go-version: ['1.17', '1.18', '1.19']
go-version: ['1.20', '1.21']
steps:
- name: Set up Go ${{ matrix.go-version }}
@@ -24,9 +25,9 @@ jobs:
with:
go-version: ${{ matrix.go-version }}
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Install dependencies
run: apt install -y --no-install-recommends git build-essential cmake pkg-config unzip libgtk2.0-dev curl ca-certificates libcurl4-openssl-dev libssl-dev libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev
uses: actions/checkout@v4
- name: Set up git ownershi
run: git config --system --add safe.directory /__w/agent/agent
- name: Get dependencies
run: cd machinery && go mod download
- name: Build

8
.gitignore vendored
View File

@@ -1,8 +1,14 @@
ui/node_modules
ui/build
ui/public/assets/env.js
.idea
machinery/www
yarn.lock
machinery/data/config
machinery/data/cloud
machinery/data/recordings
machinery/data/recordings
machinery/data/snapshots
machinery/test*
machinery/init-dev.sh
machinery/.env
deployments/docker/private-docker-compose.yaml

View File

@@ -1,4 +1,5 @@
FROM kerberos/base:6e68480 AS builder
FROM kerberos/base:0a50dc9 AS build-machinery
LABEL AUTHOR=Kerberos.io
ENV GOROOT=/usr/local/go
@@ -9,11 +10,9 @@ ENV GOSUMDB=off
##########################################
# Installing some additional dependencies.
RUN apt-get update && apt-get install -y --no-install-recommends \
RUN apt-get upgrade -y && apt-get update && apt-get install -y --fix-missing --no-install-recommends \
git build-essential cmake pkg-config unzip libgtk2.0-dev \
curl ca-certificates libcurl4-openssl-dev libssl-dev \
libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev \
libjpeg-dev libpng-dev libtiff-dev libdc1394-22-dev && \
curl ca-certificates libcurl4-openssl-dev libssl-dev libjpeg62-turbo-dev && \
rm -rf /var/lib/apt/lists/*
##############################################################################
@@ -21,36 +20,29 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
RUN mkdir -p /go/src/github.com/kerberos-io/agent
COPY machinery /go/src/github.com/kerberos-io/agent/machinery
COPY ui /go/src/github.com/kerberos-io/agent/ui
########################
# Download NPM and Yarns
RUN apt-get update && apt-get install -y curl && curl -sL https://deb.nodesource.com/setup_14.x | bash - && \
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && \
echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list && \
apt update && apt install yarn -y
RUN rm -rf /go/src/github.com/kerberos-io/agent/machinery/.env
##################################################################
# Build Web
# this will move the /build directory to ../machinery/www
RUN cd /go/src/github.com/kerberos-io/agent/ui && yarn && yarn build
# Get the latest commit hash, so we know which version we're running
COPY .git /go/src/github.com/kerberos-io/agent/.git
RUN cd /go/src/github.com/kerberos-io/agent/.git && git log --format="%H" -n 1 | head -c7 > /go/src/github.com/kerberos-io/agent/machinery/version
RUN cat /go/src/github.com/kerberos-io/agent/machinery/version
##################
# Build Machinery
RUN cd /go/src/github.com/kerberos-io/agent/machinery && \
go mod download && \
go build main.go && \
go build -tags timetzdata,netgo,osusergo --ldflags '-s -w -extldflags "-static -latomic"' main.go && \
mkdir -p /agent && \
mv main /agent && \
mv www /agent && \
mv version /agent && \
mv data /agent && \
mkdir -p /agent/data/cloud && \
mkdir -p /agent/data/snapshots && \
mkdir -p /agent/data/log && \
mkdir -p /agent/data/recordings && \
mkdir -p /agent/data/capture-test && \
mkdir -p /agent/data/config && \
rm -rf /go/src/gitlab.com/
@@ -64,79 +56,78 @@ RUN cp -r /agent ./
####################################################################################
# This will collect dependent libraries so they're later copied to the final image.
RUN /agent/main version
RUN ldd /agent/main | tr -s '[:blank:]' '\n'
RUN ldd /agent/main | tr -s '[:blank:]' '\n' | grep '^/' | \
xargs -I % sh -c 'mkdir -p $(dirname ./%); cp % ./%;'
RUN /dist/agent/main version
##########################################################
# LDD doesnt always work in docker buildx (no idea why..)
# Therefore we are moving some libraries manually
###############################################
# Build Bento4 -> we want fragmented mp4 files
RUN mkdir -p ./usr/lib
ENV BENTO4_VERSION 1.6.0-639
RUN cd /tmp && git clone https://github.com/axiomatic-systems/Bento4 && cd Bento4 && \
git checkout tags/v${BENTO4_VERSION} && \
cd Build && \
cmake -DCMAKE_BUILD_TYPE=Release .. && \
make && \
mv /tmp/Bento4/Build/mp4fragment /dist/agent/ && \
rm -rf /tmp/Bento4
RUN [ -f /lib64/ld-linux-x86-64.so.2 ] && $(mkdir -p lib64 && \
cp /lib64/ld-linux-x86-64.so.2 lib64/) || echo "nothing to do here x86"
FROM node:18.14.0-alpine3.16 AS build-ui
RUN [ -f /lib/ld-linux-aarch64.so.1 ] && $(mkdir -p lib/aarch64-linux-gnu && \
cp /lib/ld-linux-aarch64.so.1 lib/ && \
cp /lib/aarch64-linux-gnu/lib* lib/aarch64-linux-gnu/ && \
cp /usr/lib/aarch64-linux-gnu/libopencv* usr/lib && \
cp /usr/lib/aarch64-linux-gnu/libstdc* usr/lib && \
cp /usr/lib/aarch64-linux-gnu/libx264* usr/lib ) || echo "nothing to do here arm64"
RUN apk update && apk upgrade --available && sync
RUN [ -f /usr/lib/arm-linux-gnueabihf/vfp/neon/libvpx.so.6 ] && \
$(cp /usr/lib/arm-linux-gnueabihf/vfp/neon/libvpx.so.6 ./usr/lib/) || echo "nothing to do here armv7"
########################
# Build Web (React app)
RUN cp -r /usr/local/lib/libavcodec* ./usr/lib && \
cp -r /usr/local/lib/libavformat* ./usr/lib && \
cp -r /usr/local/lib/libavfilter* ./usr/lib && \
cp -r /usr/local/lib/libavutil* ./usr/lib && \
cp -r /usr/local/lib/libavresample* ./usr/lib && \
cp -r /usr/local/lib/libavdevice* ./usr/lib && \
cp -r /usr/local/lib/libswscale* ./usr/lib && \
cp -r /usr/local/lib/libswresample* ./usr/lib && \
cp -r /usr/local/lib/libpostproc* ./usr/lib
RUN mkdir -p /go/src/github.com/kerberos-io/agent/machinery/www
COPY ui /go/src/github.com/kerberos-io/agent/ui
RUN cd /go/src/github.com/kerberos-io/agent/ui && rm -rf yarn.lock && yarn config set network-timeout 300000 && \
yarn && yarn build
# As mentioned before, above is really a hack as LDD
# doesn't work always in docker buildx. You might not need this
# when doing a local build.
################################################################
####################################
# Let's create a /dist folder containing just the files necessary for runtime.
# Later, it will be copied as the / (root) of the output image.
WORKDIR /dist
RUN mkdir -p ./agent && cp -r /go/src/github.com/kerberos-io/agent/machinery/www ./agent/
############################################
# Publish main binary to GitHub release
FROM alpine:latest
############################
# Protect by non-root user.
RUN addgroup -S kerberosio && adduser -S agent -G kerberosio
RUN addgroup -S kerberosio && adduser -S agent -G kerberosio && addgroup agent video
#################################
# Copy files from previous images
COPY --chown=0:0 --from=builder /dist /
COPY --chown=0:0 --from=builder /usr/local/go/lib/time/zoneinfo.zip /zoneinfo.zip
COPY --chown=0:0 --from=build-machinery /dist /
COPY --chown=0:0 --from=build-ui /dist /
ENV ZONEINFO=/zoneinfo.zip
RUN apk update && apk add ca-certificates --no-cache && \
apk add tzdata --no-cache && apk add curl --no-cache && rm -rf /var/cache/apk/*
#################
# Install Bento4
RUN cd && wget https://www.bok.net/Bento4/binaries/Bento4-SDK-1-6-0-639.x86_64-unknown-linux.zip && \
unzip Bento4-SDK-1-6-0-639.x86_64-unknown-linux.zip && rm Bento4-SDK-1-6-0-639.x86_64-unknown-linux.zip && \
cp ~/Bento4-SDK-1-6-0-639.x86_64-unknown-linux/bin/mp4fragment /usr/bin/
RUN apk update && apk add ca-certificates curl libstdc++ libc6-compat --no-cache && rm -rf /var/cache/apk/*
##################
# Try running agent
RUN mv /agent/* /home/agent/
RUN cp /home/agent/mp4fragment /usr/local/bin/
RUN /home/agent/main version
#######################
# Make template config
RUN cp /home/agent/data/config/config.json /home/agent/data/config.template.json
###########################
# Set permissions correctly
RUN chown -R agent:kerberosio /home/agent/data
RUN chown -R agent:kerberosio /home/agent/www
###########################
# Grant the necessary root capabilities to the process trying to bind to the privileged port
RUN apk add libcap && setcap 'cap_net_bind_service=+ep' /home/agent/main
###################
# Run non-root user
@@ -144,17 +135,17 @@ RUN chown -R agent:kerberosio /home/agent/data
USER agent
######################################
# By default the app runs on port 8080
# By default the app runs on port 80
EXPOSE 8080
EXPOSE 80
######################################
# Check if agent is still running
HEALTHCHECK CMD curl --fail http://localhost:8080 || exit 1
HEALTHCHECK CMD curl --fail http://localhost:80 || exit 1
###################################################
# Leeeeettttt'ssss goooooo!!!
# Run the shizzle from the right working directory.
WORKDIR /home/agent
CMD ["./main", "run", "opensource", "8080"]
CMD ["./main", "-action", "run", "-port", "80"]

385
README.md
View File

@@ -2,8 +2,6 @@
<a target="_blank" href="https://kerberos.io"><img src="https://img.shields.io/badge/kerberos-website-gray.svg?longCache=true&colorB=brightgreen" alt="Kerberos Agent"></a>
<a target="_blank" href="https://doc.kerberos.io"><img src="https://img.shields.io/badge/kerberos-documentation-gray.svg?longCache=true&colorB=brightgreen" alt="Kerberos Agent"></a>
<a target="_blank" href="https://twitter.com/kerberosio?ref_src=twsrc%5Etfw"><img src="https://img.shields.io/twitter/url.svg?label=Follow%20%40kerberosio&style=social&url=https%3A%2F%2Ftwitter.com%2Fkerberosio" alt="Twitter Widget"></a>
<a target="_blank" href="https://join.slack.com/t/kerberosio/shared_invite/zt-1a5oj4pwm-O4qCAN9c5r2um0Ns0ge8ww"><img src="https://img.shields.io/badge/slack-@kerberosio-yellow.svg?logo=slack " alt="Kerberos.io"></a>
<a target="_blank" href="https://circleci.com/gh/kerberos-io/agent"><img src="https://circleci.com/gh/kerberos-io/agent.svg?style=svg"/></a>
<img src="https://github.com/kerberos-io/agent/workflows/Go/badge.svg"/>
@@ -17,71 +15,323 @@
<a target="_blank" href="https://www.figma.com/proto/msuYC6sv2cOCqZeDtBxNy7/%5BNEW%5D-Kerberos.io-Apps?node-id=1%3A1788&viewport=-490%2C191%2C0.34553584456443787&scaling=min-zoom&page-id=1%3A2%3Ffuid%3D449684443467913607" alt="Kerberos Agent"></a>
<a href="LICENSE"><img src="https://img.shields.io/badge/License-MIT-yellow.svg" alt="License: MIT"></a>
[![donate](
https://brianmacdonald.github.io/Ethonate/svg/eth-donate-blue.svg)](
https://brianmacdonald.github.io/Ethonate/address#0xf4a759C9436E2280Ea9cdd23d3144D95538fF4bE)
[![donate](https://brianmacdonald.github.io/Ethonate/svg/eth-donate-blue.svg)](https://brianmacdonald.github.io/Ethonate/address#0xf4a759C9436E2280Ea9cdd23d3144D95538fF4bE)
<a target="_blank" href="https://twitter.com/kerberosio?ref_src=twsrc%5Etfw"><img src="https://img.shields.io/twitter/url.svg?label=Follow%20%40kerberosio&style=social&url=https%3A%2F%2Ftwitter.com%2Fkerberosio" alt="Twitter Widget"></a>
[![Discord Shield](https://discordapp.com/api/guilds/1039619181731135499/widget.png?style=shield)](https://discord.gg/Bj77Vqfp2G)
[![kerberosio](https://snapcraft.io/kerberosio/badge.svg)](https://snapcraft.io/kerberosio)
[**Docker Hub**](https://hub.docker.com/r/kerberos/agent) | [**Documentation**](https://doc.kerberos.io) | [**Website**](https://kerberos.io)
[**Docker Hub**](https://hub.docker.com/r/kerberos/agent) | [**Documentation**](https://doc.kerberos.io) | [**Website**](https://kerberos.io) | [**View Demo**](https://demo.kerberos.io)
Kerberos Agent is a cutting edge video surveillance management system made available as Open Source under the MIT License. This means that all the source code is available for you or your company, and you can use, transform and distribute the source code; as long you keep a reference of the original license. Kerberos Agent can be used for commercial usage (which was not the case for v2). Read more [about the license here](LICENSE).
Kerberos Agent is an isolated and scalable video (surveillance) management agent made available as Open Source under the MIT License. This means that all the source code is available for you or your company, and you can use, transform and distribute the source code; as long you keep a reference of the original license. Kerberos Agent can be used for commercial usage (which was not the case for v2). Read more [about the license here](LICENSE).
![login-agent](./assets/img/agent-login.png)
## Support our project
![Kerberos Agent go through UI](./assets/img/kerberos-agent-overview.gif)
If you like our product please feel free to execute an Ethereum donation. All donations will flow back and split to our Open Source contributors, as they are the heart of this community.
<img width="272" alt="Ethereum donation link" src="https://user-images.githubusercontent.com/1546779/173443671-3d773068-ae10-4862-a990-dc7c89f3d9c2.png">
## :thinking: Prerequisites
Ethereum Address: `0xf4a759C9436E2280Ea9cdd23d3144D95538fF4bE`
## Work In Progress
- An IP camera which supports a RTSP H264 or H265 encoded stream,
- (or) a USB camera, Raspberry Pi camera or other camera, that [you can transform to a valid RTSP H264 or H265 stream](https://github.com/kerberos-io/camera-to-rtsp).
- Any hardware (ARMv6, ARMv7, ARM64, AMD) that can run a binary or container, for example: a Raspberry Pi, NVidia Jetson, Intel NUC, a VM, Bare metal machine or a full blown Kubernetes cluster.
Kerberos Agent (v3) is not yet released, and is actively developed. You can follow the progress [on our project board](https://github.com/kerberos-io/agent/projects/1) and review our designs at [Figma](https://www.figma.com/proto/msuYC6sv2cOCqZeDtBxNy7/%5BNEW%5D-Kerberos.io-Apps?node-id=1%3A1788&viewport=-490%2C191%2C0.34553584456443787&scaling=min-zoom&page-id=1%3A2%3Ffuid%3D449684443467913607). Feel free to give any feedback.
## :video_camera: Is my camera working?
## Previous releases
There are a myriad of cameras out there (USB, IP and other cameras), and it might be daunting to know if Kerberos Agent will work for your camera. [Therefore we are listing all the camera models that are acknowledged by the community](https://github.com/kerberos-io/agent/issues/59). Feel free to add your camera to the list as well!
This repository contains the next generation of Kerberos.io, **Kerberos Agent (v3)**, and is the successor of the machinery and web repositories. A switch in technologies and architecture has been made. This version is still under active development and can be followed on the [develop branch](https://github.com/kerberos-io/agent/tree/develop) and [project overview](https://github.com/kerberos-io/agent/projects/1).
## :books: Overview
Read more about this [at the FAQ](#faq) below.
### Up and running in no time
![opensource-to-agent](https://user-images.githubusercontent.com/1546779/172066873-7752c979-de63-4417-8d26-34192fdbd1e6.svg)
## Introduction
1. [Quickstart - Docker](#quickstart---docker)
2. [Quickstart - Balena](#quickstart---balena)
3. [Quickstart - Snap](#quickstart---snap)
Kerberos.io is a cutting edge video surveillance system with a strong focus on user experience, scalability, resilience, extension and integration. Kerberos.io provides different solutions, but from a high level point of view it comes into two flavours: Kerberos Agent and Kerberos Enterprise Suite. Bottom line Kerberos Enterprise Suite extends Kerberos Agent with additional components such as Kerberos Factory, Kerberos Vault and Kerberos Hub.
### Introduction
### Kerberos Agent
1. [A world of Kerberos Agents](#a-world-of-kerberos-agents)
- Installation in seconds (Kerberos Etcher, Docker, Binaries).
### Running and automation
1. [How to run and deploy a Kerberos Agent](#how-to-run-and-deploy-a-kerberos-agent)
2. [Access the Kerberos Agent](#access-the-kerberos-agent)
3. [Configure and persist with volume mounts](#configure-and-persist-with-volume-mounts)
4. [Configure with environment variables](#configure-with-environment-variables)
### Insights
1. [Encryption](#encryption)
2. [H264 vs H265](#h264-vs-h265)
### Contributing
1. [Contribute with Codespaces](#contribute-with-codespaces)
2. [Develop and build](#develop-and-build)
3. [Building from source](#building-from-source)
4. [Building for Docker](#building-for-docker)
### Varia
1. [Support our project](#support-our-project)
1. [What is new?](#what-is-new)
1. [Contributors](#contributors)
## Quickstart - Docker
The easiest to get your Kerberos Agent up and running is to use our public image on [Docker hub](https://hub.docker.com/r/kerberos/agent). Once you have selected a specific tag, run below `docker` command, which will open the web interface of your Kerberos agent on port `80`, and off you go. For a more configurable and persistent deployment have a look at [Running and automating a Kerberos Agent](#running-and-automating-a-kerberos-agent).
docker run -p 80:80 --name mycamera -d --restart=always kerberos/agent:latest
If you want to connect to a USB or Raspberry Pi camera, [you'll need to run our side car container](https://github.com/kerberos-io/camera-to-rtsp) which proxies the camera to an RTSP stream. In that case you'll want to configure the Kerberos Agent container to run in the host network, so it can connect directly to the RTSP sidecar.
docker run --network=host --name mycamera -d --restart=always kerberos/agent:latest
## Quickstart - Balena
Run Kerberos Agent with [Balena Cloud](https://www.balena.io/) super powers. Monitor your Kerberos Agent with seamless remote access, over the air updates, an encrypted public `https` endpoint and many more. Checkout our application `video-surveillance` on [Balena Hub](https://hub.balena.io/apps/2064752/video-surveillance), and create your first or fleet of Kerberos Agent(s).
[![deploy with balena](https://balena.io/deploy.svg)](https://dashboard.balena-cloud.com/deploy?repoUrl=https://github.com/kerberos-io/balena-agent)
## Quickstart - Snap
Run Kerberos Agent with our [Snapcraft package](https://snapcraft.io/kerberosio).
snap install kerberosio
Once installed you can find your Kerberos Agent configuration at `/var/snap/kerberosio/common`. Run the Kerberos Agent as follows
sudo kerberosio.agent -action=run -port=80
## A world of Kerberos Agents
The Kerberos Agent is an isolated and scalable video (surveillance) management agent with a strong focus on user experience, scalability, resilience, extension and integration. Next to the Kerberos Agent, Kerberos.io provides many other tools such as [Kerberos Factory](https://github.com/kerberos-io/factory), [Kerberos Vault](https://github.com/kerberos-io/vault) and [Kerberos Hub](https://github.com/kerberos-io/hub) to provide additional capabilities: bring your own cloud, bring your own storage, central overview, live streaming, machine learning etc.
As mentioned above Kerberos.io applies the concept of agents. An agent is running next to (or on) your camera, and is processing a single camera feed. It applies motion based or continuous recording and makes those recordings available through a user friendly web interface. A Kerberos Agent allows you to connect to other cloud services or integrates with custom applications. Kerberos Agent is used for personal usage and scales to enterprise production level deployments.
This repository contains everything you'll need to know about our core product, Kerberos Agent. Below you'll find a brief list of features and functions.
- Low memory and CPU usage.
- Simplified and modern user interface.
- Multi architecture (ARMv7, ARMv8, amd64, etc).
- Multi camera support: IP Cameras (MJPEG/H264), USB cameras, Raspberry Pi Cameras.
- Single camera per instance (e.g. One Docker container per camera).
- Cloud integration through Webhooks, MQTT, etc.
- Cloud storage through Kerberos Hub.
- Multi architecture (ARMv7, ARMv8, amd64, etc.).
- Multi stream, for example recording in H265, live streaming and motion detection in H264.
- Multi camera support: IP Cameras (H264 and H265), USB cameras and Raspberry Pi Cameras [through a RTSP proxy](https://github.com/kerberos-io/camera-to-rtsp).
- Single camera per instance (e.g. one container per camera).
- Low resolution streaming through MQTT and high resolution streaming through WebRTC (only supports H264/PCM).
- Backchannel audio from Kerberos Hub to IP camera (requires PCM ULAW codec)
- Audio (AAC) and video (H264/H265) recording in MP4 container.
- End-to-end encryption through MQTT using RSA and AES (livestreaming, ONVIF, remote configuration, etc)
- Conditional recording: offline mode, motion region, time table, continuous recording, webhook condition etc.
- Post- and pre-recording for motion detection.
- Encryption at rest using AES-256-CBC.
- Ability to create fragmented recordings, and streaming through HLS fMP4.
- [Deploy where you want](#how-to-run-and-deploy-a-kerberos-agent) with the tools you use: `docker`, `docker compose`, `ansible`, `terraform`, `kubernetes`, etc.
- Cloud storage/persistence: Kerberos Hub, Kerberos Vault and Dropbox. [(WIP: Minio, Storj, Google Drive, FTP etc.)](https://github.com/kerberos-io/agent/issues/95)
- Outputs: trigger an integration (Webhooks, MQTT, Script, etc) when a specific event (motion detection or start recording ) occurs
- REST API access and documentation through Swagger (trigger recording, update configuration, etc).
- MIT License
### Kerberos Factory (part of [Kerberos Enterprise suite](https://doc.kerberos.io/enterprise/first-things-first/))
## How to run and deploy a Kerberos Agent
- Installation on top of Kubernetes (K8S).
- Camera support for IP camera only (RTSP/H264).
- Massive horizontal scaling, thanks to Kubernetes.
- Management of multiple Kerberos Agents through a single pane of glass.
- Low memory and CPU intensive.
- Modular and extensible design for building own extensions and integrations (e.g. a video analytics platform).
- Commercial licensed and closed source.
As described before a Kerberos Agent is a container, which can be deployed through various ways and automation tools such as `docker`, `docker compose`, `kubernetes` and the list goes on. To simplify your life we have come with concrete and working examples of deployments to help you speed up your Kerberos.io journey.
## How it works: A world of Agents 🕵🏼‍♂️
We have documented the different deployment models [in the `deployments` directory](https://github.com/kerberos-io/agent/tree/master/deployments) of this repository. There you'll learn and find how to deploy using:
Kerberos.io applies the concept of agents. An agent is running next to or on your camera, and is processing a single camera feed. It applies motion based recording and makes those recordings available through a user friendly web interface. Kerberos Agent allows you to connect to other cloud services or custom applications. Kerberos Agent is perfect for personal usage and/or is a great tool if you only have a couple of surveillance cameras to be processed.
- [Static binary](https://github.com/kerberos-io/agent/tree/master/deployments#0-static-binary)
- [Docker](https://github.com/kerberos-io/agent/tree/master/deployments#1-docker)
- [Docker Compose](https://github.com/kerberos-io/agent/tree/master/deployments#2-docker-compose)
- [Kubernetes](https://github.com/kerberos-io/agent/tree/master/deployments#3-kubernetes)
- [Red Hat OpenShift with Ansible](https://github.com/kerberos-io/agent/tree/master/deployments#4-red-hat-ansible-and-openshift)
- [Terraform](https://github.com/kerberos-io/agent/tree/master/deployments#5-terraform)
- [Salt](https://github.com/kerberos-io/agent/tree/master/deployments#6-salt)
- [Balena](https://github.com/kerberos-io/agent/tree/master/deployments#8-balena)
- [Snap](https://github.com/kerberos-io/agent/tree/master/deployments#9-snap)
If you are looking for a solution that scales better with your video surveillance and/or video analytics requirements, [Kerberos Enterprise Suite might be a better fit](https://doc.kerberos.io/enterprise/first-things-first).
By default your Kerberos Agents will store all its configuration and recordings inside the container. To help you automate and have a more consistent data governance, you can attach volumes to configure and persist data of your Kerberos Agents, and/or configure each Kerberos Agent through environment variables.
## Installation
Kerberos Agent **will ship in different formats**: Docker, binary, snap, KiOS. Version 3 is still in active development right now, and not yet released.
## Access the Kerberos Agent
## Run and develop
Once you have deployed the Kerberos Agent, using one of the deployment models described above, you will be able to access the Kerberos Agent user interface. A login page is presented asking for some credentials.
The default username and password for the Kerberos Agent is:
- Username: `root`
- Password: `root`
**_Please note that you should change the username and password for a final installation, see [Configure with environment variables](#configure-with-environment-variables) below._**
## Configure and persist with volume mounts
An example of how to mount a host directory is shown below using `docker`, but is applicable for [all the deployment models and tools described above](#running-and-automating-a-kerberos-agent).
You attach a volume to your container by leveraging the `-v` option. To mount your own configuration file and recordings folder, execute as follows:
docker run -p 80:80 --name mycamera \
-v $(pwd)/agent/config:/home/agent/data/config \
-v $(pwd)/agent/recordings:/home/agent/data/recordings \
-d --restart=always kerberos/agent:latest
More example [can be found in the deployment section](https://github.com/kerberos-io/agent/tree/master/deployments) for each deployment and automation tool. Please note to verify the permissions of the directory/volume you are attaching. More information in [this issue](https://github.com/kerberos-io/agent/issues/80).
chmod -R 755 kerberos-agent/
chown 100:101 kerberos-agent/ -R
## Configure with environment variables
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes deployments leveraging `docker compose` or `kubernetes` much easier and more scalable. Using this approach we simplify automation through `ansible` and `terraform`.
docker run -p 80:80 --name mycamera \
-e AGENT_NAME=mycamera \
-e AGENT_TIMEZONE=Europe/Brussels \
-e AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://fake.kerberos.io/stream \
-e AGENT_CAPTURE_CONTINUOUS=true \
-d --restart=always kerberos/agent:latest
| Name | Description | Default Value |
| --------------------------------------- | ----------------------------------------------------------------------------------------------- | ------------------------------ |
| `LOG_LEVEL` | Level for logging, could be "info", "warning", "debug", "error" or "fatal". | "info" |
| `LOG_OUTPUT` | Logging output format "json" or "text". | "text" |
| `AGENT_MODE` | You can choose to run this in 'release' for production, and or 'demo' for showcasing. | "release" |
| `AGENT_TLS_INSECURE` | Specify if you want to use `InsecureSkipVerify` for the internal HTTP client. | "false" |
| `AGENT_USERNAME` | The username used to authenticate against the Kerberos Agent login page. | "root" |
| `AGENT_PASSWORD` | The password used to authenticate against the Kerberos Agent login page. | "root" |
| `AGENT_KEY`                             | A unique identifier for your Kerberos Agent, this is auto-generated but can be overridden.       | ""                             |
| `AGENT_NAME` | The agent friendly-name. | "agent" |
| `AGENT_TIMEZONE` | Timezone which is used for converting time. | "Africa/Ceuta" |
| `AGENT_REMOVE_AFTER_UPLOAD` | When enabled, recordings uploaded successfully to a storage will be removed from disk. | "true" |
| `AGENT_OFFLINE` | Makes sure no external connection is made. | "false" |
| `AGENT_AUTO_CLEAN` | Cleans up the recordings directory. | "true" |
| `AGENT_AUTO_CLEAN_MAX_SIZE` | If `AUTO_CLEAN` enabled, set the max size of the recordings directory in (MB). | "100" |
| `AGENT_TIME` | Enable the timetable for Kerberos Agent | "false" |
| `AGENT_TIMETABLE` | A (weekly) time table to specify when to make recordings "start1,end1,start2,end2;start1.. | "" |
| `AGENT_REGION_POLYGON` | A single polygon set for motion detection: "x1,y1;x2,y2;x3,y3;... | "" |
| `AGENT_CAPTURE_IPCAMERA_RTSP` | Full-HD RTSP endpoint to the camera you're targetting. | "" |
| `AGENT_CAPTURE_IPCAMERA_SUB_RTSP` | Sub-stream RTSP endpoint used for livestreaming (WebRTC). | "" |
| `AGENT_CAPTURE_IPCAMERA_ONVIF` | Mark as a compliant ONVIF device. | "" |
| `AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR` | ONVIF endpoint/address running on the camera. | "" |
| `AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME` | ONVIF username to authenticate against. | "" |
| `AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD` | ONVIF password to authenticate against. | "" |
| `AGENT_CAPTURE_MOTION` | Toggle for enabling or disabling motion. | "true" |
| `AGENT_CAPTURE_LIVEVIEW` | Toggle for enabling or disabling liveview. | "true" |
| `AGENT_CAPTURE_SNAPSHOTS` | Toggle for enabling or disabling snapshot generation. | "true" |
| `AGENT_CAPTURE_RECORDING` | Toggle for enabling making recordings. | "true" |
| `AGENT_CAPTURE_CONTINUOUS` | Toggle for enabling continuous "true" or motion "false". | "false" |
| `AGENT_CAPTURE_PRERECORDING`            | If `CONTINUOUS` set to `false`, specify the recording time (seconds) before a motion event.      | "10"                           |
| `AGENT_CAPTURE_POSTRECORDING` | If `CONTINUOUS` set to `false`, specify the recording time (seconds) after motion event. | "20" |
| `AGENT_CAPTURE_MAXLENGTH` | The maximum length of a single recording (seconds). | "30" |
| `AGENT_CAPTURE_PIXEL_CHANGE`            | If `CONTINUOUS` set to `false`, the number of pixels required to change before motion triggers.  | "150"                          |
| `AGENT_CAPTURE_FRAGMENTED` | Set the format of the recorded MP4 to fragmented (suitable for HLS). | "false" |
| `AGENT_CAPTURE_FRAGMENTED_DURATION` | If `AGENT_CAPTURE_FRAGMENTED` set to `true`, define the duration (seconds) of a fragment. | "8" |
| `AGENT_MQTT_URI` | A MQTT broker endpoint that is used for bi-directional communication (live view, onvif, etc) | "tcp://mqtt.kerberos.io:1883" |
| `AGENT_MQTT_USERNAME` | Username of the MQTT broker. | "" |
| `AGENT_MQTT_PASSWORD` | Password of the MQTT broker. | "" |
| `AGENT_STUN_URI` | When using WebRTC, you'll need to provide a STUN server. | "stun:turn.kerberos.io:8443" |
| `AGENT_TURN_URI` | When using WebRTC, you'll need to provide a TURN server. | "turn:turn.kerberos.io:8443" |
| `AGENT_TURN_USERNAME` | TURN username used for WebRTC. | "username1" |
| `AGENT_TURN_PASSWORD` | TURN password used for WebRTC. | "password1" |
| `AGENT_CLOUD` | Store recordings in Kerberos Hub (s3), Kerberos Vault (kstorage) or Dropbox (dropbox). | "s3" |
| `AGENT_HUB_ENCRYPTION` | Turning on/off encryption of traffic from your Kerberos Agent to Kerberos Hub. | "true" |
| `AGENT_HUB_URI` | The Kerberos Hub API, defaults to our Kerberos Hub SAAS. | "https://api.hub.domain.com" |
| `AGENT_HUB_KEY` | The access key linked to your account in Kerberos Hub. | "" |
| `AGENT_HUB_PRIVATE_KEY` | The secret access key linked to your account in Kerberos Hub. | "" |
| `AGENT_HUB_REGION` | The Kerberos Hub region, to which you want to upload. | "" |
| `AGENT_HUB_SITE` | The site ID of a site you've created in your Kerberos Hub account. | "" |
| `AGENT_KERBEROSVAULT_URI` | The Kerberos Vault API url. | "https://vault.domain.com/api" |
| `AGENT_KERBEROSVAULT_ACCESS_KEY` | The access key of a Kerberos Vault account. | "" |
| `AGENT_KERBEROSVAULT_SECRET_KEY` | The secret key of a Kerberos Vault account. | "" |
| `AGENT_KERBEROSVAULT_PROVIDER` | A Kerberos Vault provider you have created (optional). | "" |
| `AGENT_KERBEROSVAULT_DIRECTORY` | The directory, in the provider, where the recordings will be stored in. | "" |
| `AGENT_DROPBOX_ACCESS_TOKEN` | The Access Token from your Dropbox app, that is used to leverage the Dropbox SDK. | "" |
| `AGENT_DROPBOX_DIRECTORY` | The directory, in the provider, where the recordings will be stored in. | "" |
| `AGENT_ENCRYPTION` | Enable 'true' or disable 'false' end-to-end encryption for MQTT messages. | "false" |
| `AGENT_ENCRYPTION_RECORDINGS` | Enable 'true' or disable 'false' end-to-end encryption for recordings. | "false" |
| `AGENT_ENCRYPTION_FINGERPRINT` | The fingerprint of the keypair (public/private keys), so you know which one to use. | "" |
| `AGENT_ENCRYPTION_PRIVATE_KEY`          | The private key (asymmetric/RSA) to decrypt and sign requests sent over MQTT.                    | ""                             |
| `AGENT_ENCRYPTION_SYMMETRIC_KEY` | The symmetric key (AES) to encrypt and decrypt request send over MQTT. | "" |
## Encryption
You can encrypt your recordings and outgoing MQTT messages with your own AES and RSA keys by enabling the encryption settings. Once enabled all your recordings will be encrypted using AES-256-CBC and your symmetric key. You can either use the default `openssl` toolchain to decrypt the recordings with your AES key, as following:
openssl aes-256-cbc -d -md md5 -in encrypted.mp4 -out decrypted.mp4 -k your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
, and additionally you can decrypt a folder of recordings, using the Kerberos Agent binary as following:
go run main.go -action decrypt ./data/recordings your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
or for a single file:
go run main.go -action decrypt ./data/recordings/video.mp4 your-key-96ab185xxxxxxxcxxxxxxxx6a59c62e8
## H264 vs H265
If we talk about video encoders and decoders (codecs) there are 2 major video codecs on the market: H264 and H265. Taking into account your use case, you might use one over the other. We will provide an (not complete) overview of the advantages and disadvantages of each codec in the field of video surveillance and video analytics. If you would like to know more, you should look for additional resources on the internet (or if you like to read physical items, books still exists nowadays).
- H264 (also known as AVC or MPEG-4 Part 10)
- Is the most common one and most widely supported for IP cameras.
- Supported in the majority of browsers, operating system and third-party applications.
- Can be embedded in commercial and 3rd party applications.
- Different levels of compression (high, medium, low, ..)
- Better quality / compression ratio, shows less artifacts at medium compression ratios.
- Does support technologies such as WebRTC
- H265 (also known as HEVC)
- Is not supported on legacy cameras, though becoming rapidly available on "newer" IP cameras.
- Might not always be supported due to licensing. For example not supported in browsers on a Linux distro.
- Requires licensing when embedding in a commercial product (be careful).
- Higher levels of compression (50% more than H264).
- H265 shows artifacts in motion based environments (which is less with H264).
- Recording the same video (resolution, duration and FPS) in H264 and H265 will result in approx 50% the file size.
- Not supported in technologies such as WebRTC
Conclusion: depending on the use case you might choose one over the other, and you can use both at the same time. For example you can use H264 (main stream) for livestreaming, and H265 (sub stream) for recording. If you wish to play recordings in a cross-platform and cross-browser environment, you might opt for H264 for better support.
## Contribute with Codespaces
One of the major blockers for letting you contribute to an Open Source project is to setup your local development machine. Why? Because you might have already some tools and libraries installed that are used for other projects, and the libraries you would need for Kerberos Agent, for example FFmpeg, might require a different version. Welcome to the dependency hell..
By leveraging codespaces, which the Kerberos Agent repo supports, you will be able to setup the required development environment in a few minutes. By opening the `<> Code` tab on the top of the page, you will be able to create a codespace, [using the Kerberos Devcontainer](https://github.com/kerberos-io/devcontainer) base image. This image requires all the relevant dependencies: FFmpeg, OpenCV, Golang, Node, Yarn, etc.
![Kerberos Agent codespace](assets/img/codespace.png)
After a few minutes, you will see a beautiful `Visual Studio Code` shown in your browser, and you are ready to code!
![Kerberos Agent VSCode](assets/img/codespace-vscode.png)
On opening of the GitHub Codespace, some dependencies will be installed. Once this is done go ahead to the `ui/src/config.json` file, and (un)comment following section. Make sure to replace the `externalHost` variable with the DNS name you will retrieve from the next step.
// Uncomment this when using codespaces or other special DNS names (which you can't control)
// replace this with the DNS name of the kerberos agent server (the codespace url)
const externalHost = 'cedricve-automatic-computing-machine-v647rxvj4whx9qp-80.preview.app.github.dev';
const dev = {
ENV: 'dev',
HOSTNAME: externalHost,
//API_URL: `${protocol}//${hostname}:80/api`,
//URL: `${protocol}//${hostname}:80`,
//WS_URL: `${websocketprotocol}//${hostname}:80/ws`,
// Uncomment, and comment the above lines, when using codespaces or other special DNS names (which you can't control)
API_URL: `${protocol}//${externalHost}/api`,
URL: `${protocol}//${externalHost}`,
WS_URL: `${websocketprotocol}//${externalHost}/ws`,
};
Go and open two terminals one for the `ui` project and one for the `machinery` project.
1. Terminal A:
cd machinery/
go run main.go -action run -port 80
2. Terminal B:
cd ui/
yarn start
Once executed, a popup will show up mentioning `portforwarding`. You should see two ports being opened, one for the ui `3000` and one for the machinery `80`. `Right-click` on the port `80` and change visibility from `private` to `public`, this is required to avoid `CORS` errors.
![Codespace make public](./assets/img/codespace-make-public.png)
As mentioned above, copy the hostname of the `machinery` DNS name, and paste it in the `ui/src/config.json` file. Once done, reload the `ui` page in your browser, and you should be able to access the login page with the default credentials `root` and `root`.
## Develop and build
Kerberos Agent is divided in two parts a `machinery` and `web`. Both parts live in this repository in their relative folders. For development or running the application on your local machine, you have to run both the `machinery` and the `web` as described below. When running in production everything is shipped as only one artifact, read more about this at [Building for production](#building-for-production).
@@ -97,6 +347,10 @@ This will start a webserver and launches the web app on port `3000`.
![login-agent](./assets/img/agent-login.gif)
Once signed in you'll see the dashboard page showing up. After successful configuration of your agent, you should see a live view and possible events recorded to disk.
![dashboard-agent](./assets/img/agent-dashboard.png)
### Machinery
The `machinery` is a **Golang** project which delivers two functions: it acts as the Kerberos Agent which is doing all the heavy lifting with camera processing and other kinds of logic, on the other hand it acts as a webserver (Rest API) that allows communication from the web (React) or any other custom application. The API is documented using `swagger`.
@@ -105,13 +359,13 @@ You can simply run the `machinery` using following commands.
git clone https://github.com/kerberos-io/agent
cd machinery
go run main.go run mycameraname 8080
go run main.go -action run -port 80
This will launch the Kerberos Agent and run a webserver on port `8080`. You can change the port by your own preference. We strongly support the usage of [Goland](https://www.jetbrains.com/go/), as it comes with all the debugging and linting features builtin.
This will launch the Kerberos Agent and run a webserver on port `80`. You can change the port by your own preference. We strongly support the usage of [Goland](https://www.jetbrains.com/go/) or [Visual Studio Code](https://code.visualstudio.com/), as it comes with all the debugging and linting features builtin.
![run-in-goland](https://user-images.githubusercontent.com/1546779/111139940-0a4d1a80-8582-11eb-8985-ceaf7359f4ee.gif)
![VSCode desktop](./assets/img/vscode-desktop.png)
## Building for Production
## Building from source
Running Kerberos Agent in production only requires a single binary to run. Nevertheless, we have two parts, the `machinery` and the `web`, we merge them during build time. So this is what happens.
@@ -126,48 +380,27 @@ To build the Kerberos Agent web app, you simply have to run the `build` command
Building the `machinery` is also super easy 🚀, by using `go build` you can create a single binary which ships it all; thank you Golang. After building you will endup with a binary called `main`, this is what contains everything you need to run Kerberos Agent.
Remember the build step of the `web` part, during build time we move the build directory to the `machinery` directory. Inside the `machinery` web server [we reference the](https://github.com/kerberos-io/agent/blob/master/machinery/src/routers/http/Server.go#L44) `build` directory. This makes it possible to have just a single web server that runs it all.
Remember the build step of the `web` part, during build time we move the build directory to the `machinery` directory. Inside the `machinery` web server [we reference the](https://github.com/kerberos-io/agent/blob/master/machinery/src/routers/http/Server.go#L44) `build` directory. This makes it possible to have just a single web server that runs it all.
cd machinery
go build
## Building for Docker
Inside the root of this `agent` repository, you will find a `Dockerfile`. This file contains the instructions for building and shipping **Kerberos Agent**. Important to note is that we start from a prebuilt base image, `kerberos/debian-opencv-ffmpeg:1.0.xxx`.
Inside the root of this `agent` repository, you will find a `Dockerfile`. This file contains the instructions for building and shipping **Kerberos Agent**. Important to note is that we start from a prebuilt base image, `kerberos/base:xxx`.
This base image contains already a couple of tools, such as Golang, FFmpeg and OpenCV. We do this for faster compilation times.
By running the `docker build` command, you will create the Kerberos Agent Docker image. After building you can simply run the image as a Docker container.
docker build -t kerberos/agent .
docker run -p 8080:8080 --name mycamera -d kerberos/agent:edge
## FAQ
## What is new?
#### 1. Why a mono repo?
This repository contains the next generation of Kerberos.io, **Kerberos Agent (v3)**, and is the successor of the machinery and web repositories. A switch in technologies and architecture has been made. This version is still under active development and can be followed on the [develop branch](https://github.com/kerberos-io/agent/tree/develop) and [project overview](https://github.com/kerberos-io/agent/projects/1).
We have noticed in the past (v1 and v2) splitting the repositories (machinery and web), created a lot of confusion within our community. People didn't understand the different versions and so on. This caused a lack of collaboration, and made it impossible for some people to collaborate and contribute.
Read more about this [at the FAQ](#faq) below.
Having a mono repo, which is well organised, simplifies the entry point for new people who would like to use, understand and/or contribute to Kerberos Agent.
#### 2. Why a change in technologies?
In previous versions (v1 and v2) we used technologies like C++, PHP and BackboneJS. 7 years ago this was still acceptable, however time has changed and new technologies such as React and Golang became very popular.
Due to previous reason we have decided to rebuild the Kerberos Agent technology from scratch, taking into account all the feedback we acquired over the years. Having these technologies available, we will enable more people to contribute and use our technology.
#### 3. What is the difference with Kerberos Enterprise?
We started the developments of Kerberos Enterprise a year ago (January, 2020), our focus here was scalability, and fast development and easy deployment. We noticed that with technologies such as Golang and React, we can still provide a highly performant video surveillance system.
Kerberos Agent uses the same technology stack, and some code pieces, of Kerberos Enterprise which we have already built. We now have a very clear view of how a well-developed and documented video surveillance system needs to look.
#### 4. When are we going to be able to install the first version?
We plan to ship the first version by the end of Q1, afterwards we will add more and more features as usual.
#### 5. Change in License
Kerberos Agent (v3) is now available under the MIT license.
![opensource-to-agent](https://user-images.githubusercontent.com/1546779/172066873-7752c979-de63-4417-8d26-34192fdbd1e6.svg)
## Contributors

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.0 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 138 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 769 KiB

BIN
assets/img/codespace.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 286 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.1 MiB

BIN
assets/img/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 25 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 966 KiB

79
deployments/README.md Normal file
View File

@@ -0,0 +1,79 @@
# Kerberos Agent Deployments
Great to see you here, you just arrived at the real stuff! As you may have understood Kerberos Agent is a containerized solution. A Kerberos Agent, or container equivalent, is running for each camera. This approach makes it scalable, isolated and, probably the most important thing, delivers exceptional workload governance.
Due to its nature, of acting as a micro service, there are many different ways how to get this Kerberos Agent up and running. This part of the Kerberos Agent repository contains example configurations, for all the different deployments and automations you can leverage to deploy and scale your video landscape.
We will discuss following deployment models.
- [0. Static binary](#0-static-binary)
- [1. Docker](#1-docker)
- [2. Docker Compose](#2-docker-compose)
- [3. Kubernetes](#3-kubernetes)
- [4. Red Hat Ansible and OpenShift](#4-red-hat-ansible-and-openshift)
- [5. Kerberos Factory](#5-kerberos-factory)
- [6. Terraform](#6-terraform)
- [7. Salt](#7-salt)
- [8. Balena](#8-balena)
## 0. Static binary
Kerberos Agents are now also shipped as static binaries. Within the Docker image build, we are extracting the Kerberos Agent binary and are [uploading them to the releases page](https://github.com/kerberos-io/agent/releases) in the repository. By opening a release you'll find a `.tar` with the relevant files.
> Learn more [about the Kerberos Agent binary here](https://github.com/kerberos-io/agent/tree/master/deployments/binary).
## 1. Docker
Leveraging `docker` is probably one of the easiest ways to run and test the Kerberos Agent. Thanks to its multi-architecture images you could run it on almost every machine. The `docker` approach is perfect for running one or two cameras in a (single machine) home deployment, a POC to verify its capabilities, or testing if your old/new IP camera is operational with our Kerberos Agent.
> Learn more [about Kerberos Agent on Docker here](https://github.com/kerberos-io/agent/tree/master/deployments/docker#1-running-a-single-container).
## 2. Docker Compose
If you consider `docker` as "your way to go", but require to run a bigger (single machine) deployment at home or inside your store then `docker compose` would be more suitable. By specifying a single `docker-compose.yaml` file, you can define all your Kerberos Agents (and thus cameras) in a single file, with a custom configuration to fit your needs.
> Learn more [about Kerberos Agent with Docker Compose here](https://github.com/kerberos-io/agent/tree/master/deployments/docker#2-running-multiple-containers-with-docker-compose).
## 3. Kubernetes
As described above, `docker` is a great tool for smaller deployments, where you are just running on a single machine and want to ramp up quickly. As you might expect, this is not an ideal situation for production deployments. Kubernetes can help you to build a scalable, flexible and resilient deployment.
> Learn more [about Kerberos Agent in a Kubernetes cluster here](https://github.com/kerberos-io/agent/tree/master/deployments/kubernetes).
## 4. Red Hat Ansible and OpenShift
If you are running an alternative distribution such as Red Hat OpenShift, things will work out exactly as mentioned before with the `Kubernetes` deployment. You'll have all the benefits of Red Hat OpenShift on top. One of the things we provide here is an Ansible playbook to deploy the Kerberos Agent in the OpenShift cluster.
> Learn more [about Kerberos Agent in OpenShift with Ansible](https://github.com/kerberos-io/agent/tree/master/deployments/ansible-openshift).
## 5. Kerberos Factory
All of the previous deployments, `docker`, `kubernetes` and `openshift` are great for a technical audience. However for business users, it might be more convenient to have a clean web ui, that one can leverage to add one or more cameras (Kerberos Agents), without the hassle of the technical resources.
> Learn more [about Kerberos Agent with Kerberos Factory](https://github.com/kerberos-io/agent/tree/master/deployments/factory).
## 6. Terraform
Terraform is a tool for infrastructure provisioning to build infrastructure through code, often called Infrastructure as Code. So, Terraform allows you to automate and manage your infrastructure, your platform, and the services that run on that platform. By using Terraform you can deploy your Kerberos Agents remotely at scale.
> Learn more [about Kerberos Agent with Terraform](https://github.com/kerberos-io/agent/tree/master/deployments/terraform).
## 7. Salt
To be written
## 8. Balena
Balena Cloud provides a seamless way of building and deploying applications at scale through the concepts of `blocks`, `apps` and `fleets`. Once you have your `app` deployed, for example our Kerberos Agent, you can benefit from features such as: remote access, over the air updates, an encrypted public `https` endpoint and many more.
Together with the Balena.io team we've built a Balena App, called [`video-surveillance`](https://hub.balena.io/apps/2064752/video-surveillance), which anyone can use to deploy a video surveillance system in a matter of minutes with all the expected management features you can think of.
> Learn more [about Kerberos Agent with Balena](https://github.com/kerberos-io/agent/tree/master/deployments/balena).
## 9. Snap
The Snap Store, also known as the Ubuntu Store, is a commercial centralized software store operated by Canonical. Similar to AppImage or Flatpak the Snap Store is able to provide up to date software no matter what version of Linux you are running and how old your libraries are.
We have published our own snap `Kerberos Agent` on the Snap Store, allowing you to seamlessly install a Kerberos Agent on your Linux device.
> Learn more [about Kerberos Agent with Snap](https://github.com/kerberos-io/agent/tree/master/deployments/snap).

View File

@@ -0,0 +1,49 @@
# Deploy to a Red Hat OpenShift cluster with Ansible
Kubernetes is great, but you might love OpenShift even more. In this directory you'll find some resources to deploy your Kerberos Agent in an OpenShift cluster using Ansible playbook. We'll review the different tasks of the Ansible playbook step by step; find the complete `playbook.yaml` here.
## Variables
We'll have a few `variables` in our `playbook.yml` that will help us to setup secure connection with the OpenShift cluster. We need the `cluster_url` and the `username` and `password` of the OpenShift cluster. If you don't know where to find this, you can find this in the OpenShift web ui.
vars:
- oc_cluster_url: ""
- oc_username: ""
- oc_password: ""
## Tasks
Once we have supplied the `variables` we will define following tasks:
- name: Print Variables
- name: Try to login to OCP cluster
- name: Create a Namespace
- name: Create a Persistent volume claim
- name: Deploy Kerberos Agent
- name: Expose Kerberos Agent
1. Print variables: this is a validation step, where we make sure we have the correct variables supplied to the `ansible-playbook` command. This confirms we are using the right credentials to setup a secure connection with the OpenShift cluster.
2. Setup a connection with OpenShift using the defined variables. If successful an `api_key` will become available in the `k8s_auth_result` variable. This variable will be used with every subsequent operation against the OpenShift cluster.
3. A best practice is to isolate your workloads in namespaces. Therefore we'll create a new namespace in our OpenShift cluster.
4. (Optional) Create a persistent volume to persist the configuration file and recordings in a volume.
5. Deploy Kerberos Agent through a `deployment`.
6. Expose the Kerberos Agent web interface through a `LoadBalancer`; public internet accessible IP address.
## Run the playbook
Now you understand what is happening in the playbook, let's run it. Make sure you have `ansible` installed on your `host` or `deploy` machine.
Specify the `environment` input variable as a `JSON` with all required variables defined in step 1. Reference the `playbook.yml` file and execute.
ansible-playbook -e '{ \
"oc_cluster_url":"https://api.j5z0adui.westeurope.aroapp.io:6443", \
"oc_username":"kubeadmin",\
"oc_password":"xxx" \
}' playbook.yml
If everything runs as expected you should see your Kerberos Agent deployed, together with an assigned public ip address. Paste the ip address in your browser, the Kerberos Agent web interface will show up. You can use [the default username and password to sign-in](https://github.com/kerberos-io/agent#access-the-kerberos-agent), or if changed to your own (which is recommended).

View File

@@ -0,0 +1,140 @@
- hosts: localhost
vars:
- oc_cluster_url: ""
- oc_username: ""
- oc_password: ""
tasks:
- name: Print Variables
debug:
msg: "OpenShift url: {{ oc_cluster_url }}, OpenShift username: {{ oc_username }}, OpenShift password: {{ oc_password }}"
- name: Try to login to OCP cluster
k8s_auth:
host: "{{ oc_cluster_url }}"
username: "{{ oc_username }}"
password: "{{ oc_password }}"
validate_certs: no
register: k8s_auth_result
- name: Create a Namespace
k8s:
state: present
host: "{{ oc_cluster_url }}"
api_key: "{{ k8s_auth_result.k8s_auth.api_key }}"
validate_certs: no
definition:
apiVersion: v1
kind: Namespace
metadata:
name: kerberos
register: k8s_namespace_result
- name: Create a Persistent volume claim
k8s:
state: present
host: "{{ oc_cluster_url }}"
api_key: "{{ k8s_auth_result.k8s_auth.api_key }}"
validate_certs: no
namespace: kerberos
definition:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kerberos-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 2Gi
storageClassName: managed-premium
register: pvc_result
- name: Deploy Kerberos Agent
k8s:
state: present
apply: yes
namespace: kerberos
host: "{{ oc_cluster_url }}"
api_key: "{{ k8s_auth_result.k8s_auth.api_key }}"
validate_certs: no
definition:
apiVersion: apps/v1
kind: Deployment
metadata:
name: agent
labels:
name: agent
spec:
selector:
matchLabels:
app: agent
replicas: 1
template:
metadata:
labels:
app: agent
spec:
volumes:
- name: kerberos-data
persistentVolumeClaim:
claimName: kerberos-data
initContainers:
- name: download-config
image: kerberos/agent:latest
volumeMounts:
- name: kerberos-data
mountPath: /home/agent/data/config
subPath: config
command:
[
"cp",
"/home/agent/data/config.template.json",
"/home/agent/data/config/config.json",
]
containers:
- name: agent
image: kerberos/agent:latest
volumeMounts:
- name: kerberos-data
mountPath: /home/agent/data/config
subPath: config
- name: kerberos-data
mountPath: /home/agent/data/recordings
subPath: recordings
- name: kerberos-data
mountPath: /home/agent/data/snapshots
subPath: snapshots
- name: kerberos-data
mountPath: /home/agent/data/cloud
subPath: cloud
ports:
- containerPort: 80
protocol: TCP
- name: Expose Kerberos Agent
k8s:
state: present
apply: yes
namespace: kerberos
host: "{{ oc_cluster_url }}"
api_key: "{{ k8s_auth_result.k8s_auth.api_key }}"
validate_certs: no
definition:
kind: Service
apiVersion: v1
metadata:
name: agent-svc
labels:
name: agent-svc
spec:
selector:
app: agent
type: LoadBalancer
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP

View File

@@ -0,0 +1,31 @@
# Deployment with Balena
Balena Cloud provides a seamless way of building and deploying applications at scale through the concepts of `blocks`, `apps` and `fleets`. Once you have your `app` deployed, for example our Kerberos Agent, you can benefit from features such as: remote access, over the air updates, an encrypted public `https` endpoint and many more.
We provide two mechanisms to deploy Kerberos Agent to a Balena Cloud fleet:
1. Use Kerberos Agent as [a block part of your application](https://github.com/kerberos-io/balena-agent-block).
2. Use Kerberos Agent as [a stand-alone application](https://github.com/kerberos-io/balena-agent).
## Block
Within Balena you can build the concept of a block, which is the equivalent of a container image or a function in a typical programming language. The idea of blocks, you can find a more thorough explanation [here](https://docs.balena.io/learn/develop/blocks/), is that you can compose and combine multiple `blocks` to level up to the concept of an `app`.
You as a developer can choose which `blocks` you would like to use, to build the desired `application` state you prefer. For example you can use the [Kerberos Agent block](https://hub.balena.io/blocks/2064662/agent) to compose a video surveillance system as part of your existing set of blocks.
You can use the `Kerberos Agent` block by defining following elements in your `compose` file.
agent:
image: bh.cr/kerberos_io/agent
## App
Next to building individual `blocks` you as a developer can also decide to build up an application, composed of one or more `blocks` or third-party containers, and publish it as an `app` to the Balena Hub. This is exactly [what we've done.](https://hub.balena.io/apps/2064752/video-surveillance)
On Balena Hub we have created the [`video-surveillance` application](https://hub.balena.io/apps/2064752/video-surveillance) that utilises the [Kerberos Agent `block`](https://hub.balena.io/blocks/2064662/agent). The idea of this application is that it utilises the foundation of our Kerberos Agent, but that it might include more `blocks` over time to increase and improve functionalities from other community projects.
To deploy the application you can simply press below `Deploy button` or you can navigate to the [Balena Hub apps page](https://hub.balena.io/apps/2064752/video-surveillance).
[![deploy with balena](https://balena.io/deploy.svg)](https://dashboard.balena-cloud.com/deploy?repoUrl=https://github.com/kerberos-io/agent)
You can find the source code, `balena.yaml` and `docker-compose.yaml` files in the [`balena-agent` repository](https://github.com/kerberos-io/balena-agent).

View File

@@ -0,0 +1,34 @@
# Binary
Kerberos Agents are now also shipped as static binaries. Within the Docker image build, we are extracting the Kerberos Agent binary and are [uploading them to the releases page](https://github.com/kerberos-io/agent/releases) in the repository. By opening a release you'll find a `.tar` with the relevant files.
- `main`: this is the Kerberos Agent binary.
- `data`: the folder containing the recorded video, configuration, etc.
- `mp4fragment`: a binary to transform MP4s to Fragmented MP4s.
- `www`: the Kerberos Agent ui (compiled React app).
You can run the binary as following on port `80`:
main -action=run -port=80
## Systemd
When running on a Linux OS you might consider to auto-start the Kerberos Agent using systemd. Create a file called `/etc/systemd/system/kerberos-agent.service` and copy-paste following configuration. Update the `WorkingDirectory` and `ExecStart` accordingly.
[Unit]
Wants=network.target
[Service]
ExecStart=/home/pi/agent/main -action=run -port=80
WorkingDirectory=/home/pi/agent/
[Install]
WantedBy=multi-user.target
To load your new service, we'll execute following commands.
sudo systemctl daemon-reload
sudo systemctl enable kerberos-agent
sudo systemctl start kerberos-agent
Confirm the service is running:
sudo systemctl status kerberos-agent

View File

@@ -0,0 +1,92 @@
# Deployment with Docker
The easiest, and let's say most natural, deployment is done [by utilising `docker`](#1-running-a-single-container). Docker can run a stand-alone, single, Kerberos Agent (or container) and a bigger set of Kerberos Agents (or containers) [through `docker compose`](#2-running-multiple-containers-with-docker-compose).
## 1. Running a single container
We are creating Docker images as part of our CI/CD process. You'll find our Docker images on [Docker hub](https://hub.docker.com/r/kerberos/agent). Pick a specific tag of choice, or use latest. Once done run below command, this will open the web interface of your Kerberos agent on port 80.
docker run -p 80:80 --name mycamera -d kerberos/agent:latest
Or for a develop build:
docker run -p 80:80 --name mycamera -d kerberos/agent-dev:latest
Feel free to use another port if your host system already has a workload running on `80`. For example `8082`.
docker run -p 8082:80 --name mycamera -d kerberos/agent:latest
### Attach a volume
By default your Kerberos agent will store all its configuration and recordings inside the container. It might be interesting to store both configuration and your recordings outside the container, on your local disk. This helps persisting your storage even after you decide to wipe out your Kerberos agent.
You attach a volume to your container by leveraging the `-v` option. To mount your own configuration file, execute as following:
1. Decide where you would like to store your configuration and recordings; create a new directory for the config file and recordings folder accordingly.
mkdir agent
mkdir agent/config
mkdir agent/recordings
2. Once you have located your desired directory, copy the latest [`config.json`](https://github.com/kerberos-io/agent/blob/master/machinery/data/config/config.json) file into your config directory.
wget https://raw.githubusercontent.com/kerberos-io/agent/master/machinery/data/config/config.json -O agent/config/config.json
3. Run the docker command as following to attach your config directory and recording directory.
docker run -p 80:80 --name mycamera \
-v $(pwd)/agent/config:/home/agent/data/config \
    -v $(pwd)/agent/recordings:/home/agent/data/recordings \
    -d --restart=always kerberos/agent:latest
### Override with environment variables
Next to attaching the configuration file, it is also possible to override the configuration with environment variables. This makes `docker compose` or `kubernetes` deployments much easier and more scalable. Using this approach we simplify automation through `ansible` and `terraform`. You'll find [the full list of environment variables on the main README.md file](https://github.com/kerberos-io/agent#override-with-environment-variables).
### 2. Running multiple containers with Docker compose
When running multiple containers, you could execute the above process multiple times, or a better way is to run a `docker compose` with predefined configuration file, a `docker-compose.yaml`.
You'll find [an example `docker-compose.yaml` file here](https://github.com/kerberos-io/agent/blob/master/deployments/docker/docker-compose.yaml). This configuration file includes a definition for running 3 Kerberos Agents (or containers). By specifying environment variables you can override the internal configuration. To add more Kerberos Agents to your deployment, just `copy-paste` a `service` block and modify the name, exposed port, and settings accordingly.
kerberos-agent2:
image: "kerberos/agent:latest"
ports:
- "8082:80"
environment:
- AGENT_NAME=agent2
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
- AGENT_HUB_KEY=yyy
- AGENT_HUB_PRIVATE_KEY=yyy
#### Attaching volumes
As described in [1. Running a single container](#1-running-a-single-container) you can also assign volumes to your `docker compose` services. A volume can be added to persist the recordings of your Kerberos Agents on the host machine, or to provide more accurate configurations.
When attaching a volume for persisting recordings or mounting configuration files from the host system, the `docker-compose.yaml` would look like this.
Let's start by creating some directories on your host system. We'll consider 3 Kerberos Agents in this example.
mkdir -p agent1/config agent1/recordings
mkdir -p agent2/config agent2/recordings
mkdir -p agent3/config agent3/recordings
Download the configuration file in each Kerberos Agent configuration directory.
wget https://raw.githubusercontent.com/kerberos-io/agent/master/machinery/data/config/config.json -O agent1/config/config.json
wget https://raw.githubusercontent.com/kerberos-io/agent/master/machinery/data/config/config.json -O agent2/config/config.json
wget https://raw.githubusercontent.com/kerberos-io/agent/master/machinery/data/config/config.json -O agent3/config/config.json
Next we'll add a `volumes:` section to each Kerberos Agent (service) in the `docker-compose-with-volumes.yaml` file.
volumes:
- ./agent1/config:/home/agent/data/config
- ./agent1/recordings:/home/agent/data/recordings
We'll repeat that for the other Kerberos Agents as well. You can review [the final result over here](https://github.com/kerberos-io/agent/blob/master/deployments/docker/docker-compose-with-volumes.yaml).
Run the `docker compose` command by providing a different configuration file name.
docker compose -f docker-compose-with-volumes.yaml up
Please note that you can use a combination of using a configuration file and environment variables at the same time. However environment variables will always override the setting in your configuration file.

View File

@@ -0,0 +1,27 @@
version: "3.9"
services:
kerberos-agent1:
image: "kerberos/agent:latest"
ports:
- "8081:80"
environment:
- AGENT_NAME=agent1
      # You can still override the configuration with environment variables, but it might not make sense if you are attaching a host config.
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
volumes:
- ./agent1/config:/home/agent/data/config
- ./agent1/recordings:/home/agent/data/recordings
kerberos-agent2:
image: "kerberos/agent:latest"
ports:
- "8082:80"
volumes:
- ./agent2/config:/home/agent/data/config
- ./agent2/recordings:/home/agent/data/recordings
kerberos-agent3:
image: "kerberos/agent:latest"
ports:
- "8083:80"
volumes:
- ./agent3/config:/home/agent/data/config
- ./agent3/recordings:/home/agent/data/recordings

View File

@@ -0,0 +1,35 @@
version: "3.9"
services:
kerberos-agent1:
image: "kerberos/agent:latest"
ports:
- "8081:80"
environment:
- AGENT_NAME=agent1
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
- AGENT_HUB_KEY=xxx
- AGENT_HUB_PRIVATE_KEY=xxx
- AGENT_CAPTURE_CONTINUOUS=true
- AGENT_CAPTURE_PRERECORDING=10
- AGENT_CAPTURE_POSTRECORDING=10
- AGENT_CAPTURE_MAXLENGTH=60
- AGENT_CAPTURE_PIXEL_CHANGE=150
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
kerberos-agent2:
image: "kerberos/agent:latest"
ports:
- "8082:80"
environment:
- AGENT_NAME=agent2
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
- AGENT_HUB_KEY=yyy
- AGENT_HUB_PRIVATE_KEY=yyy
kerberos-agent3:
image: "kerberos/agent:latest"
ports:
- "8083:80"
environment:
- AGENT_NAME=agent3
- AGENT_CAPTURE_IPCAMERA_RTSP=rtsp://x.x.x.x:554/Streaming/Channels/101
- AGENT_HUB_KEY=zzz
- AGENT_HUB_PRIVATE_KEY=zzz

View File

@@ -0,0 +1,9 @@
# Deploy with Kerberos Factory
All of the previous deployments, `docker`, `kubernetes` and `openshift` are great for a technical audience. However for business users, it might be more convenient to have a clean web ui, that one can leverage to add one or more cameras (Kerberos Agents), without the hassle of the technical resources.
That's exactly why we have built [Kerberos Factory](https://github.com/kerberos-io/factory). It's a web ui on top of a Kubernetes cluster, which allows non-technical users to administer and configure a video landscape.
![Factory login](./factory-login.gif)
The idea of [Kerberos Factory](https://github.com/kerberos-io/factory) is that one can configure and deploy a camera, by filling-in some basic input fields. Once done [Kerberos Factory](https://github.com/kerberos-io/factory), will create the relevant resources in your Kubernetes cluster.

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 MiB

View File

@@ -0,0 +1,111 @@
# Deployment with Kubernetes
As described in the [Deployment with Docker](https://github.com/kerberos-io/agent/tree/master/deployments/docker), `docker` is a great tool for smaller deployments, where you are just running on a single machine and want to ramp up quickly. As you might expect, this is not an ideal situation for production deployments.
Kubernetes can help you to build a scalable, flexible and resilient deployment. By introducing the concept of multi-nodes and deployments, you can make sure your Kerberos Agents are evenly distributed across your different machines, and you can add more nodes when you need to scale out.
We've provided an example deployment `deployment-agent.yml` in this directory, which showcases how to create a deployment (and under the hood a pod), to run a Kerberos Agent workload.
## Create a Kerberos Agent deployment
It's always a best practice to isolate and structure your workloads in Kubernetes. To achieve this we are utilising the concept of namespaces. For this example we will create a new namespace `demo`.
kubectl create namespace demo
Now we have a namespace, have a look at `deployment-agent.yml` in this folder. This configuration file describes the Kubernetes resources we would like to create, and how the Kerberos Agent needs to behave: environment variables, container ports, etc. At the bottom of the file, we find a `service` part, this tells Kubernetes to expose the Kerberos Agent user interface on a publicly accessible IP address. **_Please note that you don't need to expose this, as you can configure the Kerberos Agent with a volume and/or environment variables._**
Let's move on, and apply the Kerberos Agent deployment and service.
kubectl apply -f deployment-agent.yml -n demo
Watch deployment and service to be ready.
watch kubectl get all -n demo
When the deployment and service is created successfully, you should see something like this.
Every 2.0s: kubectl get all -n demo Fri Dec 9 16:33:17 2022
NAME READY STATUS RESTARTS AGE
pod/agent-7c75c4dbcf-zxrb5 1/1 Running 0 19s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/agent-svc LoadBalancer 10.x.x.x 108.x.x.x 80:32664/TCP 20s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/agent 1/1 1 1 20s
NAME DESIRED CURRENT READY AGE
replicaset.apps/agent-7c75c4dbcf 1 1 1 20s
When copying the `EXTERNAL-IP` and pasting it in your browser, you should see the Kerberos Agent user interface. You can use [the default username and password to sign-in](https://github.com/kerberos-io/agent#access-the-kerberos-agent), or if changed to your own (which is recommended).
## Configure with volumes
Just like with `docker`, you can also attach `volumes` to the Kerberos Agent deployment, by creating a `Persistent Volume` and mount it to a specific directory.
Depending on where and how you are hosting the Kubernetes cluster, you may need to create a new `storageClass` or use a predefined `storageClass` from your cloud provider (Azure, GCP, AWS, ..). Have a look at `deployment-agent-volume.yml` to review a complete example.
template:
metadata:
labels:
app: agent
spec:
volumes:
- name: kerberos-data
persistentVolumeClaim:
claimName: kerberos-data
...
containers:
- name: agent
image: kerberos/agent:latest
volumeMounts:
- name: kerberos-data
mountPath: /home/agent/data/config
subPath: config
...
## Expose with Ingress
In the first example `deployment-agent.yml` we are using a `LoadBalancer` to expose the Kerberos Agent user interface; as shown below. If you are a bit more experienced with Kubernetes, you will know there are other `service types` as well.
---
apiVersion: v1
kind: Service
...
type: LoadBalancer
ports:
- port: 80
...
An alternative to `LoadBalancer` is `Ingress`. By leveraging an ingress such as `ingress-nginx` or `traefik` you set up a gateway (single point of contact), through which all communication to your apps (services) will flow.
A huge benefit (there are many others), is that you only allocate 1 public IP address for all your services. So instead of creating a `LoadBalancer` and thus a public IP address for every agent, you will create an `Ingress` service for each agent. Review the complete example at `deployment-agent-with-ingress.yml`.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: agent-ingress
labels:
name: agent-ingress
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
tls:
- hosts:
- "myagent.kerberos.io"
secretName: agent-secret
rules:
- host: myagent.kerberos.io
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: agent-svc
port:
number: 80

View File

@@ -0,0 +1,54 @@
# deployment-agent-volume.yml — example Kerberos Agent Deployment that persists
# its data on a single PersistentVolumeClaim named "kerberos-data".
# NOTE(review): the YAML nesting in this listing appears flattened by the page
# rendering — restore standard indentation before applying with kubectl.
apiVersion: apps/v1
kind: Deployment
metadata:
name: agent
labels:
name: agent
spec:
selector:
matchLabels:
app: agent
# A single replica: one agent instance.
replicas: 1
template:
metadata:
labels:
app: agent
spec:
volumes:
- name: kerberos-data
persistentVolumeClaim:
claimName: kerberos-data
# Seed the config volume before the agent starts: copy the bundled
# template into the mounted "config" subPath.
initContainers:
- name: download-config
image: kerberos/agent:latest
volumeMounts:
- name: kerberos-data
mountPath: /home/agent/data/config
subPath: config
command:
[
"cp",
"/home/agent/data/config.template.json",
"/home/agent/data/config/config.json",
]
containers:
- name: agent
image: kerberos/agent:latest
# One PVC mounted four times; each subPath keeps a data type separated.
volumeMounts:
- name: kerberos-data
mountPath: /home/agent/data/config
subPath: config
- name: kerberos-data
mountPath: /home/agent/data/recordings
subPath: recordings
- name: kerberos-data
mountPath: /home/agent/data/snapshots
subPath: snapshots
- name: kerberos-data
mountPath: /home/agent/data/cloud
subPath: cloud
ports:
- containerPort: 80
protocol: TCP

View File

@@ -0,0 +1,82 @@
# deployment-agent-with-ingress.yml — Kerberos Agent Deployment plus a
# cluster-internal Service and an nginx Ingress with TLS (cert-manager).
# NOTE(review): the YAML nesting in this listing appears flattened by the page
# rendering — restore standard indentation before applying with kubectl.
apiVersion: apps/v1
kind: Deployment
metadata:
name: agent
labels:
name: agent
spec:
selector:
matchLabels:
app: agent
replicas: 1
template:
metadata:
labels:
app: agent
spec:
containers:
- name: agent
image: kerberos/agent:latest
ports:
- containerPort: 80
protocol: TCP
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
env:
- name: AGENT_NAME
value: demo-agent
- name: AGENT_CAPTURE_IPCAMERA_RTSP
value: rtsp://fake.kerberos.io/stream
# Placeholder hub credentials — replace "yyy" with real values.
- name: AGENT_HUB_KEY
value: yyy
- name: AGENT_HUB_PRIVATE_KEY
value: yyy
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
---
apiVersion: v1
kind: Service
metadata:
name: agent-svc
labels:
name: agent-svc
spec:
# LoadBalancer is intentionally disabled: traffic enters through the
# Ingress below instead of a dedicated public IP per agent.
#type: LoadBalancer
ports:
- port: 80
targetPort: 80
selector:
app: agent
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: agent-ingress
labels:
name: agent-ingress
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/tls-acme: "true"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# cert-manager issues the TLS certificate stored in "agent-secret" below.
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
tls:
- hosts:
- "demo.kerberos.io"
secretName: agent-secret
rules:
- host: demo.kerberos.io
http:
paths:
- pathType: Prefix
path: "/"
backend:
service:
name: agent-svc
port:
number: 80

View File

@@ -0,0 +1,53 @@
# deployment-agent.yml — minimal Kerberos Agent Deployment exposed publicly
# through a LoadBalancer Service (allocates a public IP per agent).
# NOTE(review): the YAML nesting in this listing appears flattened by the page
# rendering — restore standard indentation before applying with kubectl.
apiVersion: apps/v1
kind: Deployment
metadata:
name: agent
labels:
name: agent
spec:
selector:
matchLabels:
app: agent
replicas: 1
template:
metadata:
labels:
app: agent
spec:
containers:
- name: agent
image: kerberos/agent:latest
ports:
- containerPort: 80
protocol: TCP
resources:
limits:
cpu: 1000m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
env:
- name: AGENT_NAME
value: demo-agent
- name: AGENT_CAPTURE_IPCAMERA_RTSP
value: rtsp://fake.kerberos.io/stream
# Placeholder hub credentials — replace "yyy" with real values.
- name: AGENT_HUB_KEY
value: yyy
- name: AGENT_HUB_PRIVATE_KEY
value: yyy
# find full list of environment variables here: https://github.com/kerberos-io/agent#override-with-environment-variables
---
apiVersion: v1
kind: Service
metadata:
name: agent-svc
labels:
name: agent-svc
spec:
# Expose the agent UI on a public IP; see the Ingress example for an
# alternative that avoids one LoadBalancer per agent.
type: LoadBalancer
ports:
- port: 80
targetPort: 80
selector:
app: agent

View File

@@ -0,0 +1,15 @@
# Deployment with Snap Store
By browsing to the Snap Store, you'll be able [to find our own snap `Kerberos Agent`](https://snapcraft.io/kerberosio). You can install the `Kerberos Agent` through the command line.
snap install kerberosio
Or use the Desktop client to have a visual interface.
![Kerberos Agent on Snap Store](./snapstore.png)
Once installed, you can find your Kerberos Agent configuration at `/var/snap/kerberosio/common`. Run the Kerberos Agent as follows.
sudo kerberosio.agent -action=run -port=80
If successful, you'll be able to browse to port `80`, or to a different port if you defined one. This will open the Kerberos Agent interface.

Binary file not shown.

After

Width:  |  Height:  |  Size: 616 KiB

View File

@@ -0,0 +1,41 @@
# Deployment with Terraform
If you are using Terraform as part of your DevOps stack, you might utilise it to deploy your Kerberos Agents. Within this deployment folder we have added an example Terraform file `docker.tf`, which installs the Kerberos Agent `docker` container on a remote system over `SSH`. We might create our own provider in the future, or add additional examples such as `snap`, `kubernetes`, etc.
For this example we will install Kerberos Agent using `docker` on a remote `linux` machine. Therefore we'll make sure we have the `TelkomIndonesia/linux` provider initialised.
terraform init
Once initialised you should see similar output:
Initializing the backend...
Initializing provider plugins...
- Reusing previous version of telkomindonesia/linux from the dependency lock file
- Using previously-installed telkomindonesia/linux v0.7.0
Go and open the `docker.tf` file and locate the `linux` provider, then modify the following credentials accordingly. Make sure they match for creating an `SSH` connection.
provider "linux" {
host = "x.y.z.u"
port = 22
user = "root"
password = "password"
}
Apply the `docker.tf` file, to install `docker` and the `kerberos/agent` docker container.
terraform apply
Once done, you should see the following output, and you should be able to reach the remote machine on port `80`, or on the specified port if you configured it differently.
Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.
Enter a value: yes
linux_script.install_docker_kerberos_agent: Modifying... [id=a56cf7b0-db66-4f9b-beec-8a4dcef2a0c7]
linux_script.install_docker_kerberos_agent: Modifications complete after 3s [id=a56cf7b0-db66-4f9b-beec-8a4dcef2a0c7]
Apply complete! Resources: 0 added, 1 changed, 0 destroyed.

View File

@@ -0,0 +1,47 @@
# docker.tf — installs Docker and runs the kerberos/agent container on a
# remote Linux host over SSH, using the TelkomIndonesia/linux provider.
# NOTE(review): indentation in this listing appears flattened by the page
# rendering — run `terraform fmt` after restoring it.
terraform {
required_providers {
linux = {
source = "TelkomIndonesia/linux"
version = "0.7.0"
}
}
}
# SSH connection details for the target machine — replace the placeholders.
provider "linux" {
host = "x.y.z.u"
port = 22
user = "root"
password = "password"
}
# Image/tag to deploy and the host port on which the agent UI is published.
locals {
image = "kerberos/agent"
version = "latest"
port = 80
}
# Installs the Docker package with apt on the remote host.
resource "linux_script" "install_docker" {
lifecycle_commands {
create = "apt update && apt install -y $PACKAGE_NAME"
# Reads the installed version so Terraform can detect drift.
read = "apt-cache policy $PACKAGE_NAME | grep 'Installed:' | grep -v '(none)' | awk '{ print $2 }' | xargs | tr -d '\n'"
update = "apt update && apt install -y $PACKAGE_NAME"
delete = "apt remove -y $PACKAGE_NAME"
}
environment = {
# NOTE(review): on Debian/Ubuntu the engine package is usually
# "docker.io" (or docker-ce from Docker's own repo) — verify that
# "docker" resolves to the intended package.
PACKAGE_NAME = "docker"
}
}
# Pulls the image and (re)creates the "agent" container, publishing $PORT:80.
resource "linux_script" "install_docker_kerberos_agent" {
lifecycle_commands {
create = "docker pull $IMAGE:$VERSION && docker run -d -p $PORT:80 --name agent $IMAGE:$VERSION"
read = "docker inspect agent"
update = "docker pull $IMAGE:$VERSION && docker rm agent --force && docker run -d -p $PORT:80 --name agent $IMAGE:$VERSION"
delete = "docker rm agent --force"
}
environment = {
IMAGE = local.image
VERSION = local.version
PORT = local.port
}
}

18
machinery/.vscode/launch.json vendored Normal file
View File

@@ -0,0 +1,18 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Launch Package",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "main.go",
"args": ["-action", "run"],
"envFile": "${workspaceFolder}/.env",
"buildFlags": "--tags dynamic",
},
]
}

View File

@@ -1,37 +1,41 @@
{
"type": "template",
"key": "0x123-a-test-agent",
"name": "camera",
"mqtturi": "tcp://mqtt.kerberos.io",
"mqtt_username": "xxx",
"mqtt_password": "xxx",
"timezone": "Europe/Brussels",
"stunuri": "stun:turn.kerberos.io:8443",
"turnuri": "turn:turn.kerberos.io:8443",
"turn_password": "xxx",
"type": "",
"key": "",
"name": "agent",
"time": "false",
"offline": "false",
"auto_clean": "true",
"remove_after_upload": "true",
"max_directory_size": 100,
"timezone": "Africa/Ceuta",
"capture": {
"id": "ipcamera",
"name": "",
"ipcamera": {
"rtsp": "xxx",
"rtsp": "",
"sub_rtsp": "",
"fps": ""
},
"usbcamera": {
"device": "",
"fps": ""
"device": ""
},
"raspicamera": {
"device": "",
"fps": ""
"device": ""
},
"continuous": "false",
"postrecording": 10,
"prerecording": 5,
"maxlengthrecording":60,
"recording": "true",
"snapshots": "true",
"liveview": "true",
"motion": "true",
"postrecording": 20,
"prerecording": 10,
"maxlengthrecording": 30,
"transcodingwebrtc": "",
"transcodingresolution": 0,
"forwardwebrtc": "",
"transcodingwebrtc": "false",
"transcodingresolution": 25,
"fragmented": "false",
"fragmentedduration": 1
"fragmentedduration": 8,
"pixelChangeThreshold": 150
},
"timetable": [
{
@@ -42,8 +46,8 @@
},
{
"start1": 0,
"end1": 69840,
"start2": 70080,
"end1": 43199,
"start2": 43200,
"end2": 86400
},
{
@@ -78,6 +82,7 @@
}
],
"region": {
"name": "",
"rectangle": {
"x1": 0,
"y1": 0,
@@ -86,16 +91,27 @@
},
"polygon": []
},
"cloud": "kstorage",
"cloud": "s3",
"s3": {
"proxyuri": "http://proxy.kerberos.io",
"bucket": "kerberosaccept",
"region": "eu-west-1"
},
"kstorage": {},
"dropbox": {},
"mqtturi": "tcp://mqtt.kerberos.io:1883",
"mqtt_username": "",
"mqtt_password": "",
"stunuri": "stun:turn.kerberos.io:8443",
"turnuri": "turn:turn.kerberos.io:8443",
"turn_username": "username1",
"turn_password": "password1",
"heartbeaturi": "",
"hub_encryption": "true",
"hub_uri": "https://api.cloud.kerberos.io",
"hub_key": "xxx",
"hub_private_key": "xxx",
"hub_key": "",
"hub_private_key": "",
"hub_site": "",
"kstorage": {
"uri": "https://staging.api.vault.kerberos.live",
"access_key": "xxx",
"secret_access_key": "xxx",
"provider": "",
"directory": "xxx"
}
"condition_uri": "",
"encryption": {}
}

View File

Binary file not shown.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,323 @@
basePath: /
definitions:
models.APIResponse:
properties:
can_pan_tilt:
type: boolean
can_zoom:
type: boolean
data: {}
message: {}
ptz_functions: {}
type: object
models.Authentication:
properties:
password:
type: string
username:
type: string
type: object
models.Authorization:
properties:
code:
type: integer
expire:
type: string
role:
type: string
token:
type: string
username:
type: string
type: object
models.CameraStreams:
properties:
rtsp:
type: string
sub_rtsp:
type: string
type: object
models.Capture:
properties:
continuous:
type: string
forwardwebrtc:
type: string
fragmented:
type: string
fragmentedduration:
type: integer
ipcamera:
$ref: '#/definitions/models.IPCamera'
liveview:
type: string
maxlengthrecording:
type: integer
motion:
type: string
name:
type: string
pixelChangeThreshold:
type: integer
postrecording:
type: integer
prerecording:
type: integer
raspicamera:
$ref: '#/definitions/models.RaspiCamera'
recording:
type: string
snapshots:
type: string
transcodingresolution:
type: integer
transcodingwebrtc:
type: string
usbcamera:
$ref: '#/definitions/models.USBCamera'
type: object
models.Config:
properties:
auto_clean:
type: string
capture:
$ref: '#/definitions/models.Capture'
cloud:
type: string
condition_uri:
type: string
dropbox:
$ref: '#/definitions/models.Dropbox'
encryption:
$ref: '#/definitions/models.Encryption'
friendly_name:
type: string
heartbeaturi:
description: obsolete
type: string
hub_key:
type: string
hub_private_key:
type: string
hub_site:
type: string
hub_uri:
type: string
key:
type: string
kstorage:
$ref: '#/definitions/models.KStorage'
max_directory_size:
type: integer
mqtt_password:
type: string
mqtt_username:
type: string
mqtturi:
type: string
name:
type: string
offline:
type: string
region:
$ref: '#/definitions/models.Region'
remove_after_upload:
type: string
s3:
$ref: '#/definitions/models.S3'
stunuri:
type: string
time:
type: string
timetable:
items:
$ref: '#/definitions/models.Timetable'
type: array
timezone:
type: string
turn_password:
type: string
turn_username:
type: string
turnuri:
type: string
type:
type: string
type: object
models.Coordinate:
properties:
x:
type: number
"y":
type: number
type: object
models.Dropbox:
properties:
access_token:
type: string
directory:
type: string
type: object
models.Encryption:
properties:
enabled:
type: string
fingerprint:
type: string
private_key:
type: string
recordings:
type: string
symmetric_key:
type: string
type: object
models.EventFilter:
properties:
number_of_elements:
type: integer
timestamp_offset_end:
type: integer
timestamp_offset_start:
type: integer
type: object
models.IPCamera:
properties:
fps:
type: string
height:
type: integer
onvif:
type: string
onvif_password:
type: string
onvif_username:
type: string
onvif_xaddr:
type: string
rtsp:
type: string
sub_rtsp:
type: string
width:
type: integer
type: object
models.KStorage:
properties:
access_key:
type: string
cloud_key:
description: old way, remove this
type: string
directory:
type: string
provider:
type: string
secret_access_key:
type: string
uri:
type: string
type: object
models.OnvifCredentials:
properties:
onvif_password:
type: string
onvif_username:
type: string
onvif_xaddr:
type: string
type: object
models.OnvifPanTilt:
properties:
onvif_credentials:
$ref: '#/definitions/models.OnvifCredentials'
pan:
type: number
tilt:
type: number
type: object
models.OnvifPreset:
properties:
onvif_credentials:
$ref: '#/definitions/models.OnvifCredentials'
preset:
type: string
type: object
models.OnvifZoom:
properties:
onvif_credentials:
$ref: '#/definitions/models.OnvifCredentials'
zoom:
type: number
type: object
models.Polygon:
properties:
coordinates:
items:
$ref: '#/definitions/models.Coordinate'
type: array
id:
type: string
type: object
models.RaspiCamera:
properties:
device:
type: string
type: object
models.Rectangle:
properties:
x1:
type: integer
x2:
type: integer
y1:
type: integer
y2:
type: integer
type: object
models.Region:
properties:
name:
type: string
polygon:
items:
$ref: '#/definitions/models.Polygon'
type: array
rectangle:
$ref: '#/definitions/models.Rectangle'
type: object
models.S3:
properties:
bucket:
type: string
proxy:
type: string
proxyuri:
type: string
publickey:
type: string
region:
type: string
secretkey:
type: string
username:
type: string
type: object
models.Timetable:
properties:
end1:
type: integer
end2:
type: integer
start1:
type: integer
start2:
type: integer
type: object
models.USBCamera:
properties:
device:
type: string
type: object
info:
contact:
email: support@kerberos.io
@@ -11,7 +330,418 @@ info:
termsOfService: https://kerberos.io
title: Swagger Kerberos Agent API
version: "1.0"
paths: {}
paths:
/api/camera/onvif/capabilities:
post:
description: Will return the ONVIF capabilities for the specific camera.
operationId: camera-onvif-capabilities
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Will return the ONVIF capabilities for the specific camera.
tags:
- onvif
/api/camera/onvif/gotopreset:
post:
description: Will activate the desired ONVIF preset.
operationId: camera-onvif-gotopreset
parameters:
- description: OnvifPreset
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifPreset'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Will activate the desired ONVIF preset.
tags:
- onvif
/api/camera/onvif/inputs:
post:
description: Will get the digital inputs from the ONVIF device.
operationId: get-digital-inputs
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
security:
- Bearer: []
summary: Will get the digital inputs from the ONVIF device.
tags:
- onvif
/api/camera/onvif/login:
post:
description: Try to login into ONVIF supported camera.
operationId: camera-onvif-login
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Try to login into ONVIF supported camera.
tags:
- onvif
/api/camera/onvif/outputs:
post:
description: Will get the relay outputs from the ONVIF device.
operationId: get-relay-outputs
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
security:
- Bearer: []
summary: Will get the relay outputs from the ONVIF device.
tags:
- onvif
/api/camera/onvif/outputs/{output}:
post:
description: Will trigger the relay output from the ONVIF device.
operationId: trigger-relay-output
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
- description: Output
in: path
name: output
required: true
type: string
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
security:
- Bearer: []
summary: Will trigger the relay output from the ONVIF device.
tags:
- onvif
/api/camera/onvif/pantilt:
post:
description: Panning or/and tilting the camera using a direction (x,y).
operationId: camera-onvif-pantilt
parameters:
- description: OnvifPanTilt
in: body
name: panTilt
required: true
schema:
$ref: '#/definitions/models.OnvifPanTilt'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Panning or/and tilting the camera.
tags:
- onvif
/api/camera/onvif/presets:
post:
description: Will return the ONVIF presets for the specific camera.
operationId: camera-onvif-presets
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Will return the ONVIF presets for the specific camera.
tags:
- onvif
/api/camera/onvif/verify:
post:
description: Will verify the ONVIF connectivity.
operationId: verify-onvif
parameters:
- description: OnvifCredentials
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.OnvifCredentials'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
security:
- Bearer: []
summary: Will verify the ONVIF connectivity.
tags:
- onvif
/api/camera/onvif/zoom:
post:
description: Zooming in or out the camera.
operationId: camera-onvif-zoom
parameters:
- description: OnvifZoom
in: body
name: zoom
required: true
schema:
$ref: '#/definitions/models.OnvifZoom'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Zooming in or out the camera.
tags:
- onvif
/api/camera/record:
post:
description: Make a recording.
operationId: camera-record
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Make a recording.
tags:
- camera
/api/camera/restart:
post:
description: Restart the agent.
operationId: camera-restart
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Restart the agent.
tags:
- camera
/api/camera/snapshot/base64:
get:
description: Get a snapshot from the camera in base64.
operationId: snapshot-base64
responses:
"200":
description: ""
summary: Get a snapshot from the camera in base64.
tags:
- camera
/api/camera/snapshot/jpeg:
get:
description: Get a snapshot from the camera in jpeg format.
operationId: snapshot-jpeg
responses:
"200":
description: ""
summary: Get a snapshot from the camera in jpeg format.
tags:
- camera
/api/camera/stop:
post:
description: Stop the agent.
operationId: camera-stop
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Stop the agent.
tags:
- camera
/api/camera/verify/{streamType}:
post:
description: This method will validate a specific profile connection from an
RTSP camera, and try to get the codec.
operationId: verify-camera
parameters:
- description: Stream Type
enum:
- primary
- secondary
in: path
name: streamType
required: true
type: string
- description: Camera Streams
in: body
name: cameraStreams
required: true
schema:
$ref: '#/definitions/models.CameraStreams'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
summary: Validate a specific RTSP profile camera connection.
tags:
- camera
/api/config:
get:
description: Get the current configuration.
operationId: config
responses:
"200":
description: ""
summary: Get the current configuration.
tags:
- config
post:
description: Update the current configuration.
operationId: config
parameters:
- description: Configuration
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.Config'
responses:
"200":
description: ""
summary: Update the current configuration.
tags:
- config
/api/dashboard:
get:
description: Get all information showed on the dashboard.
operationId: dashboard
responses:
"200":
description: ""
summary: Get all information showed on the dashboard.
tags:
- general
/api/days:
get:
description: Get all days stored in the recordings directory.
operationId: days
responses:
"200":
description: ""
summary: Get all days stored in the recordings directory.
tags:
- general
/api/hub/verify:
post:
description: Will verify the hub connectivity.
operationId: verify-hub
parameters:
- description: Config
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.Config'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
security:
- Bearer: []
summary: Will verify the hub connectivity.
tags:
- persistence
/api/latest-events:
post:
description: Get the latest recordings (events) from the recordings directory.
operationId: latest-events
parameters:
- description: Event filter
in: body
name: eventFilter
required: true
schema:
$ref: '#/definitions/models.EventFilter'
responses:
"200":
description: ""
summary: Get the latest recordings (events) from the recordings directory.
tags:
- general
/api/login:
post:
description: Get Authorization token.
operationId: login
parameters:
- description: Credentials
in: body
name: credentials
required: true
schema:
$ref: '#/definitions/models.Authentication'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.Authorization'
summary: Get Authorization token.
tags:
- authentication
/api/persistence/verify:
post:
description: Will verify the persistence.
operationId: verify-persistence
parameters:
- description: Config
in: body
name: config
required: true
schema:
$ref: '#/definitions/models.Config'
responses:
"200":
description: OK
schema:
$ref: '#/definitions/models.APIResponse'
security:
- Bearer: []
summary: Will verify the persistence.
tags:
- persistence
securityDefinitions:
Bearer:
in: header

View File

@@ -1,101 +1,152 @@
module github.com/kerberos-io/agent/machinery
go 1.18
go 1.20
//replace github.com/kerberos-io/joy4 v1.0.63 => ../../../../github.com/kerberos-io/joy4
//replace github.com/kerberos-io/onvif v0.0.10 => ../../../../github.com/kerberos-io/onvif
require (
github.com/InVisionApp/conjungo v1.1.0
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751
github.com/appleboy/gin-jwt/v2 v2.8.0
github.com/appleboy/gin-jwt/v2 v2.9.1
github.com/bluenviron/gortsplib/v4 v4.6.1
github.com/bluenviron/mediacommon v1.5.1
github.com/cedricve/go-onvif v0.0.0-20200222191200-567e8ce298f6
github.com/deepch/vdk v0.0.17
github.com/eclipse/paho.mqtt.golang v1.4.1
github.com/gin-contrib/cors v1.3.1
github.com/gin-contrib/pprof v1.3.0
github.com/gin-gonic/contrib v0.0.0-20201101042839-6a891bf89f19
github.com/gin-gonic/gin v1.8.1
github.com/golang-jwt/jwt/v4 v4.2.0
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
github.com/eclipse/paho.mqtt.golang v1.4.2
github.com/elastic/go-sysinfo v1.9.0
github.com/gin-contrib/cors v1.4.0
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/contrib v0.0.0-20221130124618-7e01895a63f2
github.com/gin-gonic/gin v1.9.1
github.com/gofrs/uuid v4.4.0+incompatible
github.com/golang-jwt/jwt/v4 v4.4.3
github.com/golang-module/carbon/v2 v2.2.3
github.com/gorilla/websocket v1.5.0
github.com/kellydunn/golang-geo v0.7.0
github.com/kerberos-io/joy4 v1.0.33
github.com/kerberos-io/onvif v0.0.3
github.com/kerberos-io/joy4 v1.0.64
github.com/kerberos-io/onvif v0.0.14
github.com/minio/minio-go/v6 v6.0.57
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
github.com/pion/webrtc/v3 v3.1.41
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/sirupsen/logrus v1.8.1
github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe
github.com/swaggo/gin-swagger v1.5.0
github.com/swaggo/swag v1.8.3
github.com/pion/rtp v1.8.3
github.com/pion/webrtc/v3 v3.1.50
github.com/sirupsen/logrus v1.9.0
github.com/swaggo/files v1.0.0
github.com/swaggo/gin-swagger v1.5.3
github.com/swaggo/swag v1.8.9
github.com/tevino/abool v1.2.0
gocv.io/x/gocv v0.31.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
github.com/yapingcat/gomedia v0.0.0-20231203152327-9078d4068ce7
github.com/zaf/g711 v0.0.0-20220109202201-cf0017bf0359
go.mongodb.org/mongo-driver v1.7.5
gopkg.in/DataDog/dd-trace-go.v1 v1.46.0
gopkg.in/natefinch/lumberjack.v2 v2.0.0
)
require (
github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1 // indirect
github.com/DataDog/datadog-go v4.8.2+incompatible // indirect
github.com/DataDog/datadog-go/v5 v5.0.2 // indirect
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
github.com/DataDog/gostackparse v0.5.0 // indirect
github.com/DataDog/sketches-go v1.2.1 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/Microsoft/go-winio v0.5.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beevik/etree v1.1.0 // indirect
github.com/beevik/etree v1.2.0 // indirect
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/clbanning/mxj v1.8.4 // indirect
github.com/elgs/gostrgen v0.0.0-20161222160715-9d61ae07eeae // indirect
github.com/clbanning/mxj/v2 v2.7.0 // indirect
github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/elastic/go-windows v1.0.0 // indirect
github.com/elgs/gostrgen v0.0.0-20220325073726-0c3e00d082f6 // indirect
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.6 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/swag v0.19.15 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
github.com/go-playground/validator/v10 v10.10.0 // indirect
github.com/goccy/go-json v0.9.7 // indirect
github.com/gofrs/uuid v4.2.0+incompatible // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.4.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/pprof v0.0.0-20210423192551-a2663126120b // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.0 // indirect
github.com/klauspost/cpuid v1.2.3 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/kylelemons/go-gypsy v1.0.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/lib/pq v1.10.6 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/minio/md5-simd v1.1.0 // indirect
github.com/minio/sha256-simd v0.1.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/pion/datachannel v1.5.2 // indirect
github.com/onsi/gomega v1.27.4 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pion/datachannel v1.5.5 // indirect
github.com/pion/dtls/v2 v2.1.5 // indirect
github.com/pion/ice/v2 v2.2.6 // indirect
github.com/pion/ice/v2 v2.2.12 // indirect
github.com/pion/interceptor v0.1.11 // indirect
github.com/pion/logging v0.2.2 // indirect
github.com/pion/mdns v0.0.5 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/rtcp v1.2.9 // indirect
github.com/pion/rtp v1.7.13 // indirect
github.com/pion/sctp v1.8.2 // indirect
github.com/pion/sdp/v3 v3.0.5 // indirect
github.com/pion/srtp/v2 v2.0.9 // indirect
github.com/pion/rtcp v1.2.12 // indirect
github.com/pion/sctp v1.8.5 // indirect
github.com/pion/sdp/v3 v3.0.6 // indirect
github.com/pion/srtp/v2 v2.0.10 // indirect
github.com/pion/stun v0.3.5 // indirect
github.com/pion/transport v0.13.0 // indirect
github.com/pion/transport v0.14.1 // indirect
github.com/pion/turn/v2 v2.0.8 // indirect
github.com/pion/udp v0.1.1 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/ugorji/go/codec v1.2.7 // indirect
github.com/yuin/goldmark v1.4.1 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/tinylib/msgp v1.1.6 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.0.2 // indirect
github.com/xdg-go/stringprep v1.0.2 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
github.com/ziutek/mymysql v1.5.4 // indirect
golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8 // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.11 // indirect
google.golang.org/protobuf v1.28.0 // indirect
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.6 // indirect
google.golang.org/grpc v1.32.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/ini.v1 v1.42.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
inet.af/netaddr v0.0.0-20220617031823-097006376321 // indirect
)

File diff suppressed because it is too large Load Diff

View File

@@ -1,38 +1,124 @@
package main
import (
"fmt"
"context"
"flag"
"os"
"time"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/components"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/onvif"
configService "github.com/kerberos-io/agent/machinery/src/config"
"github.com/kerberos-io/agent/machinery/src/routers"
"github.com/kerberos-io/agent/machinery/src/utils"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/profiler"
)
var VERSION = "3.0.0"
func main() {
// You might be interested in debugging the agent.
if os.Getenv("DATADOG_AGENT_ENABLED") == "true" {
if os.Getenv("DATADOG_AGENT_K8S_ENABLED") == "true" {
tracer.Start()
defer tracer.Stop()
} else {
service := os.Getenv("DATADOG_AGENT_SERVICE")
environment := os.Getenv("DATADOG_AGENT_ENVIRONMENT")
log.Log.Info("Starting Datadog Agent with service: " + service + " and environment: " + environment)
rules := []tracer.SamplingRule{tracer.RateRule(1)}
tracer.Start(
tracer.WithSamplingRules(rules),
tracer.WithService(service),
tracer.WithEnv(environment),
)
defer tracer.Stop()
err := profiler.Start(
profiler.WithService(service),
profiler.WithEnv(environment),
profiler.WithProfileTypes(
profiler.CPUProfile,
profiler.HeapProfile,
),
)
if err != nil {
log.Log.Fatal(err.Error())
}
defer profiler.Stop()
}
}
const VERSION = "3.0"
action := os.Args[1]
// Start the show ;)
// We'll parse the flags (named variables), and start the agent.
log.Log.Init()
var action string
var configDirectory string
var name string
var port string
var timeout string
flag.StringVar(&action, "action", "version", "Tell us what you want do 'run' or 'version'")
flag.StringVar(&configDirectory, "config", ".", "Where is the configuration stored")
flag.StringVar(&name, "name", "agent", "Provide a name for the agent")
flag.StringVar(&port, "port", "80", "On which port should the agent run")
flag.StringVar(&timeout, "timeout", "2000", "Number of milliseconds to wait for the ONVIF discovery to complete")
flag.Parse()
// Specify the level of loggin: "info", "warning", "debug", "error" or "fatal."
logLevel := os.Getenv("LOG_LEVEL")
if logLevel == "" {
logLevel = "info"
}
// Specify the output formatter of the log: "text" or "json".
logOutput := os.Getenv("LOG_OUTPUT")
if logOutput == "" {
logOutput = "text"
}
// Specify the timezone of the log: "UTC" or "Local".
timezone, _ := time.LoadLocation("CET")
log.Log.Init(logLevel, logOutput, configDirectory, timezone)
switch action {
case "version":
log.Log.Info("You are currrently running Kerberos Agent " + VERSION)
case "pending-upload":
name := os.Args[2]
fmt.Println(name)
case "version":
log.Log.Info("main.Main(): You are currrently running Kerberos Agent " + VERSION)
case "discover":
timeout := os.Args[2]
fmt.Println(timeout)
// Convert duration to int
timeout, err := time.ParseDuration(timeout + "ms")
if err != nil {
log.Log.Fatal("main.Main(): could not parse timeout: " + err.Error())
return
}
onvif.Discover(timeout)
case "decrypt":
log.Log.Info("main.Main(): Decrypting: " + flag.Arg(0) + " with key: " + flag.Arg(1))
symmetricKey := []byte(flag.Arg(1))
if symmetricKey == nil || len(symmetricKey) == 0 {
log.Log.Fatal("main.Main(): symmetric key should not be empty")
return
}
if len(symmetricKey) != 32 {
log.Log.Fatal("main.Main(): symmetric key should be 32 bytes")
return
}
utils.Decrypt(flag.Arg(0), symmetricKey)
case "run":
{
name := os.Args[2]
port := os.Args[3]
// Print Kerberos.io ASCII art
utils.PrintASCIIArt()
// Print the environment variables which include "AGENT_" as prefix.
utils.PrintEnvironmentVariables()
// Read the config on start, and pass it to the other
// function and features. Please note that this might be changed
@@ -42,18 +128,59 @@ func main() {
configuration.Port = port
// Open this configuration either from Kerberos Agent or Kerberos Factory.
components.OpenConfig(&configuration)
configService.OpenConfig(configDirectory, &configuration)
// We will override the configuration with the environment variables
configService.OverrideWithEnvironmentVariables(&configuration)
// Printing final configuration
utils.PrintConfiguration(&configuration)
// Check the folder permissions, it might be that we do not have permissions to write
// recordings, update the configuration or save snapshots.
utils.CheckDataDirectoryPermissions(configDirectory)
// Set timezone
timezone, _ := time.LoadLocation(configuration.Config.Timezone)
log.Log.Init(logLevel, logOutput, configDirectory, timezone)
// Check if we have a device Key or not, if not
// we will generate one.
if configuration.Config.Key == "" {
key := utils.RandStringBytesMaskImpr(30)
configuration.Config.Key = key
err := configService.StoreConfig(configDirectory, configuration.Config)
if err == nil {
log.Log.Info("main.Main(): updated unique key for agent to: " + key)
} else {
log.Log.Info("main.Main(): something went wrong while trying to store key: " + key)
}
}
// Create a cancelable context, which will be used to cancel and restart.
// This is used to restart the agent when the configuration is updated.
ctx, cancel := context.WithCancel(context.Background())
// We create a capture object, this will contain all the streaming clients.
// And allow us to extract media from within difference places in the agent.
capture := capture.Capture{
RTSPClient: nil,
RTSPSubClient: nil,
}
// Bootstrapping the agent
communication := models.Communication{
Context: &ctx,
CancelContext: &cancel,
HandleBootstrap: make(chan string, 1),
}
go components.Bootstrap(&configuration, &communication)
go components.Bootstrap(configDirectory, &configuration, &communication, &capture)
// Start the REST API.
routers.StartWebserver(&configuration, &communication)
routers.StartWebserver(configDirectory, &configuration, &communication, &capture)
}
default:
fmt.Println("Sorry I don't understand :(")
log.Log.Error("main.Main(): Sorry I don't understand :(")
}
}

View File

@@ -0,0 +1,980 @@
package capture
// #cgo pkg-config: libavcodec libavutil libswscale
// #include <libavcodec/avcodec.h>
// #include <libavutil/imgutils.h>
// #include <libswscale/swscale.h>
import "C"
import (
"context"
"errors"
"fmt"
"image"
"reflect"
"strconv"
"sync"
"time"
"unsafe"
"github.com/bluenviron/gortsplib/v4"
"github.com/bluenviron/gortsplib/v4/pkg/base"
"github.com/bluenviron/gortsplib/v4/pkg/description"
"github.com/bluenviron/gortsplib/v4/pkg/format"
"github.com/bluenviron/gortsplib/v4/pkg/format/rtph264"
"github.com/bluenviron/gortsplib/v4/pkg/format/rtph265"
"github.com/bluenviron/gortsplib/v4/pkg/format/rtplpcm"
"github.com/bluenviron/gortsplib/v4/pkg/format/rtpmpeg4audio"
"github.com/bluenviron/gortsplib/v4/pkg/format/rtpsimpleaudio"
"github.com/bluenviron/mediacommon/pkg/codecs/h264"
"github.com/bluenviron/mediacommon/pkg/codecs/h265"
"github.com/bluenviron/mediacommon/pkg/codecs/mpeg4audio"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/packets"
"github.com/pion/rtp"
)
// Golibrtsp is an RTSP client built on gortsplib/v4. It implements the
// RTSPClient interface (embedded below) and keeps, per supported codec,
// the negotiated media description, format and decoders, plus the list
// of discovered streams.
type Golibrtsp struct {
	RTSPClient
	Url    string // RTSP endpoint this client connects to.
	Client gortsplib.Client

	// Guards the FFmpeg frame decoders below.
	VideoDecoderMutex *sync.Mutex

	// H264 video stream state (Index is the position in Streams).
	VideoH264Index        int8
	VideoH264Media        *description.Media
	VideoH264Forma        *format.H264
	VideoH264Decoder      *rtph264.Decoder
	VideoH264FrameDecoder *Decoder

	// H265 video stream state.
	VideoH265Index        int8
	VideoH265Media        *description.Media
	VideoH265Forma        *format.H265
	VideoH265Decoder      *rtph265.Decoder
	VideoH265FrameDecoder *Decoder

	// LPCM audio stream state.
	AudioLPCMIndex   int8
	AudioLPCMMedia   *description.Media
	AudioLPCMForma   *format.LPCM
	AudioLPCMDecoder *rtplpcm.Decoder

	// G711 (PCM mu-law) audio stream state.
	AudioG711Index   int8
	AudioG711Media   *description.Media
	AudioG711Forma   *format.G711
	AudioG711Decoder *rtpsimpleaudio.Decoder

	// Audio back channel (talk-back) state, negotiated separately in
	// ConnectBackChannel.
	HasBackChannel            bool
	AudioG711IndexBackChannel int8
	AudioG711MediaBackChannel *description.Media
	AudioG711FormaBackChannel *format.G711

	// AAC (MPEG4 audio) stream state.
	AudioMPEG4Index   int8
	AudioMPEG4Media   *description.Media
	AudioMPEG4Forma   *format.MPEG4Audio
	AudioMPEG4Decoder *rtpmpeg4audio.Decoder

	// Streams discovered on the server, in setup order.
	Streams []packets.Stream
}
// Connect dials the RTSP server at g.Url over TCP, describes the
// published medias and sets up every supported stream it finds
// (H264/H265 video, G711 and AAC audio). Each discovered stream is
// appended to g.Streams and its RTP/frame decoders are initialised.
func (g *Golibrtsp) Connect(ctx context.Context) (err error) {
	transport := gortsplib.TransportTCP
	g.Client = gortsplib.Client{
		RequestBackChannels: false,
		Transport:           &transport,
	}
	// Parse the RTSP URL.
	u, err := base.ParseURL(g.Url)
	if err != nil {
		log.Log.Debug("capture.golibrtsp.Connect(ParseURL): " + err.Error())
		return
	}
	// Connect to the server.
	// NOTE(review): a Start error is logged but not returned; Describe
	// below will then almost certainly fail — confirm this is intended.
	err = g.Client.Start(u.Scheme, u.Host)
	if err != nil {
		log.Log.Debug("capture.golibrtsp.Connect(Start): " + err.Error())
	}
	// Find the published medias.
	desc, _, err := g.Client.Describe(u)
	if err != nil {
		log.Log.Debug("capture.golibrtsp.Connect(Describe): " + err.Error())
		return
	}
	// Initialise the mutex guarding the frame decoders.
	g.VideoDecoderMutex = &sync.Mutex{}
	// Find the H264 media and format.
	var formaH264 *format.H264
	mediH264 := desc.FindFormat(&formaH264)
	g.VideoH264Media = mediH264
	g.VideoH264Forma = formaH264
	if mediH264 == nil {
		log.Log.Debug("capture.golibrtsp.Connect(H264): " + "video media not found")
	} else {
		// Setup a video media.
		_, err = g.Client.Setup(desc.BaseURL, mediH264, 0, 0)
		if err != nil {
			// Something went wrong .. Do something
			log.Log.Error("capture.golibrtsp.Connect(H264): " + err.Error())
		} else {
			// Get the SPS from the SDP and derive width/height/FPS.
			var sps h264.SPS
			err = sps.Unmarshal(formaH264.SPS)
			if err != nil {
				log.Log.Debug("capture.golibrtsp.Connect(H264): " + err.Error())
				return
			}
			g.Streams = append(g.Streams, packets.Stream{
				Name:          formaH264.Codec(),
				IsVideo:       true,
				IsAudio:       false,
				SPS:           formaH264.SPS,
				PPS:           formaH264.PPS,
				Width:         sps.Width(),
				Height:        sps.Height(),
				FPS:           sps.FPS(),
				IsBackChannel: false,
			})
			// Set the index for the video stream.
			g.VideoH264Index = int8(len(g.Streams)) - 1
			// Setup RTP/H264 -> H264 decoder.
			// NOTE(review): the CreateDecoder/newDecoder errors below are
			// silently discarded; on failure the decoders stay nil.
			rtpDec, err := formaH264.CreateDecoder()
			if err != nil {
				// Something went wrong .. Do something
			}
			g.VideoH264Decoder = rtpDec
			// Setup H264 -> raw frames decoder.
			frameDec, err := newDecoder("H264")
			if err != nil {
				// Something went wrong .. Do something
			}
			g.VideoH264FrameDecoder = frameDec
		}
	}
	// Find the H265 media and format.
	var formaH265 *format.H265
	mediH265 := desc.FindFormat(&formaH265)
	g.VideoH265Media = mediH265
	g.VideoH265Forma = formaH265
	if mediH265 == nil {
		log.Log.Debug("capture.golibrtsp.Connect(H265): " + "video media not found")
	} else {
		// Setup a video media.
		_, err = g.Client.Setup(desc.BaseURL, mediH265, 0, 0)
		if err != nil {
			// Something went wrong .. Do something
			log.Log.Error("capture.golibrtsp.Connect(H265): " + err.Error())
		} else {
			// Get the SPS from the SDP and derive width/height/FPS.
			var sps h265.SPS
			err = sps.Unmarshal(formaH265.SPS)
			if err != nil {
				log.Log.Info("capture.golibrtsp.Connect(H265): " + err.Error())
				return
			}
			g.Streams = append(g.Streams, packets.Stream{
				Name:          formaH265.Codec(),
				IsVideo:       true,
				IsAudio:       false,
				SPS:           formaH265.SPS,
				PPS:           formaH265.PPS,
				VPS:           formaH265.VPS,
				Width:         sps.Width(),
				Height:        sps.Height(),
				FPS:           sps.FPS(),
				IsBackChannel: false,
			})
			// Set the index for the video stream.
			g.VideoH265Index = int8(len(g.Streams)) - 1
			// Setup RTP/H265 -> H265 decoder.
			// NOTE(review): errors discarded, as in the H264 branch.
			rtpDec, err := formaH265.CreateDecoder()
			if err != nil {
				// Something went wrong .. Do something
			}
			g.VideoH265Decoder = rtpDec
			// Setup H265 -> raw frames decoder.
			frameDec, err := newDecoder("H265")
			if err != nil {
				// Something went wrong .. Do something
			}
			g.VideoH265FrameDecoder = frameDec
		}
	}
	// Look for an audio stream: find the G711 media and format.
	audioForma, audioMedi := FindPCMU(desc, false)
	g.AudioG711Media = audioMedi
	g.AudioG711Forma = audioForma
	if audioMedi == nil {
		log.Log.Debug("capture.golibrtsp.Connect(G711): " + "audio media not found")
	} else {
		// Setup an audio media.
		_, err = g.Client.Setup(desc.BaseURL, audioMedi, 0, 0)
		if err != nil {
			// Something went wrong .. Do something
			log.Log.Error("capture.golibrtsp.Connect(G711): " + err.Error())
		} else {
			// Create the RTP decoder.
			audiortpDec, err := audioForma.CreateDecoder()
			if err != nil {
				// Something went wrong .. Do something
				log.Log.Error("capture.golibrtsp.Connect(G711): " + err.Error())
			} else {
				g.AudioG711Decoder = audiortpDec
				g.Streams = append(g.Streams, packets.Stream{
					Name:          "PCM_MULAW",
					IsVideo:       false,
					IsAudio:       true,
					IsBackChannel: false,
				})
				// Set the index for the audio stream.
				g.AudioG711Index = int8(len(g.Streams)) - 1
			}
		}
	}
	// Look for an audio stream: find the AAC media and format.
	audioFormaMPEG4, audioMediMPEG4 := FindMPEG4Audio(desc, false)
	g.AudioMPEG4Media = audioMediMPEG4
	g.AudioMPEG4Forma = audioFormaMPEG4
	if audioMediMPEG4 == nil {
		log.Log.Debug("capture.golibrtsp.Connect(MPEG4): " + "audio media not found")
	} else {
		// Setup an audio media.
		_, err = g.Client.Setup(desc.BaseURL, audioMediMPEG4, 0, 0)
		if err != nil {
			// Something went wrong .. Do something
			log.Log.Error("capture.golibrtsp.Connect(MPEG4): " + err.Error())
		} else {
			g.Streams = append(g.Streams, packets.Stream{
				Name:          "AAC",
				IsVideo:       false,
				IsAudio:       true,
				IsBackChannel: false,
			})
			// Set the index for the audio stream.
			g.AudioMPEG4Index = int8(len(g.Streams)) - 1
			// Create the RTP decoder.
			// NOTE(review): the stream is registered before this decoder
			// is known to be valid; on error AudioMPEG4Decoder stays nil.
			audiortpDec, err := audioFormaMPEG4.CreateDecoder()
			if err != nil {
				// Something went wrong .. Do something
				log.Log.Error("capture.golibrtsp.Connect(MPEG4): " + err.Error())
			}
			g.AudioMPEG4Decoder = audiortpDec
		}
	}
	return
}
// ConnectBackChannel dials the RTSP server again with back channels
// requested, and sets up the G711 mu-law audio back channel when the
// device offers one. HasBackChannel reflects the outcome; a non-nil
// error is returned when no back channel is available.
func (g *Golibrtsp) ConnectBackChannel(ctx context.Context) (err error) {
	// Transport TCP.
	transport := gortsplib.TransportTCP
	g.Client = gortsplib.Client{
		RequestBackChannels: true,
		Transport:           &transport,
	}
	// Parse the RTSP URL.
	u, err := base.ParseURL(g.Url)
	if err != nil {
		log.Log.Error("capture.golibrtsp.ConnectBackChannel(): " + err.Error())
		return
	}
	// Connect to the server.
	// NOTE(review): a Start error is logged but not returned — confirm.
	err = g.Client.Start(u.Scheme, u.Host)
	if err != nil {
		log.Log.Error("capture.golibrtsp.ConnectBackChannel(): " + err.Error())
	}
	// Find the published medias.
	desc, _, err := g.Client.Describe(u)
	if err != nil {
		log.Log.Error("capture.golibrtsp.ConnectBackChannel(): " + err.Error())
		return
	}
	// Look for an audio back channel: find the G711 (PCMU) media and format.
	g.HasBackChannel = false
	audioFormaBackChannel, audioMediBackChannel := FindPCMU(desc, true)
	g.AudioG711MediaBackChannel = audioMediBackChannel
	g.AudioG711FormaBackChannel = audioFormaBackChannel
	if audioMediBackChannel == nil {
		log.Log.Error("capture.golibrtsp.ConnectBackChannel(): audio backchannel not found, not a real error, however you might expect a backchannel. One of the reasons might be that the device already has an active client connected to the backchannel.")
		err = errors.New("no audio backchannel found")
	} else {
		// Setup the audio media.
		_, err = g.Client.Setup(desc.BaseURL, audioMediBackChannel, 0, 0)
		if err != nil {
			// Something went wrong .. Do something
			log.Log.Error("capture.golibrtsp.ConnectBackChannel(): " + err.Error())
			g.HasBackChannel = false
		} else {
			g.HasBackChannel = true
			g.Streams = append(g.Streams, packets.Stream{
				Name:          "PCM_MULAW",
				IsVideo:       false,
				IsAudio:       true,
				IsBackChannel: true,
			})
			// Set the index for the back channel audio stream.
			g.AudioG711IndexBackChannel = int8(len(g.Streams)) - 1
		}
	}
	return
}
// Start registers RTP packet handlers for every negotiated stream
// (G711, AAC, H264, H265), issues PLAY and returns. Decoded packets are
// written to the supplied queue; keyframe counters on communication are
// used elsewhere to detect a blocking device.
func (g *Golibrtsp) Start(ctx context.Context, streamType string, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) (err error) {
	log.Log.Debug("capture.golibrtsp.Start(): started")
	// Called when a MULAW audio RTP packet arrives.
	if g.AudioG711Media != nil && g.AudioG711Forma != nil {
		g.Client.OnPacketRTP(g.AudioG711Media, g.AudioG711Forma, func(rtppkt *rtp.Packet) {
			// Decode timestamp.
			pts, ok := g.Client.PacketPTS(g.AudioG711Media, rtppkt)
			if !ok {
				log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
				return
			}
			// Extract LPCM samples from RTP packets.
			op, err := g.AudioG711Decoder.Decode(rtppkt)
			if err != nil {
				log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
				return
			}
			pkt := packets.Packet{
				IsKeyFrame:      false,
				Packet:          rtppkt,
				Data:            op,
				Time:            pts,
				CompositionTime: pts,
				Idx:             g.AudioG711Index,
				IsVideo:         false,
				IsAudio:         true,
				Codec:           "PCM_MULAW",
			}
			queue.WritePacket(pkt)
		})
	}
	// Called when an AAC audio RTP packet arrives.
	if g.AudioMPEG4Media != nil && g.AudioMPEG4Forma != nil {
		g.Client.OnPacketRTP(g.AudioMPEG4Media, g.AudioMPEG4Forma, func(rtppkt *rtp.Packet) {
			// Decode timestamp.
			pts, ok := g.Client.PacketPTS(g.AudioMPEG4Media, rtppkt)
			if !ok {
				log.Log.Error("capture.golibrtsp.Start(): " + "unable to get PTS")
				return
			}
			// Extract access units from the RTP packets, then re-encode
			// them with ADTS headers.
			aus, err := g.AudioMPEG4Decoder.Decode(rtppkt)
			if err != nil {
				log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
				return
			}
			enc, err := WriteMPEG4Audio(g.AudioMPEG4Forma, aus)
			if err != nil {
				log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
				return
			}
			// NOTE(review): Idx is g.AudioG711Index here rather than
			// g.AudioMPEG4Index — confirm whether that is intended.
			pkt := packets.Packet{
				IsKeyFrame:      false,
				Packet:          rtppkt,
				Data:            enc,
				Time:            pts,
				CompositionTime: pts,
				Idx:             g.AudioG711Index,
				IsVideo:         false,
				IsAudio:         true,
				Codec:           "AAC",
			}
			queue.WritePacket(pkt)
		})
	}
	// Called when a video RTP packet arrives for H264.
	// NOTE(review): filteredAU is captured by both the H264 and H265
	// handlers below — confirm both can never be active concurrently.
	var filteredAU [][]byte
	if g.VideoH264Media != nil && g.VideoH264Forma != nil {
		g.Client.OnPacketRTP(g.VideoH264Media, g.VideoH264Forma, func(rtppkt *rtp.Packet) {
			// Check if we need to stop the thread, because of a
			// reconfiguration.
			select {
			case <-communication.HandleStream:
				return
			default:
			}
			if len(rtppkt.Payload) > 0 {
				// Decode timestamp.
				pts, ok := g.Client.PacketPTS(g.VideoH264Media, rtppkt)
				if !ok {
					log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
					return
				}
				// Extract access units from RTP packets. We need to do
				// this because the decoder expects a full access unit;
				// only then can we tell whether it is a keyframe.
				au, errDecode := g.VideoH264Decoder.Decode(rtppkt)
				if errDecode != nil {
					if errDecode != rtph264.ErrNonStartingPacketAndNoPrevious && errDecode != rtph264.ErrMorePacketsNeeded {
						log.Log.Error("capture.golibrtsp.Start(): " + errDecode.Error())
					}
					return
				}
				// Prepend an AUD; this is required by some players.
				filteredAU = [][]byte{
					{byte(h264.NALUTypeAccessUnitDelimiter), 240},
				}
				// Check if we have a keyframe.
				nonIDRPresent := false
				idrPresent := false
				for _, nalu := range au {
					typ := h264.NALUType(nalu[0] & 0x1F)
					switch typ {
					case h264.NALUTypeAccessUnitDelimiter:
						continue
					case h264.NALUTypeIDR:
						idrPresent = true
					case h264.NALUTypeNonIDR:
						nonIDRPresent = true
					}
					filteredAU = append(filteredAU, nalu)
				}
				if len(filteredAU) <= 1 || (!nonIDRPresent && !idrPresent) {
					return
				}
				// Convert to an Annex-B byte stream packet.
				enc, err := h264.AnnexBMarshal(filteredAU)
				if err != nil {
					log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
					return
				}
				pkt := packets.Packet{
					IsKeyFrame:      idrPresent,
					Packet:          rtppkt,
					Data:            enc,
					Time:            pts,
					CompositionTime: pts,
					Idx:             g.VideoH264Index,
					IsVideo:         true,
					IsAudio:         false,
					Codec:           "H264",
				}
				// Strip the leading 4-byte start code; for keyframes,
				// prepend SPS and PPS, each with an Annex-B start code.
				pkt.Data = pkt.Data[4:]
				if pkt.IsKeyFrame {
					annexbNALUStartCode := func() []byte { return []byte{0x00, 0x00, 0x00, 0x01} }
					pkt.Data = append(annexbNALUStartCode(), pkt.Data...)
					pkt.Data = append(g.VideoH264Forma.PPS, pkt.Data...)
					pkt.Data = append(annexbNALUStartCode(), pkt.Data...)
					pkt.Data = append(g.VideoH264Forma.SPS, pkt.Data...)
					pkt.Data = append(annexbNALUStartCode(), pkt.Data...)
				}
				queue.WritePacket(pkt)
				// Check if we need to stop the thread, because of a
				// reconfiguration.
				select {
				case <-communication.HandleStream:
					return
				default:
				}
				if idrPresent {
					// Increment the packet counters, so we know the
					// device is not blocking.
					if streamType == "main" {
						r := communication.PackageCounter.Load().(int64)
						log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
						communication.PackageCounter.Store((r + 1) % 1000)
						communication.LastPacketTimer.Store(time.Now().Unix())
					} else if streamType == "sub" {
						r := communication.PackageCounterSub.Load().(int64)
						log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
						communication.PackageCounterSub.Store((r + 1) % 1000)
						communication.LastPacketTimerSub.Store(time.Now().Unix())
					}
				}
			}
		})
	}
	// Called when a video RTP packet arrives for H265.
	if g.VideoH265Media != nil && g.VideoH265Forma != nil {
		g.Client.OnPacketRTP(g.VideoH265Media, g.VideoH265Forma, func(rtppkt *rtp.Packet) {
			// Check if we need to stop the thread, because of a
			// reconfiguration.
			select {
			case <-communication.HandleStream:
				return
			default:
			}
			if len(rtppkt.Payload) > 0 {
				// Decode timestamp.
				pts, ok := g.Client.PacketPTS(g.VideoH265Media, rtppkt)
				if !ok {
					log.Log.Debug("capture.golibrtsp.Start(): " + "unable to get PTS")
					return
				}
				// Extract access units from RTP packets (see H264 above).
				au, errDecode := g.VideoH265Decoder.Decode(rtppkt)
				if errDecode != nil {
					if errDecode != rtph265.ErrNonStartingPacketAndNoPrevious && errDecode != rtph265.ErrMorePacketsNeeded {
						log.Log.Error("capture.golibrtsp.Start(): " + errDecode.Error())
					}
					return
				}
				// Prepend an AUD NAL unit; strip parameter sets and AUDs.
				filteredAU = [][]byte{
					{byte(h265.NALUType_AUD_NUT) << 1, 1, 0x50},
				}
				isRandomAccess := false
				for _, nalu := range au {
					typ := h265.NALUType((nalu[0] >> 1) & 0b111111)
					switch typ {
					/*case h265.NALUType_VPS_NUT:
					continue*/
					case h265.NALUType_SPS_NUT:
						continue
					case h265.NALUType_PPS_NUT:
						continue
					case h265.NALUType_AUD_NUT:
						continue
					case h265.NALUType_IDR_W_RADL, h265.NALUType_IDR_N_LP, h265.NALUType_CRA_NUT:
						isRandomAccess = true
					}
					filteredAU = append(filteredAU, nalu)
				}
				au = filteredAU
				if len(au) <= 1 {
					return
				}
				// Add VPS, SPS and PPS before a random access access unit.
				if isRandomAccess {
					au = append([][]byte{
						g.VideoH265Forma.VPS,
						g.VideoH265Forma.SPS,
						g.VideoH265Forma.PPS}, au...)
				}
				enc, err := h264.AnnexBMarshal(au)
				if err != nil {
					log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
					return
				}
				pkt := packets.Packet{
					IsKeyFrame:      isRandomAccess,
					Packet:          rtppkt,
					Data:            enc,
					Time:            pts,
					CompositionTime: pts,
					Idx:             g.VideoH265Index,
					IsVideo:         true,
					IsAudio:         false,
					Codec:           "H265",
				}
				queue.WritePacket(pkt)
				// Check if we need to stop the thread, because of a
				// reconfiguration.
				select {
				case <-communication.HandleStream:
					return
				default:
				}
				if isRandomAccess {
					// Increment the packet counters, so we know the
					// device is not blocking.
					if streamType == "main" {
						r := communication.PackageCounter.Load().(int64)
						log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
						communication.PackageCounter.Store((r + 1) % 1000)
						communication.LastPacketTimer.Store(time.Now().Unix())
					} else if streamType == "sub" {
						r := communication.PackageCounterSub.Load().(int64)
						log.Log.Debug("capture.golibrtsp.Start(): packet size " + strconv.Itoa(len(pkt.Data)))
						communication.PackageCounterSub.Store((r + 1) % 1000)
						communication.LastPacketTimerSub.Store(time.Now().Unix())
					}
				}
			}
		})
	}
	// Wait for a second, so we can be sure the stream is playing.
	time.Sleep(1 * time.Second)
	// Play the stream.
	_, err = g.Client.Play(nil)
	if err != nil {
		log.Log.Error("capture.golibrtsp.Start(): " + err.Error())
	}
	return
}
// StartBackChannel issues PLAY on the back channel connection so the
// agent can start writing audio packets to the device.
func (g *Golibrtsp) StartBackChannel(ctx context.Context) (err error) {
	log.Log.Info("capture.golibrtsp.StartBackChannel(): started")
	// Give the server a moment, so we can be sure the stream is playing.
	time.Sleep(1 * time.Second)
	// Play the stream.
	if _, err = g.Client.Play(nil); err != nil {
		log.Log.Error("capture.golibrtsp.StartBackChannel(): " + err.Error())
	}
	return
}
// WritePacket forwards an RTP packet to the audio back channel, when one
// has been negotiated. It is a no-op when no back channel is available.
func (g *Golibrtsp) WritePacket(pkt packets.Packet) error {
	if !g.HasBackChannel || g.AudioG711MediaBackChannel == nil {
		return nil
	}
	if err := g.Client.WritePacketRTP(g.AudioG711MediaBackChannel, pkt.Packet); err != nil {
		log.Log.Debug("capture.golibrtsp.WritePacket(): " + err.Error())
		return err
	}
	return nil
}
// DecodePacket decodes a compressed video packet into a YCbCr image,
// using whichever video decoder (H264 or H265) was set up in Connect.
func (g *Golibrtsp) DecodePacket(pkt packets.Packet) (image.YCbCr, error) {
	var img image.YCbCr
	var err error
	g.VideoDecoderMutex.Lock()
	switch {
	case len(pkt.Data) == 0:
		err = errors.New("TSPClient(Golibrtsp).DecodePacket(): empty frame")
	case g.VideoH264Decoder != nil:
		img, err = g.VideoH264FrameDecoder.decode(pkt.Data)
	case g.VideoH265Decoder != nil:
		img, err = g.VideoH265FrameDecoder.decode(pkt.Data)
	default:
		err = errors.New("TSPClient(Golibrtsp).DecodePacket(): no decoder found, might already be closed")
	}
	g.VideoDecoderMutex.Unlock()
	if err != nil {
		log.Log.Error("capture.golibrtsp.DecodePacket(): " + err.Error())
		return image.YCbCr{}, err
	}
	// The decoder reports failures as an empty image with a nil error.
	if img.Bounds().Empty() {
		log.Log.Debug("capture.golibrtsp.DecodePacket(): empty frame")
		return image.YCbCr{}, errors.New("Empty image")
	}
	return img, nil
}
// DecodePacketRaw decodes a compressed video packet into a grayscale
// (luma only) image, using whichever video decoder (H264 or H265) was
// set up in Connect. The returned image owns its pixel buffer: the
// decoder output aliases FFmpeg memory that is overwritten on the next
// decode, so a deep copy is made before returning.
func (g *Golibrtsp) DecodePacketRaw(pkt packets.Packet) (image.Gray, error) {
	var img image.Gray
	var err error
	g.VideoDecoderMutex.Lock()
	if len(pkt.Data) == 0 {
		err = errors.New("capture.golibrtsp.DecodePacketRaw(): empty frame")
	} else if g.VideoH264Decoder != nil {
		img, err = g.VideoH264FrameDecoder.decodeRaw(pkt.Data)
	} else if g.VideoH265Decoder != nil {
		img, err = g.VideoH265FrameDecoder.decodeRaw(pkt.Data)
	} else {
		err = errors.New("capture.golibrtsp.DecodePacketRaw(): no decoder found, might already be closed")
	}
	g.VideoDecoderMutex.Unlock()
	if err != nil {
		log.Log.Error("capture.golibrtsp.DecodePacketRaw(): " + err.Error())
		return image.Gray{}, err
	}
	if img.Bounds().Empty() {
		log.Log.Debug("capture.golibrtsp.DecodePacketRaw(): empty image")
		return image.Gray{}, errors.New("Empty image")
	}
	// Deep copy with a Pix buffer sized to the source: allocating via
	// image.NewGray(img.Bounds()) sizes Pix as width*height, which is
	// too short whenever the decoder stride exceeds the width.
	imgDeepCopy := image.Gray{
		Pix:    make([]uint8, len(img.Pix)),
		Stride: img.Stride,
		Rect:   img.Rect,
	}
	copy(imgDeepCopy.Pix, img.Pix)
	return imgDeepCopy, nil
}
// GetStreams returns every stream discovered on the RTSP server
// (video, audio and back channel), in setup order.
func (g *Golibrtsp) GetStreams() ([]packets.Stream, error) {
	return g.Streams, nil
}
// GetVideoStreams returns only the video streams discovered on the
// RTSP server.
func (g *Golibrtsp) GetVideoStreams() ([]packets.Stream, error) {
	var out []packets.Stream
	for i := range g.Streams {
		if g.Streams[i].IsVideo {
			out = append(out, g.Streams[i])
		}
	}
	return out, nil
}
// GetAudioStreams returns only the audio streams discovered on the
// RTSP server.
func (g *Golibrtsp) GetAudioStreams() ([]packets.Stream, error) {
	var out []packets.Stream
	for i := range g.Streams {
		if g.Streams[i].IsAudio {
			out = append(out, g.Streams[i])
		}
	}
	return out, nil
}
// Close tears down the RTSP connection and releases the FFmpeg frame
// decoders.
func (g *Golibrtsp) Close() error {
	// Close the RTSP client (stops reading and closes the socket).
	g.Client.Close()
	// Guard on the frame decoder itself (not the RTP decoder) so a
	// partially initialised client — where CreateDecoder failed but
	// newDecoder succeeded — does not leak the FFmpeg context. This also
	// makes the H264 branch symmetric with the H265 one.
	if g.VideoH264FrameDecoder != nil {
		g.VideoH264FrameDecoder.Close()
	}
	if g.VideoH265FrameDecoder != nil {
		g.VideoH265FrameDecoder.Close()
	}
	return nil
}
// frameData returns the AVFrame's data-plane pointer array as a C
// pointer, suitable for passing back into FFmpeg APIs.
func frameData(frame *C.AVFrame) **C.uint8_t {
	return (**C.uint8_t)(unsafe.Pointer(&frame.data[0]))
}
// frameLineSize returns the AVFrame's linesize (stride) array as a C
// pointer.
func frameLineSize(frame *C.AVFrame) *C.int {
	return (*C.int)(unsafe.Pointer(&frame.linesize[0]))
}
// Decoder wraps an FFmpeg video decoder (H264 or H265, selected in
// newDecoder) together with a reusable destination frame.
type Decoder struct {
	codecCtx *C.AVCodecContext // open codec context
	srcFrame *C.AVFrame        // scratch frame reused across decode calls
}
// newDecoder allocates a Decoder wrapping FFmpeg's H264 decoder, or the
// H265 decoder when codecName is "H265". The caller must Close it.
func newDecoder(codecName string) (*Decoder, error) {
	codec := C.avcodec_find_decoder(C.AV_CODEC_ID_H264)
	if codecName == "H265" {
		codec = C.avcodec_find_decoder(C.AV_CODEC_ID_H265)
	}
	if codec == nil {
		return nil, fmt.Errorf("avcodec_find_decoder() failed")
	}
	codecCtx := C.avcodec_alloc_context3(codec)
	if codecCtx == nil {
		return nil, fmt.Errorf("avcodec_alloc_context3() failed")
	}
	res := C.avcodec_open2(codecCtx, codec, nil)
	if res < 0 {
		C.avcodec_close(codecCtx)
		return nil, fmt.Errorf("avcodec_open2() failed")
	}
	srcFrame := C.av_frame_alloc()
	if srcFrame == nil {
		C.avcodec_close(codecCtx)
		return nil, fmt.Errorf("av_frame_alloc() failed")
	}
	return &Decoder{
		codecCtx: codecCtx,
		srcFrame: srcFrame,
	}, nil
}
// Close releases the decoder's scratch frame and codec context.
func (d *Decoder) Close() {
	// av_frame_free accepts a nil *AVFrame and nils the pointer it is
	// given, so a single unconditional call is sufficient — no nil
	// guard or second free is needed.
	C.av_frame_free(&d.srcFrame)
	C.avcodec_close(d.codecCtx)
}
// decode prepends an Annex-B start code to the given NAL unit, feeds it
// to the FFmpeg decoder and returns any resulting frame as a YCbCr
// 4:2:0 image.
// NOTE(review): send/receive failures return an empty image with a nil
// error — callers must treat an empty image as "no frame yet".
func (d *Decoder) decode(nalu []byte) (image.YCbCr, error) {
	nalu = append([]uint8{0x00, 0x00, 0x00, 0x01}, []uint8(nalu)...)
	// Send the NALU to the decoder.
	var avPacket C.AVPacket
	avPacket.data = (*C.uint8_t)(C.CBytes(nalu))
	defer C.free(unsafe.Pointer(avPacket.data))
	avPacket.size = C.int(len(nalu))
	res := C.avcodec_send_packet(d.codecCtx, &avPacket)
	if res < 0 {
		return image.YCbCr{}, nil
	}
	// Receive a frame if one is available.
	res = C.avcodec_receive_frame(d.codecCtx, d.srcFrame)
	if res < 0 {
		return image.YCbCr{}, nil
	}
	if res == 0 {
		fr := d.srcFrame
		w := int(fr.width)
		h := int(fr.height)
		ys := int(fr.linesize[0])
		cs := int(fr.linesize[1])
		// The returned planes alias FFmpeg-owned memory (see fromCPtr);
		// they are only valid until the next decode call.
		return image.YCbCr{
			Y:              fromCPtr(unsafe.Pointer(fr.data[0]), ys*h),
			Cb:             fromCPtr(unsafe.Pointer(fr.data[1]), cs*h/2),
			Cr:             fromCPtr(unsafe.Pointer(fr.data[2]), cs*h/2),
			YStride:        ys,
			CStride:        cs,
			SubsampleRatio: image.YCbCrSubsampleRatio420,
			Rect:           image.Rect(0, 0, w, h),
		}, nil
	}
	return image.YCbCr{}, nil
}
// decodeRaw prepends an Annex-B start code to the given NAL unit, feeds
// it to the FFmpeg decoder and returns the luma plane as a grayscale
// image.
// NOTE(review): like decode, failures return an empty image with a nil
// error; Pix aliases FFmpeg-owned memory valid only until the next
// decode call, and Pix is sized w*h while Stride is the decoder's
// linesize ys — inconsistent whenever ys > w, confirm.
func (d *Decoder) decodeRaw(nalu []byte) (image.Gray, error) {
	nalu = append([]uint8{0x00, 0x00, 0x00, 0x01}, []uint8(nalu)...)
	// Send the NALU to the decoder.
	var avPacket C.AVPacket
	avPacket.data = (*C.uint8_t)(C.CBytes(nalu))
	defer C.free(unsafe.Pointer(avPacket.data))
	avPacket.size = C.int(len(nalu))
	res := C.avcodec_send_packet(d.codecCtx, &avPacket)
	if res < 0 {
		return image.Gray{}, nil
	}
	// Receive a frame if one is available.
	res = C.avcodec_receive_frame(d.codecCtx, d.srcFrame)
	if res < 0 {
		return image.Gray{}, nil
	}
	if res == 0 {
		fr := d.srcFrame
		w := int(fr.width)
		h := int(fr.height)
		ys := int(fr.linesize[0])
		return image.Gray{
			Pix:    fromCPtr(unsafe.Pointer(fr.data[0]), w*h),
			Stride: ys,
			Rect:   image.Rect(0, 0, w, h),
		}, nil
	}
	return image.Gray{}, nil
}
func fromCPtr(buf unsafe.Pointer, size int) (ret []uint8) {
hdr := (*reflect.SliceHeader)((unsafe.Pointer(&ret)))
hdr.Cap = size
hdr.Len = size
hdr.Data = uintptr(buf)
return
}
// FindPCMU scans the session description for a G711 mu-law audio
// format on media matching the requested back-channel flag. It returns
// the format and its media, or (nil, nil) when none is present.
func FindPCMU(desc *description.Session, isBackChannel bool) (*format.G711, *description.Media) {
	for _, media := range desc.Medias {
		if media.IsBackChannel != isBackChannel {
			continue
		}
		for _, forma := range media.Formats {
			g711, ok := forma.(*format.G711)
			if ok && g711.MULaw {
				return g711, media
			}
		}
	}
	return nil, nil
}
// FindMPEG4Audio scans the session description for an MPEG-4 (AAC)
// audio format on media matching the requested back-channel flag. It
// returns the format and its media, or (nil, nil) when none is present.
func FindMPEG4Audio(desc *description.Session, isBackChannel bool) (*format.MPEG4Audio, *description.Media) {
	for _, media := range desc.Medias {
		if media.IsBackChannel != isBackChannel {
			continue
		}
		for _, forma := range media.Formats {
			if mpeg4, ok := forma.(*format.MPEG4Audio); ok {
				return mpeg4, media
			}
		}
	}
	return nil, nil
}
// WriteMPEG4Audio writes MPEG-4 Audio access units: each access unit is
// wrapped in an ADTS header derived from the format's configuration and
// the resulting packets are marshalled into a single byte stream.
func WriteMPEG4Audio(forma *format.MPEG4Audio, aus [][]byte) ([]byte, error) {
	pkts := make(mpeg4audio.ADTSPackets, 0, len(aus))
	for _, au := range aus {
		pkts = append(pkts, &mpeg4audio.ADTSPacket{
			Type:         forma.Config.Type,
			SampleRate:   forma.Config.SampleRate,
			ChannelCount: forma.Config.ChannelCount,
			AU:           au,
		})
	}
	return pkts.Marshal()
}

View File

@@ -1,98 +0,0 @@
package capture
import (
"strconv"
"sync"
"time"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/joy4/av/pubsub"
"github.com/kerberos-io/joy4/av"
"github.com/kerberos-io/joy4/av/avutil"
"github.com/kerberos-io/joy4/cgo/ffmpeg"
"github.com/kerberos-io/joy4/format"
)
// OpenRTSP registers all joy4 formats and opens the given (RTSP) URL,
// returning the demuxer together with the codec data of its streams.
func OpenRTSP(url string) (av.DemuxCloser, []av.CodecData, error) {
	format.RegisterAll()
	infile, err := avutil.Open(url)
	if err == nil {
		streams, errstreams := infile.Streams()
		return infile, streams, errstreams
	}
	return nil, []av.CodecData{}, err
}
// GetVideoDecoder builds an FFmpeg video decoder for the (last) video
// stream found in streams.
// NOTE(review): the NewVideoDecoder error is discarded and vstream is
// the zero value when no video stream exists — confirm callers guard
// against a nil decoder.
func GetVideoDecoder(streams []av.CodecData) *ffmpeg.VideoDecoder {
	// Load video codec.
	var vstream av.VideoCodecData
	for _, stream := range streams {
		if stream.Type().IsAudio() {
			//astream := stream.(av.AudioCodecData)
		} else if stream.Type().IsVideo() {
			vstream = stream.(av.VideoCodecData)
		}
	}
	dec, _ := ffmpeg.NewVideoDecoder(vstream)
	return dec
}
// DecodeImage decodes the packet's payload into a video frame using the
// shared decoder. The mutex serialises access because the decoder is shared
// between goroutines.
func DecodeImage(pkt av.Packet, decoder *ffmpeg.VideoDecoder, decoderMutex *sync.Mutex) (*ffmpeg.VideoFrame, error) {
	decoderMutex.Lock()
	defer decoderMutex.Unlock()
	return decoder.Decode(pkt.Data)
}
// HandleStream continuously reads packets from the opened RTSP demuxer and
// publishes them on the queue. It stops when a signal arrives on
// communication.HandleStream (sent during a reconfiguration), then closes
// the queue before returning.
func HandleStream(infile av.DemuxCloser, queue *pubsub.Queue, communication *models.Communication) { //, wg *sync.WaitGroup) {
	log.Log.Debug("HandleStream: started")
	var err error
loop:
	for {
		// This will check if we need to stop the thread,
		// because of a reconfiguration.
		select {
		case <-communication.HandleStream:
			break loop
		default:
		}
		var pkt av.Packet
		if pkt, err = infile.ReadPacket(); err != nil { // sometimes this throws an end of file..
			log.Log.Error("HandleStream: " + err.Error())
			// On EOF back off for 30s; the loop then retries ReadPacket.
			// NOTE(review): a persistent EOF keeps this loop alive until a
			// stop signal arrives — confirm that is intended.
			if err.Error() == "EOF" {
				time.Sleep(30 * time.Second)
			}
		}
		// Could be that a decode is throwing errors.
		// Only forward packets that actually carry data.
		if len(pkt.Data) > 0 {
			queue.WritePacket(pkt)
			// This will check if we need to stop the thread,
			// because of a reconfiguration.
			select {
			case <-communication.HandleStream:
				break loop
			default:
			}
			if pkt.IsKeyFrame {
				// Increment packets, so we know the device
				// is not blocking. The counter wraps at 1000; presumably
				// consumers only check that it changes — confirm.
				r := communication.PackageCounter.Load().(int64)
				log.Log.Info("HandleStream: packet size " + strconv.Itoa(len(pkt.Data)))
				communication.PackageCounter.Store((r + 1) % 1000)
			}
		}
	}
	queue.Close()
	log.Log.Debug("HandleStream: finished")
}

View File

@@ -0,0 +1,72 @@
package capture
import (
"context"
"image"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/packets"
)
// Capture bundles the RTSP clients used by the agent: the main stream,
// an optional sub stream, and an optional backchannel stream. Fields are
// nil until set through the corresponding Set*Client methods.
type Capture struct {
	RTSPClient            *Golibrtsp // main stream client
	RTSPSubClient         *Golibrtsp // sub stream client (may be nil)
	RTSPBackChannelClient *Golibrtsp // backchannel client (may be nil)
}
// SetMainClient creates a Golibrtsp client for the main stream URL, stores
// it on the capture device, and returns it.
func (c *Capture) SetMainClient(rtspUrl string) *Golibrtsp {
	client := &Golibrtsp{Url: rtspUrl}
	c.RTSPClient = client
	return client
}
// SetSubClient creates a Golibrtsp client for the sub stream URL, stores
// it on the capture device, and returns it.
func (c *Capture) SetSubClient(rtspUrl string) *Golibrtsp {
	client := &Golibrtsp{Url: rtspUrl}
	c.RTSPSubClient = client
	return client
}
// SetBackChannelClient creates a Golibrtsp client for the backchannel URL,
// stores it on the capture device, and returns it.
func (c *Capture) SetBackChannelClient(rtspUrl string) *Golibrtsp {
	client := &Golibrtsp{Url: rtspUrl}
	c.RTSPBackChannelClient = client
	return client
}
// RTSPClient is an interface that abstracts the RTSP client implementation,
// so the capture pipeline does not depend on a specific RTSP library.
type RTSPClient interface {
	// Connect to the RTSP server.
	Connect(ctx context.Context) error
	// ConnectBackChannel connects to the RTSP server's backchannel.
	ConnectBackChannel(ctx context.Context) error
	// Start reads packets from the stream and pushes them onto the queue.
	Start(ctx context.Context, streamType string, queue *packets.Queue, configuration *models.Configuration, communication *models.Communication) error
	// StartBackChannel starts the backchannel stream.
	StartBackChannel(ctx context.Context) (err error)
	// DecodePacket decodes a packet into a YCbCr (color) image.
	DecodePacket(pkt packets.Packet) (image.YCbCr, error)
	// DecodePacketRaw decodes a packet into a grayscale image.
	DecodePacketRaw(pkt packets.Packet) (image.Gray, error)
	// Write a packet to the RTSP server.
	WritePacket(pkt packets.Packet) error
	// Close the connection to the RTSP server.
	Close() error
	// Get a list of streams from the RTSP server.
	GetStreams() ([]packets.Stream, error)
	// Get a list of video streams from the RTSP server.
	GetVideoStreams() ([]packets.Stream, error)
	// Get a list of audio streams from the RTSP server.
	GetAudioStreams() ([]packets.Stream, error)
}

View File

@@ -2,124 +2,361 @@
package capture
import (
"context"
"encoding/base64"
"image"
"os"
"runtime"
"runtime/debug"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/kerberos-io/agent/machinery/src/conditions"
"github.com/kerberos-io/agent/machinery/src/encryption"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/packets"
"github.com/kerberos-io/agent/machinery/src/utils"
"github.com/kerberos-io/joy4/av/pubsub"
"github.com/kerberos-io/joy4/format/mp4"
"github.com/kerberos-io/joy4/av"
"github.com/yapingcat/gomedia/go-mp4"
)
func HandleRecordStream(recordingCursor *pubsub.QueueCursor, configuration *models.Configuration, communication *models.Communication, streams []av.CodecData) {
log.Log.Debug("HandleRecordStream: started")
// CleanupRecordingDirectory keeps the recordings directory below the
// configured maximum size. When Config.AutoClean is "true" it measures
// <configDirectory>/data/recordings and, when the total size (in MB) reaches
// Config.MaxDirectorySize (defaulting to 300 when unset), removes the single
// oldest recording. At most one file is removed per call.
// NOTE(review): log prefixes say "HandleRecordStream" because this helper is
// invoked from there; kept unchanged for log continuity.
func CleanupRecordingDirectory(configDirectory string, configuration *models.Configuration) {
	autoClean := configuration.Config.AutoClean
	if autoClean == "true" {
		maxSize := configuration.Config.MaxDirectorySize
		if maxSize == 0 {
			// Fall back to a 300 MB cap when no maximum is configured.
			maxSize = 300
		}
		// Total size of the recording directory.
		recordingsDirectory := configDirectory + "/data/recordings"
		size, err := utils.DirSize(recordingsDirectory)
		if err == nil {
			sizeInMB := size / 1000 / 1000
			if sizeInMB >= maxSize {
				// Remove the oldest recording.
				oldestFile, err := utils.FindOldestFile(recordingsDirectory)
				if err == nil {
					err := os.Remove(recordingsDirectory + "/" + oldestFile.Name())
					if err != nil {
						log.Log.Info("HandleRecordStream: something went wrong, " + err.Error())
					} else {
						// Only report success after the removal succeeded
						// (previously this was logged before checking the error).
						log.Log.Info("HandleRecordStream: removed oldest file as part of cleanup - " + recordingsDirectory + "/" + oldestFile.Name())
					}
				} else {
					log.Log.Info("HandleRecordStream: something went wrong, " + err.Error())
				}
			}
		} else {
			log.Log.Info("HandleRecordStream: something went wrong, " + err.Error())
		}
	} else {
		log.Log.Info("HandleRecordStream: Autoclean disabled, nothing to do here.")
	}
}
func HandleRecordStream(queue *packets.Queue, configDirectory string, configuration *models.Configuration, communication *models.Communication, rtspClient RTSPClient) {
config := configuration.Config
recordingPeriod := config.Capture.PostRecording // number of seconds to record.
maxRecordingPeriod := config.Capture.MaxLengthRecording // maximum number of seconds to record.
loc, _ := time.LoadLocation(config.Timezone)
// Synchronise the last synced time
now := time.Now().Unix()
startRecording := now
timestamp := now
if config.Capture.Recording == "false" {
log.Log.Info("capture.main.HandleRecordStream(): disabled, we will not record anything.")
} else {
log.Log.Debug("capture.main.HandleRecordStream(): started")
// Check if continuous recording.
if config.Capture.Continuous == "true" {
recordingPeriod := config.Capture.PostRecording // number of seconds to record.
maxRecordingPeriod := config.Capture.MaxLengthRecording // maximum number of seconds to record.
// Do not do anything!
log.Log.Info("HandleRecordStream: Start continuous recording ")
// Synchronise the last synced time
now := time.Now().Unix()
startRecording := now
timestamp := now
loc, _ := time.LoadLocation(config.Timezone)
now = time.Now().Unix()
timestamp = now
start := false
var name string
var myMuxer *mp4.Muxer
// For continuous and motion based recording we will use a single file.
var file *os.File
var err error
// If continuous record the full length
recordingPeriod = maxRecordingPeriod
// Recording file name
fullName := ""
// Check if continuous recording.
if config.Capture.Continuous == "true" {
// Get as much packets we need.
//for pkt := range packets {
var cursorError error
var pkt av.Packet
recordingStatus := "idle"
//var cws *cacheWriterSeeker
var myMuxer *mp4.Movmuxer
var videoTrack uint32
var audioTrack uint32
var name string
for cursorError == nil {
// Do not do anything!
log.Log.Info("capture.main.HandleRecordStream(continuous): start recording")
pkt, cursorError = recordingCursor.ReadPacket()
now = time.Now().Unix()
timestamp = now
start := false
now := time.Now().Unix()
// If continuous record the full length
recordingPeriod = maxRecordingPeriod
// Recording file name
fullName := ""
if start && // If already recording and current frame is a keyframe and we should stop recording
pkt.IsKeyFrame && (timestamp+recordingPeriod-now <= 0 || now-startRecording >= maxRecordingPeriod) {
// Get as much packets we need.
var cursorError error
var pkt packets.Packet
var nextPkt packets.Packet
recordingStatus := "idle"
recordingCursor := queue.Oldest()
// This will write the trailer a well.
if err := myMuxer.WriteTrailer(); err != nil {
log.Log.Error(err.Error())
}
log.Log.Info("HandleRecordStream: Recording finished: file save: " + name)
file.Close()
// Check if need to convert to fragmented using bento
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
}
// Create a symbol link.
fc, _ := os.Create("./data/cloud/" + name)
fc.Close()
// Cleanup muxer
start = false
myMuxer = nil
runtime.GC()
debug.FreeOSMemory()
recordingStatus = "idle"
if cursorError == nil {
pkt, cursorError = recordingCursor.ReadPacket()
}
// If not yet started and a keyframe, let's make a recording
if !start && pkt.IsKeyFrame {
for cursorError == nil {
// Check if within time interval
nowInTimezone := time.Now().In(loc)
weekday := nowInTimezone.Weekday()
hour := nowInTimezone.Hour()
minute := nowInTimezone.Minute()
second := nowInTimezone.Second()
timeEnabled := config.Time
timeInterval := config.Timetable[int(weekday)]
nextPkt, cursorError = recordingCursor.ReadPacket()
if timeEnabled == "true" && timeInterval != nil {
start1 := timeInterval.Start1
end1 := timeInterval.End1
start2 := timeInterval.Start2
end2 := timeInterval.End2
currentTimeInSeconds := hour*60*60 + minute*60 + second
if (currentTimeInSeconds >= start1 && currentTimeInSeconds <= end1) ||
(currentTimeInSeconds >= start2 && currentTimeInSeconds <= end2) {
now := time.Now().Unix()
} else {
log.Log.Debug("HandleRecordStream: Disabled: no continuous recording at this moment. Not within specified time interval.")
if start && // If already recording and current frame is a keyframe and we should stop recording
nextPkt.IsKeyFrame && (timestamp+recordingPeriod-now <= 0 || now-startRecording >= maxRecordingPeriod) {
// Write the last packet
ttime := convertPTS(pkt.Time)
if pkt.IsVideo {
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
} else if pkt.IsAudio {
if pkt.Codec == "AAC" {
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
} else if pkt.Codec == "PCM_MULAW" {
// TODO: transcode to AAC, some work to do..
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
}
}
// This will write the trailer a well.
if err := myMuxer.WriteTrailer(); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
log.Log.Info("capture.main.HandleRecordStream(continuous): recording finished: file save: " + name)
// Cleanup muxer
start = false
file.Close()
file = nil
// Check if need to convert to fragmented using bento
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
}
// Check if we need to encrypt the recording.
if config.Encryption != nil && config.Encryption.Enabled == "true" && config.Encryption.Recordings == "true" && config.Encryption.SymmetricKey != "" {
// reopen file into memory 'fullName'
contents, err := os.ReadFile(fullName)
if err == nil {
// encrypt
encryptedContents, err := encryption.AesEncrypt(contents, config.Encryption.SymmetricKey)
if err == nil {
// write back to file
err := os.WriteFile(fullName, []byte(encryptedContents), 0644)
if err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): error writing file: " + err.Error())
}
} else {
log.Log.Error("capture.main.HandleRecordStream(continuous): error encrypting file: " + err.Error())
}
} else {
log.Log.Error("capture.main.HandleRecordStream(continuous): error reading file: " + err.Error())
}
}
// Create a symbol link.
fc, _ := os.Create(configDirectory + "/data/cloud/" + name)
fc.Close()
recordingStatus = "idle"
// Clean up the recording directory if necessary.
CleanupRecordingDirectory(configDirectory, configuration)
}
// If not yet started and a keyframe, let's make a recording
if !start && pkt.IsKeyFrame {
// We might have different conditions enabled such as time window or uri response.
// We'll validate those conditions and if not valid we'll not do anything.
valid, err := conditions.Validate(loc, configuration)
if !valid && err != nil {
log.Log.Debug("capture.main.HandleRecordStream(continuous): " + err.Error() + ".")
time.Sleep(5 * time.Second)
continue
}
start = true
timestamp = now
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
// - Timestamp
// - Size + - + microseconds
// - device
// - Region
// - Number of changes
// - Token
startRecording = time.Now().Unix() // we mark the current time when the record started.ss
s := strconv.FormatInt(startRecording, 10) + "_" +
"6" + "-" +
"967003" + "_" +
config.Name + "_" +
"200-200-400-400" + "_0_" +
"769"
name = s + ".mp4"
fullName = configDirectory + "/data/recordings/" + name
// Running...
log.Log.Info("capture.main.HandleRecordStream(continuous): recording started")
file, err = os.Create(fullName)
if err == nil {
//cws = newCacheWriterSeeker(4096)
myMuxer, _ = mp4.CreateMp4Muxer(file)
// We choose between H264 and H265
if pkt.Codec == "H264" {
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264)
} else if pkt.Codec == "H265" {
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265)
}
// For an MP4 container, AAC is the only audio codec supported.
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
} else {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
ttime := convertPTS(pkt.Time)
if pkt.IsVideo {
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
} else if pkt.IsAudio {
if pkt.Codec == "AAC" {
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
} else if pkt.Codec == "PCM_MULAW" {
// TODO: transcode to AAC, some work to do..
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
}
}
recordingStatus = "started"
} else if start {
ttime := convertPTS(pkt.Time)
if pkt.IsVideo {
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
} else if pkt.IsAudio {
if pkt.Codec == "AAC" {
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(continuous): " + err.Error())
}
} else if pkt.Codec == "PCM_MULAW" {
// TODO: transcode to AAC, some work to do..
log.Log.Debug("capture.main.HandleRecordStream(continuous): no AAC audio codec detected, skipping audio track.")
}
}
}
start = true
timestamp = now
pkt = nextPkt
}
// We might have interrupted the recording while restarting the agent.
// If this happens we need to check to properly close the recording.
if cursorError != nil {
if recordingStatus == "started" {
// This will write the trailer a well.
if err := myMuxer.WriteTrailer(); err != nil {
log.Log.Error(err.Error())
}
log.Log.Info("capture.main.HandleRecordStream(continuous): Recording finished: file save: " + name)
// Cleanup muxer
start = false
file.Close()
file = nil
// Check if need to convert to fragmented using bento
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
}
// Check if we need to encrypt the recording.
if config.Encryption != nil && config.Encryption.Enabled == "true" && config.Encryption.Recordings == "true" && config.Encryption.SymmetricKey != "" {
// reopen file into memory 'fullName'
contents, err := os.ReadFile(fullName)
if err == nil {
// encrypt
encryptedContents, err := encryption.AesEncrypt(contents, config.Encryption.SymmetricKey)
if err == nil {
// write back to file
err := os.WriteFile(fullName, []byte(encryptedContents), 0644)
if err != nil {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error writing file: " + err.Error())
}
} else {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error encrypting file: " + err.Error())
}
} else {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error reading file: " + err.Error())
}
}
// Create a symbol link.
fc, _ := os.Create(configDirectory + "/data/cloud/" + name)
fc.Close()
recordingStatus = "idle"
// Clean up the recording directory if necessary.
CleanupRecordingDirectory(configDirectory, configuration)
}
}
} else {
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Start motion based recording ")
var lastDuration time.Duration
var lastRecordingTime int64
//var cws *cacheWriterSeeker
var myMuxer *mp4.Movmuxer
var videoTrack uint32
var audioTrack uint32
for motion := range communication.HandleMotion {
timestamp = time.Now().Unix()
startRecording = time.Now().Unix() // we mark the current time when the record started.
numberOfChanges := motion.NumberOfChanges
// If we have prerecording we will substract the number of seconds.
// Taking into account FPS = GOP size (Keyfram interval)
if config.Capture.PreRecording > 0 {
// Might be that recordings are coming short after each other.
// Therefore we do some math with the current time and the last recording time.
timeBetweenNowAndLastRecording := startRecording - lastRecordingTime
if timeBetweenNowAndLastRecording > int64(config.Capture.PreRecording) {
startRecording = startRecording - int64(config.Capture.PreRecording) + 1
} else {
startRecording = startRecording - timeBetweenNowAndLastRecording
}
}
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
@@ -130,166 +367,308 @@ func HandleRecordStream(recordingCursor *pubsub.QueueCursor, configuration *mode
// - Number of changes
// - Token
startRecording = time.Now().Unix() // we mark the current time when the record started.ss
s := strconv.FormatInt(startRecording, 10) + "_" + "6" + "-" + "967003" + "_" + config.Name + "_" + "200-200-400-400" + "_" + "24" + "_" + "769"
name = s + ".mp4"
fullName = "./data/recordings/" + name
s := strconv.FormatInt(startRecording, 10) + "_" +
"6" + "-" +
"967003" + "_" +
config.Name + "_" +
"200-200-400-400" + "_" +
strconv.Itoa(numberOfChanges) + "_" +
"769"
name := s + ".mp4"
fullName := configDirectory + "/data/recordings/" + name
// Running...
log.Log.Info("Recording started")
log.Log.Info("capture.main.HandleRecordStream(motiondetection): recording started")
file, _ = os.Create(fullName)
myMuxer, _ = mp4.CreateMp4Muxer(file)
file, err = os.Create(fullName)
if err == nil {
myMuxer = mp4.NewMuxer(file)
// Check which video codec we need to use.
videoSteams, _ := rtspClient.GetVideoStreams()
for _, stream := range videoSteams {
if stream.Name == "H264" {
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H264)
} else if stream.Name == "H265" {
videoTrack = myMuxer.AddVideoTrack(mp4.MP4_CODEC_H265)
}
}
// For an MP4 container, AAC is the only audio codec supported.
audioTrack = myMuxer.AddAudioTrack(mp4.MP4_CODEC_AAC)
start := false
// Get as much packets we need.
var cursorError error
var pkt packets.Packet
var nextPkt packets.Packet
recordingCursor := queue.DelayedGopCount(int(config.Capture.PreRecording + 1))
if cursorError == nil {
pkt, cursorError = recordingCursor.ReadPacket()
}
log.Log.Info("HandleRecordStream: composing recording")
log.Log.Info("HandleRecordStream: write header")
for cursorError == nil {
// Creating the file, might block sometimes.
if err := myMuxer.WriteHeader(streams); err != nil {
log.Log.Error(err.Error())
nextPkt, cursorError = recordingCursor.ReadPacket()
if cursorError != nil {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + cursorError.Error())
}
now := time.Now().Unix()
select {
case motion := <-communication.HandleMotion:
timestamp = now
log.Log.Info("capture.main.HandleRecordStream(motiondetection): motion detected while recording. Expanding recording.")
numberOfChanges = motion.NumberOfChanges
log.Log.Info("capture.main.HandleRecordStream(motiondetection): Received message with recording data, detected changes to save: " + strconv.Itoa(numberOfChanges))
default:
}
if (timestamp+recordingPeriod-now < 0 || now-startRecording > maxRecordingPeriod) && nextPkt.IsKeyFrame {
log.Log.Info("capture.main.HandleRecordStream(motiondetection): closing recording (timestamp: " + strconv.FormatInt(timestamp, 10) + ", recordingPeriod: " + strconv.FormatInt(recordingPeriod, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
break
}
if pkt.IsKeyFrame && !start && pkt.Time >= lastDuration {
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): write frames")
start = true
}
if start {
ttime := convertPTS(pkt.Time)
if pkt.IsVideo {
if err := myMuxer.Write(videoTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
}
} else if pkt.IsAudio {
if pkt.Codec == "AAC" {
if err := myMuxer.Write(audioTrack, pkt.Data, ttime, ttime); err != nil {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
}
} else if pkt.Codec == "PCM_MULAW" {
// TODO: transcode to AAC, some work to do..
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): no AAC audio codec detected, skipping audio track.")
}
}
// We will sync to file every keyframe.
if pkt.IsKeyFrame {
err := file.Sync()
if err != nil {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): " + err.Error())
} else {
log.Log.Debug("capture.main.HandleRecordStream(motiondetection): synced file " + name)
}
}
}
pkt = nextPkt
}
if err := myMuxer.WritePacket(pkt); err != nil {
log.Log.Error(err.Error())
}
recordingStatus = "started"
} else if start {
if err := myMuxer.WritePacket(pkt); err != nil {
log.Log.Error(err.Error())
}
}
}
// We might have interrupted the recording while restarting the agent.
// If this happens we need to check to properly close the recording.
if cursorError != nil {
if recordingStatus == "started" {
// This will write the trailer a well.
if err := myMuxer.WriteTrailer(); err != nil {
log.Log.Error(err.Error())
}
myMuxer.WriteTrailer()
log.Log.Info("HandleRecordStream: Recording finished: file save: " + name)
log.Log.Info("capture.main.HandleRecordStream(motiondetection): file save: " + name)
lastDuration = pkt.Time
lastRecordingTime = time.Now().Unix()
file.Close()
file = nil
// Check if need to convert to fragmented using bento
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
}
// Create a symbol link.
fc, _ := os.Create("./data/cloud/" + name)
fc.Close()
// Cleanup muxer
start = false
myMuxer = nil
runtime.GC()
debug.FreeOSMemory()
recordingStatus = "idle"
}
}
} else {
log.Log.Info("HandleRecordStream: Start motion based recording ")
var myMuxer *mp4.Muxer
var file *os.File
var err error
for _ = range communication.HandleMotion {
now = time.Now().Unix()
timestamp = now
startRecording = now // we mark the current time when the record started.
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
// - Timestamp
// - Size + - + microseconds
// - device
// - Region
// - Number of changes
// - Token
s := strconv.FormatInt(startRecording, 10) + "_" + "6" + "-" + "967003" + "_" + config.Name + "_" + "200-200-400-400" + "_" + "24" + "_" + "769"
name := s + ".mp4"
fullName := "./data/recordings/" + name
// Running...
log.Log.Info("HandleRecordStream: Recording started")
file, err = os.Create(fullName)
if err == nil {
myMuxer = mp4.NewMuxer(file)
}
start := false
log.Log.Info("HandleRecordStream: composing recording")
log.Log.Info("HandleRecordStream: write header")
// Creating the file, might block sometimes.
if err := myMuxer.WriteHeader(streams); err != nil {
log.Log.Error(err.Error())
}
// Get as much packets we need.
//for pkt := range packets {
var cursorError error
var pkt av.Packet
for cursorError == nil {
pkt, cursorError = recordingCursor.ReadPacket()
if cursorError != nil {
log.Log.Error("HandleRecordStream: " + cursorError.Error())
}
now := time.Now().Unix()
select {
case <-communication.HandleMotion:
timestamp = now
log.Log.Info("HandleRecordStream: motion detected while recording. Expanding recording.")
default:
}
if timestamp+recordingPeriod-now <= 0 || now-startRecording >= maxRecordingPeriod {
log.Log.Info("HandleRecordStream: closing recording (timestamp: " + strconv.FormatInt(timestamp, 10) + ", recordingPeriod: " + strconv.FormatInt(recordingPeriod, 10) + ", now: " + strconv.FormatInt(now, 10) + ", startRecording: " + strconv.FormatInt(startRecording, 10) + ", maxRecordingPeriod: " + strconv.FormatInt(maxRecordingPeriod, 10))
break
}
if pkt.IsKeyFrame {
log.Log.Info("HandleRecordStream: write frames")
start = true
}
if start {
if err := myMuxer.WritePacket(pkt); err != nil {
log.Log.Error(err.Error())
// Check if we need to encrypt the recording.
if config.Encryption != nil && config.Encryption.Enabled == "true" && config.Encryption.Recordings == "true" && config.Encryption.SymmetricKey != "" {
// reopen file into memory 'fullName'
contents, err := os.ReadFile(fullName)
if err == nil {
// encrypt
encryptedContents, err := encryption.AesEncrypt(contents, config.Encryption.SymmetricKey)
if err == nil {
// write back to file
err := os.WriteFile(fullName, []byte(encryptedContents), 0644)
if err != nil {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error writing file: " + err.Error())
}
} else {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error encrypting file: " + err.Error())
}
} else {
log.Log.Error("capture.main.HandleRecordStream(motiondetection): error reading file: " + err.Error())
}
}
// Create a symbol linc.
fc, _ := os.Create(configDirectory + "/data/cloud/" + name)
fc.Close()
// Clean up the recording directory if necessary.
CleanupRecordingDirectory(configDirectory, configuration)
}
// This will write the trailer as well.
myMuxer.WriteTrailer()
log.Log.Info("HandleRecordStream: file save: " + name)
file.Close()
myMuxer = nil
runtime.GC()
debug.FreeOSMemory()
// Check if need to convert to fragmented using bento
if config.Capture.Fragmented == "true" && config.Capture.FragmentedDuration > 0 {
utils.CreateFragmentedMP4(fullName, config.Capture.FragmentedDuration)
}
// Create a symbol linc.
fc, _ := os.Create("./data/cloud/" + name)
fc.Close()
}
log.Log.Debug("capture.main.HandleRecordStream(): finished")
}
}
// VerifyCamera godoc
// @Router /api/camera/verify/{streamType} [post]
// @ID verify-camera
// @Tags camera
// @Param streamType path string true "Stream Type" Enums(primary, secondary)
// @Param cameraStreams body models.CameraStreams true "Camera Streams"
// @Summary Validate a specific RTSP profile camera connection.
// @Description This method will validate a specific profile connection from an RTSP camera, and try to get the codec.
// @Success 200 {object} models.APIResponse
func VerifyCamera(c *gin.Context) {
	var cameraStreams models.CameraStreams
	err := c.BindJSON(&cameraStreams)
	// Should return in 5 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err == nil {
		// Default to the primary stream when no stream type is given.
		streamType := c.Param("streamType")
		if streamType == "" {
			streamType = "primary"
		}
		rtspUrl := cameraStreams.RTSP
		if streamType == "secondary" {
			rtspUrl = cameraStreams.SubRTSP
		}
		// Establishing the camera connection without backchannel if no substream
		rtspClient := &Golibrtsp{
			Url: rtspUrl,
		}
		err := rtspClient.Connect(ctx)
		if err == nil {
			// Get the streams from the rtsp client and remember the first
			// supported video (H264/H265) and audio (PCM µ-law) indices.
			streams, _ := rtspClient.GetStreams()
			videoIdx := -1
			audioIdx := -1
			for i, stream := range streams {
				if (stream.Name == "H264" || stream.Name == "H265") && videoIdx < 0 {
					videoIdx = i
				} else if stream.Name == "PCM_MULAW" && audioIdx < 0 {
					audioIdx = i
				}
			}
			err := rtspClient.Close()
			if err == nil {
				if videoIdx > -1 {
					// The detection above accepts both H264 and H265; the
					// previous messages incorrectly claimed H264 only.
					c.JSON(200, models.APIResponse{
						Message: "All good, detected a H264 or H265 codec.",
						Data:    streams,
					})
				} else {
					c.JSON(400, models.APIResponse{
						Message: "Stream doesn't have a H264 or H265 codec, we only support H264 and H265 so far.",
					})
				}
			} else {
				c.JSON(400, models.APIResponse{
					Message: "Something went wrong while closing the connection " + err.Error(),
				})
			}
		} else {
			c.JSON(400, models.APIResponse{
				Message: err.Error(),
			})
		}
	} else {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong while receiving the config " + err.Error(),
		})
	}
}
// Base64Image grabs a single keyframe snapshot from the camera, decodes it,
// and returns it as a base64-encoded string. It prefers the sub stream
// (lower resolution) and falls back to the main stream. Returns an empty
// string when no frame could be read or decoded.
func Base64Image(captureDevice *Capture, communication *models.Communication) string {
	// We'll try to get a snapshot from the camera.
	var queue *packets.Queue
	var cursor *packets.QueueCursor
	// We'll pick the right client and decoder.
	rtspClient := captureDevice.RTSPSubClient
	if rtspClient != nil {
		queue = communication.SubQueue
		cursor = queue.Latest()
	} else {
		rtspClient = captureDevice.RTSPClient
		queue = communication.Queue
		cursor = queue.Latest()
	}
	// NOTE(review): this debug message references HandleRecordStream —
	// it looks like a leftover from elsewhere; confirm and relabel.
	log.Log.Debug("HandleRecordStream: finished")
	// We'll try to have a keyframe, if not we'll return an empty string.
	// NOTE(review): if the stream never yields a keyframe this loop does not
	// terminate — presumably ReadPacket eventually errors; confirm.
	var encodedImage string
	for {
		if queue != nil && cursor != nil && rtspClient != nil {
			pkt, err := cursor.ReadPacket()
			if err == nil {
				if !pkt.IsKeyFrame {
					continue
				}
				var img image.YCbCr
				img, err = (*rtspClient).DecodePacket(pkt)
				if err == nil {
					bytes, _ := utils.ImageToBytes(&img)
					encodedImage = base64.StdEncoding.EncodeToString(bytes)
					break
				}
				// Decoding failed: give up rather than retry.
				break
			}
		} else {
			break
		}
	}
	return encodedImage
}
// JpegImage grabs a single keyframe snapshot from the camera and returns the
// decoded YCbCr frame. It prefers the sub stream (lower resolution) and
// falls back to the main stream; the zero-value image is returned when no
// frame could be obtained.
func JpegImage(captureDevice *Capture, communication *models.Communication) image.YCbCr {
	// Pick the right client and matching packet queue.
	var queue *packets.Queue
	rtspClient := captureDevice.RTSPSubClient
	if rtspClient == nil {
		rtspClient = captureDevice.RTSPClient
		queue = communication.Queue
	} else {
		queue = communication.SubQueue
	}
	cursor := queue.Latest()
	// Wait for a keyframe; read errors cause a retry, matching the
	// original control flow.
	var frame image.YCbCr
	for queue != nil && cursor != nil && rtspClient != nil {
		pkt, err := cursor.ReadPacket()
		if err != nil {
			continue
		}
		if !pkt.IsKeyFrame {
			continue
		}
		frame, _ = (*rtspClient).DecodePacket(pkt)
		break
	}
	return frame
}
func convertPTS(v time.Duration) uint64 {
return uint64(v.Milliseconds())
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,135 @@
// Package cloud contains the Dropbox implementation of the Cloud interface.
// It uses the Dropbox SDK to upload files to Dropbox.
package cloud
import (
"bytes"
"errors"
"os"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/users"
"github.com/gin-gonic/gin"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
)
// UploadDropbox uploads the file to your Dropbox account using the access token and directory.
// It returns (uploaded, keepFile, err): the first bool reports whether the
// upload succeeded, the second whether the caller should keep the local file.
func UploadDropbox(configuration *models.Configuration, fileName string) (bool, bool, error) {
	config := configuration.Config
	token := config.Dropbox.AccessToken
	directory := config.Dropbox.Directory
	// Normalise the target directory so it always ends with a slash.
	if directory != "" && directory[len(directory)-1:] != "/" {
		directory = directory + "/"
	}
	if token == "" {
		msg := "UploadDropbox: Dropbox not properly configured"
		log.Log.Info(msg)
		return false, true, errors.New(msg)
	}
	// Upload to Dropbox
	log.Log.Info("UploadDropbox: Uploading to Dropbox")
	log.Log.Info("UploadDropbox: Upload started for " + fileName)
	fullname := "data/recordings/" + fileName
	dConfig := dropbox.Config{
		Token:    token,
		LogLevel: dropbox.LogInfo, // if needed, set the desired logging level. Default is off
	}
	file, err := os.OpenFile(fullname, os.O_RDWR, 0755)
	if file != nil {
		defer file.Close()
	}
	if err != nil {
		log.Log.Error("UploadDropbox: Error opening file: " + err.Error())
		return false, true, err
	}
	// Upload the file, overwriting any existing object at the same path.
	dbf := files.New(dConfig)
	res, err := dbf.Upload(&files.UploadArg{
		CommitInfo: files.CommitInfo{
			Path: "/" + directory + fileName,
			Mode: &files.WriteMode{
				Tagged: dropbox.Tagged{
					Tag: "overwrite",
				},
			},
		},
	}, file)
	if err != nil {
		log.Log.Error("UploadDropbox: Error uploading file: " + err.Error())
		return false, false, err
	}
	log.Log.Info("UploadDropbox: File uploaded successfully, " + res.Name)
	return true, true, nil
}
// VerifyDropbox verifies if the Dropbox token is valid and it is able to upload a file.
// It answers the HTTP request directly on the supplied gin context:
// 200 when the account lookup and a test upload both succeed, 400 otherwise.
func VerifyDropbox(config models.Config, c *gin.Context) {
	token := config.Dropbox.AccessToken
	if token == "" {
		c.JSON(400, models.APIResponse{
			Data: "Dropbox token is not set.",
		})
		return
	}

	// Normalize the target directory so it always ends with a slash.
	directory := config.Dropbox.Directory
	if directory != "" && directory[len(directory)-1:] != "/" {
		directory = directory + "/"
	}

	dConfig := dropbox.Config{
		Token:    token,
		LogLevel: dropbox.LogInfo, // if needed, set the desired logging level. Default is off
	}

	// First validate the token by fetching the account behind it.
	dbx := users.New(dConfig)
	if _, err := dbx.GetCurrentAccount(); err != nil {
		c.JSON(400, models.APIResponse{
			Data: "Something went wrong while reaching the Dropbox API: " + err.Error(),
		})
		return
	}

	// Upload the file
	// NOTE(review): "kerbers-agent-test.mp4" looks like a typo for
	// "kerberos-agent-test.mp4" — confirm before changing the remote path.
	file := bytes.NewReader(TestFile)
	dbf := files.New(dConfig)
	_, err := dbf.Upload(&files.UploadArg{
		CommitInfo: files.CommitInfo{
			Path: "/" + directory + "kerbers-agent-test.mp4",
			Mode: &files.WriteMode{
				Tagged: dropbox.Tagged{
					Tag: "overwrite",
				},
			},
		},
	}, file)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Data: "Something went wrong while reaching the Dropbox API: " + err.Error(),
		})
		return
	}
	c.JSON(200, models.APIResponse{
		Data: "Dropbox is working fine.",
	})
}

View File

@@ -2,6 +2,7 @@ package cloud
import (
"crypto/tls"
"errors"
"net/http"
"net/url"
"os"
@@ -13,11 +14,10 @@ import (
"github.com/minio/minio-go/v6"
)
func UploadS3(configuration *models.Configuration, fileName string, directory string) bool {
func UploadS3(configuration *models.Configuration, fileName string) (bool, bool, error) {
config := configuration.Config
//fmt.Println("Uploading...")
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
// - Timestamp
@@ -27,6 +27,13 @@ func UploadS3(configuration *models.Configuration, fileName string, directory st
// - Number of changes
// - Token
if config.S3 == nil {
errorMessage := "UploadS3: Uploading Failed, as no settings found"
log.Log.Error(errorMessage)
return false, false, errors.New(errorMessage)
}
// Legacy support, should get rid of it!
aws_access_key_id := config.S3.Publickey
aws_secret_access_key := config.S3.Secretkey
aws_region := config.S3.Region
@@ -39,9 +46,18 @@ func UploadS3(configuration *models.Configuration, fileName string, directory st
aws_secret_access_key = config.HubPrivateKey
}
// Check if we have some credentials otherwise we abort the request.
if aws_access_key_id == "" || aws_secret_access_key == "" {
errorMessage := "UploadS3: Uploading Failed, as no credentials found"
log.Log.Error(errorMessage)
return false, false, errors.New(errorMessage)
}
s3Client, err := minio.NewWithRegion("s3.amazonaws.com", aws_access_key_id, aws_secret_access_key, true, aws_region)
if err != nil {
log.Log.Error(err.Error())
errorMessage := "UploadS3: " + err.Error()
log.Log.Error(errorMessage)
return false, true, errors.New(errorMessage)
}
// Check if we need to use the proxy.
@@ -57,9 +73,9 @@ func UploadS3(configuration *models.Configuration, fileName string, directory st
fileParts := strings.Split(fileName, "_")
if len(fileParts) == 1 {
log.Log.Error("ERROR: " + fileName + " is not a valid name.")
os.Remove(directory + "/" + fileName)
return false
errorMessage := "UploadS3: " + fileName + " is not a valid name."
log.Log.Error(errorMessage)
return false, true, errors.New(errorMessage)
}
deviceKey := config.Key
@@ -73,18 +89,21 @@ func UploadS3(configuration *models.Configuration, fileName string, directory st
fullname := "data/recordings/" + fileName
file, err := os.OpenFile(fullname, os.O_RDWR, 0755)
defer file.Close()
if file != nil {
defer file.Close()
}
if err != nil {
log.Log.Error("UploadS3: " + err.Error())
os.Remove(directory + "/" + fileName)
return false
errorMessage := "UploadS3: " + err.Error()
log.Log.Error(errorMessage)
return false, true, errors.New(errorMessage)
}
fileInfo, err := file.Stat()
if err != nil {
log.Log.Error("UploadS3: " + err.Error())
os.Remove(directory + "/" + fileName)
return false
errorMessage := "UploadS3: " + err.Error()
log.Log.Error(errorMessage)
return false, true, errors.New(errorMessage)
}
n, err := s3Client.PutObject(config.S3.Bucket,
@@ -108,11 +127,11 @@ func UploadS3(configuration *models.Configuration, fileName string, directory st
})
if err != nil {
log.Log.Error("UploadS3: Uploading Failed, " + err.Error())
return false
errorMessage := "UploadS3: Uploading Failed, " + err.Error()
log.Log.Error(errorMessage)
return false, true, errors.New(errorMessage)
} else {
log.Log.Info("UploadS3: Upload Finished, file has been uploaded to bucket: " + strconv.FormatInt(n, 10))
os.Remove(directory + "/" + fileName)
return true
return true, true, nil
}
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,131 @@
package cloud
import (
"crypto/tls"
"errors"
"io/ioutil"
"net/http"
"os"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
)
// UploadKerberosHub uploads a recording from data/recordings to Kerberos Hub.
//
// It returns three values:
//   - uploaded: the file was successfully transferred.
//   - configured: the Hub settings were complete enough to attempt an upload.
//   - err: the error that aborted the upload, if any.
func UploadKerberosHub(configuration *models.Configuration, fileName string) (bool, bool, error) {
	config := configuration.Config

	// Abort early when the Kerberos Hub connection is not fully configured.
	// Guard config.S3 for nil before dereferencing Region (same guard the
	// S3 uploader applies); previously a missing S3 block would panic here.
	if config.HubURI == "" ||
		config.HubKey == "" ||
		config.HubPrivateKey == "" ||
		config.S3 == nil || config.S3.Region == "" {
		err := "UploadKerberosHub: Kerberos Hub not properly configured."
		log.Log.Info(err)
		return false, false, errors.New(err)
	}

	// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
	// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
	// - Timestamp
	// - Size + microseconds
	// - device
	// - Region
	// - Number of changes
	// - Token

	log.Log.Info("UploadKerberosHub: Uploading to Kerberos Hub (" + config.HubURI + ")")
	log.Log.Info("UploadKerberosHub: Upload started for " + fileName)
	fullname := "data/recordings/" + fileName

	// Check if we still have the file otherwise we abort the request.
	file, err := os.OpenFile(fullname, os.O_RDWR, 0755)
	if file != nil {
		defer file.Close()
	}
	if err != nil {
		err := "UploadKerberosHub: Upload Failed, file doesn't exists anymore."
		log.Log.Info(err)
		return false, false, errors.New(err)
	}

	// Check if we are allowed to upload to the hub with these credentials.
	// There might be different reasons like (muted, read-only..)
	req, err := http.NewRequest("HEAD", config.HubURI+"/storage/upload", nil)
	if err != nil {
		errorMessage := "UploadKerberosHub: error reading HEAD request, " + config.HubURI + "/storage: " + err.Error()
		log.Log.Error(errorMessage)
		return false, true, errors.New(errorMessage)
	}
	req.Header.Set("X-Kerberos-Storage-FileName", fileName)
	req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
	req.Header.Set("X-Kerberos-Storage-Device", config.Key)
	req.Header.Set("X-Kerberos-Hub-PublicKey", config.HubKey)
	req.Header.Set("X-Kerberos-Hub-PrivateKey", config.HubPrivateKey)
	req.Header.Set("X-Kerberos-Hub-Region", config.S3.Region)

	// Allow disabling TLS verification for self-signed deployments.
	var client *http.Client
	if os.Getenv("AGENT_TLS_INSECURE") == "true" {
		tr := &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
		client = &http.Client{Transport: tr}
	} else {
		client = &http.Client{}
	}

	resp, err := client.Do(req)
	if resp != nil {
		defer resp.Body.Close()
	}
	// A failed HEAD probe (network error) is deliberately not fatal: we fall
	// through and let the POST decide, matching the original behavior.
	if err == nil && resp != nil {
		if resp.StatusCode == 200 {
			// NOTE: the private key is intentionally not logged anymore;
			// credentials must never end up in the logs.
			log.Log.Info("UploadKerberosHub: Upload allowed using the credentials provided (" + config.HubKey + ")")
		} else {
			log.Log.Info("UploadKerberosHub: Upload NOT allowed using the credentials provided (" + config.HubKey + ")")
			return false, true, nil
		}
	}

	// Now we know we are allowed to upload to the hub, we can start uploading.
	req, err = http.NewRequest("POST", config.HubURI+"/storage/upload", file)
	if err != nil {
		// Previously this message referenced config.KStorage.URI, which belongs
		// to Kerberos Vault and may even be nil here; use the Hub URI instead.
		errorMessage := "UploadKerberosHub: error reading POST request, " + config.HubURI + "/storage/upload: " + err.Error()
		log.Log.Error(errorMessage)
		return false, true, errors.New(errorMessage)
	}
	req.Header.Set("Content-Type", "video/mp4")
	req.Header.Set("X-Kerberos-Storage-FileName", fileName)
	req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
	req.Header.Set("X-Kerberos-Storage-Device", config.Key)
	req.Header.Set("X-Kerberos-Hub-PublicKey", config.HubKey)
	req.Header.Set("X-Kerberos-Hub-PrivateKey", config.HubPrivateKey)
	req.Header.Set("X-Kerberos-Hub-Region", config.S3.Region)

	resp, err = client.Do(req)
	if resp != nil {
		defer resp.Body.Close()
	}
	if err != nil {
		errorMessage := "UploadKerberosHub: Upload Failed, " + err.Error()
		log.Log.Info(errorMessage)
		return false, true, errors.New(errorMessage)
	}
	// client.Do returned no error, so resp is non-nil and the body is readable.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Previously this path fell through to a nil outer err (the ReadAll err
		// was shadowed) and panicked on err.Error(); handle it explicitly.
		errorMessage := "UploadKerberosHub: Upload Failed, " + err.Error()
		log.Log.Info(errorMessage)
		return false, true, errors.New(errorMessage)
	}
	if resp.StatusCode == 200 {
		log.Log.Info("UploadKerberosHub: Upload Finished, " + resp.Status + ".")
		return true, true, nil
	}
	log.Log.Info("UploadKerberosHub: Upload Failed, " + resp.Status + ", " + string(body))
	return false, true, nil
}

View File

@@ -1,6 +1,8 @@
package cloud
import (
"crypto/tls"
"errors"
"io/ioutil"
"net/http"
"os"
@@ -9,19 +11,19 @@ import (
"github.com/kerberos-io/agent/machinery/src/models"
)
func UploadKerberosVault(configuration *models.Configuration, fileName string, directory string) bool {
func UploadKerberosVault(configuration *models.Configuration, fileName string) (bool, bool, error) {
config := configuration.Config
if config.KStorage.AccessKey == "" ||
config.KStorage.SecretAccessKey == "" ||
config.KStorage.Provider == "" ||
config.KStorage.Directory == "" ||
config.KStorage.URI == "" {
log.Log.Info("UploadKerberosVault: Kerberos Vault not properly configured.")
err := "UploadKerberosVault: Kerberos Vault not properly configured."
log.Log.Info(err)
return false, false, errors.New(err)
}
//fmt.Println("Uploading...")
// timestamp_microseconds_instanceName_regionCoordinates_numberOfChanges_token
// 1564859471_6-474162_oprit_577-283-727-375_1153_27.mp4
// - Timestamp
@@ -30,21 +32,20 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string, d
// - Region
// - Number of changes
// - Token
// KerberosCloud, this means storage is disabled and proxy enabled.
log.Log.Info("UploadKerberosVault: Uploading to Kerberos Vault (" + config.KStorage.URI + ")")
log.Log.Info("UploadKerberosVault: Upload started for " + fileName)
fullname := "data/recordings/" + fileName
file, err := os.OpenFile(fullname, os.O_RDWR, 0755)
if err != nil {
log.Log.Info("UploadKerberosVault: Upload Failed, file doesn't exists anymore.")
os.Remove(directory + "/" + fileName)
return false
if file != nil {
defer file.Close()
}
if err != nil {
err := "UploadKerberosVault: Upload Failed, file doesn't exists anymore."
log.Log.Info(err)
return false, false, errors.New(err)
}
defer file.Close()
publicKey := config.KStorage.CloudKey
// This is the new way ;)
@@ -54,7 +55,9 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string, d
req, err := http.NewRequest("POST", config.KStorage.URI+"/storage", file)
if err != nil {
log.Log.Error("Error reading request. " + err.Error())
errorMessage := "UploadKerberosVault: error reading request, " + config.KStorage.URI + "/storage: " + err.Error()
log.Log.Error(errorMessage)
return false, true, errors.New(errorMessage)
}
req.Header.Set("Content-Type", "video/mp4")
req.Header.Set("X-Kerberos-Storage-CloudKey", publicKey)
@@ -65,11 +68,18 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string, d
req.Header.Set("X-Kerberos-Storage-Device", config.Key)
req.Header.Set("X-Kerberos-Storage-Capture", "IPCamera")
req.Header.Set("X-Kerberos-Storage-Directory", config.KStorage.Directory)
//client := &http.Client{Timeout: time.Second * 30}
client := &http.Client{}
var client *http.Client
if os.Getenv("AGENT_TLS_INSECURE") == "true" {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
client = &http.Client{Transport: tr}
} else {
client = &http.Client{}
}
resp, err := client.Do(req)
if resp != nil {
defer resp.Body.Close()
}
@@ -80,17 +90,16 @@ func UploadKerberosVault(configuration *models.Configuration, fileName string, d
if err == nil {
if resp.StatusCode == 200 {
log.Log.Info("UploadKerberosVault: Upload Finished, " + resp.Status + ", " + string(body))
// We will remove the file from disk as well
os.Remove(fullname)
os.Remove(directory + "/" + fileName)
return true, true, nil
} else {
log.Log.Info("UploadKerberosVault: Upload Failed, " + resp.Status + ", " + string(body))
return false, true, nil
}
resp.Body.Close()
}
}
} else {
log.Log.Info("UploadKerberosVault: Upload Failed, " + err.Error())
}
return true
errorMessage := "UploadKerberosVault: Upload Failed, " + err.Error()
log.Log.Info(errorMessage)
return false, true, errors.New(errorMessage)
}

View File

@@ -1,159 +0,0 @@
package components
import (
"bufio"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"sort"
"time"
"github.com/InVisionApp/conjungo"
"github.com/kerberos-io/agent/machinery/src/database"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"gopkg.in/mgo.v2/bson"
)
// GetSnapshot returns the second-oldest file (by modification time) found in
// ./data/snapshots, base64-encoded. When the directory cannot be read or
// holds fewer than two files, an empty string is returned.
func GetSnapshot() string {
	var snapshot string
	entries, err := ioutil.ReadDir("./data/snapshots")
	if err != nil || len(entries) <= 1 {
		return snapshot
	}
	// Oldest first, so entries[1] is the second-oldest snapshot.
	sort.Slice(entries, func(a, b int) bool {
		return entries[a].ModTime().Before(entries[b].ModTime())
	})
	handle, _ := os.Open("./data/snapshots/" + entries[1].Name())
	defer handle.Close()
	// Read entire JPG into byte slice.
	content, _ := ioutil.ReadAll(bufio.NewReader(handle))
	// Encode as base64.
	snapshot = base64.StdEncoding.EncodeToString(content)
	return snapshot
}
// ReadUserConfig Reads the user configuration of the Kerberos Open Source instance.
// This will return a models.User struct including the username, password,
// selected language, and if the installation was completed or not.
// It blocks, retrying every 5 seconds, until ./data/config/user.json can be
// opened and parsed as valid JSON.
func ReadUserConfig() (userConfig models.User) {
	for {
		jsonFile, err := os.Open("./data/config/user.json")
		if err != nil {
			fmt.Println(err)
			fmt.Println("Config file is not found " + "./data/config/user.json" + ", trying again in 5s.")
			time.Sleep(5 * time.Second)
			// Close on the nil handle mirrors the original flow; it is a no-op error.
			jsonFile.Close()
			continue
		}
		fmt.Println("Successfully Opened user.json")
		byteValue, _ := ioutil.ReadAll(jsonFile)
		if err = json.Unmarshal(byteValue, &userConfig); err == nil {
			jsonFile.Close()
			break
		}
		fmt.Println("JSON file not valid: " + err.Error())
		time.Sleep(5 * time.Second)
		jsonFile.Close()
	}
	return
}
// OpenConfig loads the agent configuration into the provided struct, choosing
// the backing store from the DEPLOYMENT / MACHINERY_ENVIRONMENT environment
// variables: MongoDB for "factory"/"kubernetes" deployments, a local JSON file
// otherwise. The local branch blocks, retrying every 5 seconds, until a valid
// config.json is read.
func OpenConfig(configuration *models.Configuration) {
	// We are checking which deployment this is running, so we can load
	// into the configuration as expected.
	if os.Getenv("DEPLOYMENT") == "factory" || os.Getenv("MACHINERY_ENVIRONMENT") == "kubernetes" {
		// Factory deployment means that configuration is stored in MongoDB.
		// Multiple agents have their configuration stored there, and a shared
		// "global" document provides defaults for all of them.
		session := database.New().Copy()
		defer session.Close()
		db := session.DB(database.DatabaseName)
		collection := db.C("configuration")
		// Load the shared defaults and this agent's own document
		// (selected by DEPLOYMENT_NAME).
		collection.Find(bson.M{
			"type": "global",
		}).One(&configuration.GlobalConfig)
		collection.Find(bson.M{
			"type": "config",
			"name": os.Getenv("DEPLOYMENT_NAME"),
		}).One(&configuration.CustomConfig)
		// We will merge both configs in a single config file.
		// For strings, a non-empty source value wins over the target value,
		// so agent-specific settings override the global defaults.
		opts := conjungo.NewOptions()
		opts.SetTypeMergeFunc(
			reflect.TypeOf(""),
			func(t, s reflect.Value, o *conjungo.Options) (reflect.Value, error) {
				targetStr, _ := t.Interface().(string)
				sourceStr, _ := s.Interface().(string)
				finalStr := targetStr
				if sourceStr != "" {
					finalStr = sourceStr
				}
				return reflect.ValueOf(finalStr), nil
			},
		)
		// Merge Config toplevel: global first, then the custom overrides.
		conjungo.Merge(&configuration.Config, configuration.GlobalConfig, opts)
		conjungo.Merge(&configuration.Config, configuration.CustomConfig, opts)
		// Merge Kerberos Vault settings into a fresh struct so the merged
		// Config points at its own copy rather than either source.
		var kerberosvault models.KStorage
		conjungo.Merge(&kerberosvault, configuration.GlobalConfig.KStorage, opts)
		conjungo.Merge(&kerberosvault, configuration.CustomConfig.KStorage, opts)
		configuration.Config.KStorage = &kerberosvault
		// Merge Kerberos S3 settings, same pattern as the Vault settings.
		var s3 models.S3
		conjungo.Merge(&s3, configuration.GlobalConfig.S3, opts)
		conjungo.Merge(&s3, configuration.CustomConfig.S3, opts)
		configuration.Config.S3 = &s3
	} else if os.Getenv("DEPLOYMENT") == "" || os.Getenv("DEPLOYMENT") == "agent" {
		// Local deployment means we do a stand-alone installation
		// Configuration is stored into a json file, and there is only 1 agent.
		// Open device config; retry forever until the file exists and parses.
		for {
			jsonFile, err := os.Open("./data/config/config.json")
			if err != nil {
				log.Log.Error("Config file is not found " + "./data/config/config.json" + ", trying again in 5s.")
				time.Sleep(5 * time.Second)
			} else {
				log.Log.Info("Successfully Opened config.json from " + configuration.Name)
				byteValue, _ := ioutil.ReadAll(jsonFile)
				// The same bytes populate both Config and CustomConfig;
				// both must parse before we stop retrying.
				err = json.Unmarshal(byteValue, &configuration.Config)
				jsonFile.Close()
				if err != nil {
					fmt.Println("JSON file not valid: " + err.Error())
				} else {
					err = json.Unmarshal(byteValue, &configuration.CustomConfig)
					if err != nil {
						fmt.Println("JSON file not valid: " + err.Error())
					} else {
						break
					}
				}
				time.Sleep(5 * time.Second)
			}
			// Close after an error pass; on the nil/already-closed handle this
			// only yields an ignored error.
			jsonFile.Close()
		}
	}
	return
}

View File

@@ -1,31 +1,63 @@
package components
import (
"context"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
mqtt "github.com/eclipse/paho.mqtt.golang"
"github.com/gin-gonic/gin"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/cloud"
"github.com/kerberos-io/agent/machinery/src/computervision"
configService "github.com/kerberos-io/agent/machinery/src/config"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/onvif"
"github.com/kerberos-io/agent/machinery/src/packets"
routers "github.com/kerberos-io/agent/machinery/src/routers/mqtt"
"github.com/kerberos-io/joy4/av/pubsub"
"github.com/kerberos-io/agent/machinery/src/utils"
"github.com/tevino/abool"
)
func Bootstrap(configuration *models.Configuration, communication *models.Communication) {
log.Log.Debug("Bootstrap: started")
func Bootstrap(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
log.Log.Debug("components.Kerberos.Bootstrap(): bootstrapping the kerberos agent.")
// We will keep track of the Kerberos Agent up time
// This is send to Kerberos Hub in a heartbeat.
uptimeStart := time.Now()
// Initiate the packet counter, this is being used to detect
// if a camera is going blocky, or got disconnected.
var packageCounter atomic.Value
packageCounter.Store(int64(0))
communication.PackageCounter = &packageCounter
var packageCounterSub atomic.Value
packageCounterSub.Store(int64(0))
communication.PackageCounterSub = &packageCounterSub
// This is used when the last packet was received (timestamp),
// this metric is used to determine if the camera is still online/connected.
var lastPacketTimer atomic.Value
packageCounter.Store(int64(0))
communication.LastPacketTimer = &lastPacketTimer
var lastPacketTimerSub atomic.Value
packageCounterSub.Store(int64(0))
communication.LastPacketTimerSub = &lastPacketTimerSub
// This is used to understand if we have a working Kerberos Hub connection
// cloudTimestamp will be updated when successfully sending heartbeats.
var cloudTimestamp atomic.Value
cloudTimestamp.Store(int64(0))
communication.CloudTimestamp = &cloudTimestamp
communication.HandleStream = make(chan string, 1)
communication.HandleSubStream = make(chan string, 1)
communication.HandleUpload = make(chan string, 1)
communication.HandleHeartBeat = make(chan string, 1)
communication.HandleLiveSD = make(chan int64, 1)
@@ -33,149 +65,668 @@ func Bootstrap(configuration *models.Configuration, communication *models.Commun
communication.HandleLiveHDPeers = make(chan string, 1)
communication.IsConfiguring = abool.New()
cameraSettings := &models.Camera{}
// Before starting the agent, we have a control goroutine, that might
// do several checks to see if the agent is still operational.
go ControlAgent(communication)
// Handle heartbeats
go cloud.HandleHeartBeat(configuration, communication, uptimeStart)
// We'll create a MQTT handler, which will be used to communicate with Kerberos Hub.
// Configure a MQTT client which helps for a bi-directional communication
mqttClient := routers.ConfigureMQTT(configDirectory, configuration, communication)
// Run the agent and fire up all the other
// goroutines which do image capture, motion detection, onvif, etc.
for {
// This will blocking until receiving a signal to be restarted, reconfigured, stopped, etc.
status := RunAgent(configuration, communication)
status := RunAgent(configDirectory, configuration, communication, mqttClient, uptimeStart, cameraSettings, captureDevice)
if status == "stop" {
break
log.Log.Info("components.Kerberos.Bootstrap(): shutting down the agent in 3 seconds.")
time.Sleep(time.Second * 3)
os.Exit(0)
}
// We will re open the configuration, might have changed :O!
OpenConfig(configuration)
if status == "not started" {
// We will re open the configuration, might have changed :O!
configService.OpenConfig(configDirectory, configuration)
// We will override the configuration with the environment variables
configService.OverrideWithEnvironmentVariables(configuration)
}
// Reset the MQTT client, might have provided new information, so we need to reconnect.
if routers.HasMQTTClientModified(configuration) {
routers.DisconnectMQTT(mqttClient, &configuration.Config)
mqttClient = routers.ConfigureMQTT(configDirectory, configuration, communication)
}
// We will create a new cancelable context, which will be used to cancel and restart.
// This is used to restart the agent when the configuration is updated.
ctx, cancel := context.WithCancel(context.Background())
communication.Context = &ctx
communication.CancelContext = &cancel
}
log.Log.Debug("Bootstrap: finished")
}
func RunAgent(configuration *models.Configuration, communication *models.Communication) string {
log.Log.Debug("RunAgent: started")
func RunAgent(configDirectory string, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, uptimeStart time.Time, cameraSettings *models.Camera, captureDevice *capture.Capture) string {
log.Log.Info("components.Kerberos.RunAgent(): Creating camera and processing threads.")
config := configuration.Config
// Currently only support H264 encoded cameras, this will change.
// Establishing the camera connection
log.Log.Info("RunAgent: opening RTSP stream")
rtspUrl := config.Capture.IPCamera.RTSP
infile, streams, err := capture.OpenRTSP(rtspUrl)
//var decoder *ffmpeg.VideoDecoder
var queue *pubsub.Queue
status := "not started"
if err == nil {
// Currently only support H264 encoded cameras, this will change.
// Establishing the camera connection without backchannel if no substream
rtspUrl := config.Capture.IPCamera.RTSP
rtspClient := captureDevice.SetMainClient(rtspUrl)
if rtspUrl != "" {
err := rtspClient.Connect(context.Background())
if err != nil {
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP stream: " + err.Error())
rtspClient.Close()
rtspClient = nil
time.Sleep(time.Second * 3)
return status
}
} else {
log.Log.Error("components.Kerberos.RunAgent(): no rtsp url found in config, please provide one.")
rtspClient = nil
time.Sleep(time.Second * 3)
return status
}
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP stream: " + rtspUrl)
// Get the video streams from the RTSP server.
videoStreams, err := rtspClient.GetVideoStreams()
if err != nil || len(videoStreams) == 0 {
log.Log.Error("components.Kerberos.RunAgent(): no video stream found, might be the wrong codec (we only support H264 for the moment)")
rtspClient.Close()
time.Sleep(time.Second * 3)
return status
}
// Get the video stream from the RTSP server.
videoStream := videoStreams[0]
// Get some information from the video stream.
width := videoStream.Width
height := videoStream.Height
// Set config values as well
configuration.Config.Capture.IPCamera.Width = width
configuration.Config.Capture.IPCamera.Height = height
var queue *packets.Queue
var subQueue *packets.Queue
// Create a packet queue, which is filled by the HandleStream routing
// and consumed by all other routines: motion, livestream, etc.
if config.Capture.PreRecording <= 0 {
config.Capture.PreRecording = 1
log.Log.Warning("components.Kerberos.RunAgent(): Prerecording value not found in config or invalid value! Found: " + strconv.FormatInt(config.Capture.PreRecording, 10))
}
// We might have a secondary rtsp url, so we might need to use that for livestreaming let us check first!
subStreamEnabled := false
subRtspUrl := config.Capture.IPCamera.SubRTSP
var videoSubStreams []packets.Stream
if subRtspUrl != "" && subRtspUrl != rtspUrl {
// For the sub stream we will not enable backchannel.
subStreamEnabled = true
rtspSubClient := captureDevice.SetSubClient(subRtspUrl)
captureDevice.RTSPSubClient = rtspSubClient
err := rtspSubClient.Connect(context.Background())
if err != nil {
log.Log.Error("components.Kerberos.RunAgent(): error connecting to RTSP sub stream: " + err.Error())
time.Sleep(time.Second * 3)
return status
}
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP sub stream: " + rtspUrl)
// Get the video streams from the RTSP server.
videoSubStreams, err = rtspSubClient.GetVideoStreams()
if err != nil || len(videoSubStreams) == 0 {
log.Log.Error("components.Kerberos.RunAgent(): no video sub stream found, might be the wrong codec (we only support H264 for the moment)")
rtspSubClient.Close()
time.Sleep(time.Second * 3)
return status
}
// Get the video stream from the RTSP server.
videoSubStream := videoSubStreams[0]
width := videoSubStream.Width
height := videoSubStream.Height
// Set config values as well
configuration.Config.Capture.IPCamera.Width = width
configuration.Config.Capture.IPCamera.Height = height
}
if cameraSettings.RTSP != rtspUrl ||
cameraSettings.SubRTSP != subRtspUrl ||
cameraSettings.Width != width ||
cameraSettings.Height != height {
// TODO: this condition is used to reset the decoder when the camera settings change.
// The main idea is that you only set the decoder once, and then reuse it on each restart (no new memory allocation).
// However the stream settings of the camera might have been changed, and so the decoder might need to be reloaded.
// .... Not used for the moment ....
if cameraSettings.RTSP != "" && cameraSettings.SubRTSP != "" && cameraSettings.Initialized {
//decoder.Close()
//if subStreamEnabled {
// subDecoder.Close()
//}
}
// At some routines we will need to decode the image.
// Make sure its properly locked as we only have a single decoder.
var decoderMutex sync.Mutex
decoder := capture.GetVideoDecoder(streams)
log.Log.Info("components.Kerberos.RunAgent(): camera settings changed, reloading decoder")
//capture.GetVideoDecoder(decoder, streams)
//if subStreamEnabled {
// capture.GetVideoDecoder(subDecoder, subStreams)
//}
// Create a packet queue, which is filled by the HandleStream routing
// and consumed by all other routines: motion, livestream, etc.
queue = pubsub.NewQueue()
queue.SetMaxGopCount(5) // GOP time frame is set to 5.
queue.WriteHeader(streams)
// Configure a MQTT client which helps for a bi-directional communication
communication.HandleONVIF = make(chan models.OnvifAction, 1)
mqttClient := routers.ConfigureMQTT(configuration, communication)
// Handle heartbeats
go cloud.HandleHeartBeat(configuration, communication)
// Handle the camera stream
go capture.HandleStream(infile, queue, communication) //, &wg)
// Handle processing of motion
motionCursor := queue.Oldest()
communication.HandleMotion = make(chan int64, 1)
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, decoder, &decoderMutex)
// Handle livestream SD (low resolution over MQTT)
livestreamCursor := queue.Oldest()
go cloud.HandleLiveStreamSD(livestreamCursor, configuration, communication, mqttClient, decoder, &decoderMutex)
// Handle livestream HD (high resolution over WEBRTC)
livestreamHDCursor := queue.Oldest()
communication.HandleLiveHDHandshake = make(chan models.SDPPayload, 1)
go cloud.HandleLiveStreamHD(livestreamHDCursor, configuration, communication, mqttClient, streams, decoder, &decoderMutex)
// Handle recording, will write an mp4 to disk.
recordingCursor := queue.Oldest()
go capture.HandleRecordStream(recordingCursor, configuration, communication, streams)
// Handle Upload to cloud provider (Kerberos Hub, Kerberos Vault and others)
go cloud.HandleUpload(configuration, communication)
// Handle ONVIF actions
go onvif.HandleONVIFActions(configuration, communication)
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// This will go into a blocking state, once this channel is triggered
// the agent will cleanup and restart.
status = <-communication.HandleBootstrap
// Here we are cleaning up everything!
communication.HandleStream <- "stop"
communication.HandleHeartBeat <- "stop"
communication.HandleUpload <- "stop"
infile.Close()
queue.Close()
close(communication.HandleONVIF)
close(communication.HandleLiveHDHandshake)
close(communication.HandleMotion)
routers.DisconnectMQTT(mqttClient)
decoder.Close()
// Waiting for some seconds to make sure everything is properly closed.
log.Log.Info("RunAgent: waiting 1 second to make sure everything is properly closed.")
time.Sleep(time.Second * 1)
cameraSettings.RTSP = rtspUrl
cameraSettings.SubRTSP = subRtspUrl
cameraSettings.Width = width
cameraSettings.Height = height
cameraSettings.Initialized = true
} else {
log.Log.Error("Something went wrong while opening RTSP: " + err.Error())
time.Sleep(time.Second * 3)
log.Log.Info("components.Kerberos.RunAgent(): camera settings did not change, keeping decoder")
}
log.Log.Debug("RunAgent: finished")
// We are creating a queue to store the RTSP frames in, these frames will be
// processed by the different consumers: motion detection, recording, etc.
queue = packets.NewQueue()
communication.Queue = queue
// Set the maximum GOP count, this is used to determine the pre-recording time.
log.Log.Info("components.Kerberos.RunAgent(): SetMaxGopCount was set with: " + strconv.Itoa(int(config.Capture.PreRecording)+1))
queue.SetMaxGopCount(int(config.Capture.PreRecording) + 1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
queue.WriteHeader(videoStreams)
go rtspClient.Start(context.Background(), "main", queue, configuration, communication)
// Main stream is connected and ready to go.
communication.MainStreamConnected = true
// Try to create backchannel
rtspBackChannelClient := captureDevice.SetBackChannelClient(rtspUrl)
err = rtspBackChannelClient.ConnectBackChannel(context.Background())
if err == nil {
log.Log.Info("components.Kerberos.RunAgent(): opened RTSP backchannel stream: " + rtspUrl)
go rtspBackChannelClient.StartBackChannel(context.Background())
}
rtspSubClient := captureDevice.RTSPSubClient
if subStreamEnabled && rtspSubClient != nil {
subQueue = packets.NewQueue()
communication.SubQueue = subQueue
subQueue.SetMaxGopCount(1) // GOP time frame is set to prerecording (we'll add 2 gops to leave some room).
subQueue.WriteHeader(videoSubStreams)
go rtspSubClient.Start(context.Background(), "sub", subQueue, configuration, communication)
// Sub stream is connected and ready to go.
communication.SubStreamConnected = true
}
// Handle livestream SD (low resolution over MQTT)
if subStreamEnabled {
livestreamCursor := subQueue.Latest()
go cloud.HandleLiveStreamSD(livestreamCursor, configuration, communication, mqttClient, rtspSubClient)
} else {
livestreamCursor := queue.Latest()
go cloud.HandleLiveStreamSD(livestreamCursor, configuration, communication, mqttClient, rtspClient)
}
// Handle livestream HD (high resolution over WEBRTC)
communication.HandleLiveHDHandshake = make(chan models.RequestHDStreamPayload, 1)
if subStreamEnabled {
livestreamHDCursor := subQueue.Latest()
go cloud.HandleLiveStreamHD(livestreamHDCursor, configuration, communication, mqttClient, rtspSubClient)
} else {
livestreamHDCursor := queue.Latest()
go cloud.HandleLiveStreamHD(livestreamHDCursor, configuration, communication, mqttClient, rtspClient)
}
// Handle recording, will write an mp4 to disk.
go capture.HandleRecordStream(queue, configDirectory, configuration, communication, rtspClient)
// Handle processing of motion
communication.HandleMotion = make(chan models.MotionDataPartial, 1)
if subStreamEnabled {
motionCursor := subQueue.Latest()
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, rtspSubClient)
} else {
motionCursor := queue.Latest()
go computervision.ProcessMotion(motionCursor, configuration, communication, mqttClient, rtspClient)
}
// Handle Upload to cloud provider (Kerberos Hub, Kerberos Vault and others)
go cloud.HandleUpload(configDirectory, configuration, communication)
// Handle ONVIF actions
communication.HandleONVIF = make(chan models.OnvifAction, 1)
go onvif.HandleONVIFActions(configuration, communication)
communication.HandleAudio = make(chan models.AudioDataPartial, 1)
if rtspBackChannelClient.HasBackChannel {
communication.HasBackChannel = true
go WriteAudioToBackchannel(communication, rtspBackChannelClient)
}
// If we reach this point, we have a working RTSP connection.
communication.CameraConnected = true
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// This will go into a blocking state, once this channel is triggered
// the agent will cleanup and restart.
status = <-communication.HandleBootstrap
// If we reach this point, we are stopping the stream.
communication.CameraConnected = false
communication.MainStreamConnected = false
communication.SubStreamConnected = false
// Cancel the main context, this will stop all the other goroutines.
(*communication.CancelContext)()
// We will re open the configuration, might have changed :O!
configService.OpenConfig(configDirectory, configuration)
// We will override the configuration with the environment variables
configService.OverrideWithEnvironmentVariables(configuration)
// Here we are cleaning up everything!
if configuration.Config.Offline != "true" {
communication.HandleUpload <- "stop"
}
communication.HandleStream <- "stop"
// We use the steam channel to stop both main and sub stream.
//if subStreamEnabled {
// communication.HandleSubStream <- "stop"
//}
time.Sleep(time.Second * 3)
err = rtspClient.Close()
if err != nil {
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP stream: " + err.Error())
time.Sleep(time.Second * 3)
return status
}
queue.Close()
queue = nil
communication.Queue = nil
if subStreamEnabled {
err = rtspSubClient.Close()
if err != nil {
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP sub stream: " + err.Error())
time.Sleep(time.Second * 3)
return status
}
subQueue.Close()
subQueue = nil
communication.SubQueue = nil
}
err = rtspBackChannelClient.Close()
if err != nil {
log.Log.Error("components.Kerberos.RunAgent(): error closing RTSP backchannel stream: " + err.Error())
}
time.Sleep(time.Second * 3)
close(communication.HandleLiveHDHandshake)
communication.HandleLiveHDHandshake = nil
close(communication.HandleMotion)
communication.HandleMotion = nil
close(communication.HandleAudio)
communication.HandleAudio = nil
close(communication.HandleONVIF)
communication.HandleONVIF = nil
// Waiting for some seconds to make sure everything is properly closed.
log.Log.Info("components.Kerberos.RunAgent(): waiting 3 seconds to make sure everything is properly closed.")
time.Sleep(time.Second * 3)
return status
}
// ControlAgent will check if the camera is still connected, if not it will restart the agent.
// In the other thread we are keeping track of the number of packets received, and particular the keyframe packets.
// Once we are not receiving any packets anymore, we will restart the agent.
func ControlAgent(communication *models.Communication) {
log.Log.Debug("ControlAgent: started")
log.Log.Debug("components.Kerberos.ControlAgent(): started")
packageCounter := communication.PackageCounter
packageSubCounter := communication.PackageCounterSub
go func() {
// A channel to check the camera activity
var previousPacket int64 = 0
var previousPacketSub int64 = 0
var occurence = 0
var occurenceSub = 0
for {
packetsR := packageCounter.Load().(int64)
if packetsR == previousPacket {
// If we are already reconfiguring,
// we dont need to check if the stream is blocking.
if !communication.IsConfiguring.IsSet() {
occurence = occurence + 1
// If camera is connected, we'll check if we are still receiving packets.
if communication.CameraConnected {
// First we'll check the main stream.
packetsR := packageCounter.Load().(int64)
if packetsR == previousPacket {
// If we are already reconfiguring,
// we dont need to check if the stream is blocking.
if !communication.IsConfiguring.IsSet() {
occurence = occurence + 1
}
} else {
occurence = 0
}
} else {
occurence = 0
}
log.Log.Info("ControlAgent: Number of packets read " + strconv.FormatInt(packetsR, 10))
log.Log.Info("components.Kerberos.ControlAgent(): Number of packets read from mainstream: " + strconv.FormatInt(packetsR, 10))
// After 15 seconds without activity this is thrown..
if occurence == 3 {
log.Log.Info("Main: Restarting machinery.")
communication.HandleBootstrap <- "restart"
time.Sleep(2 * time.Second)
occurence = 0
// After 15 seconds without activity this is thrown..
if occurence == 3 {
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking mainstream.")
communication.HandleBootstrap <- "restart"
time.Sleep(2 * time.Second)
occurence = 0
}
// Now we'll check the sub stream.
packetsSubR := packageSubCounter.Load().(int64)
if communication.SubStreamConnected {
if packetsSubR == previousPacketSub {
// If we are already reconfiguring,
// we dont need to check if the stream is blocking.
if !communication.IsConfiguring.IsSet() {
occurenceSub = occurenceSub + 1
}
} else {
occurenceSub = 0
}
log.Log.Info("components.Kerberos.ControlAgent(): Number of packets read from substream: " + strconv.FormatInt(packetsSubR, 10))
// After 15 seconds without activity this is thrown..
if occurenceSub == 3 {
log.Log.Info("components.Kerberos.ControlAgent(): Restarting machinery because of blocking substream.")
communication.HandleBootstrap <- "restart"
time.Sleep(2 * time.Second)
occurenceSub = 0
}
}
previousPacket = packageCounter.Load().(int64)
previousPacketSub = packageSubCounter.Load().(int64)
}
previousPacket = packageCounter.Load().(int64)
time.Sleep(5 * time.Second)
}
}()
log.Log.Debug("ControlAgent: finished")
log.Log.Debug("components.Kerberos.ControlAgent(): finished")
}
// GetDashboard godoc
// @Router /api/dashboard [get]
// @ID dashboard
// @Tags general
// @Summary Get all information showed on the dashboard.
// @Description Get all information showed on the dashboard.
// @Success 200
func GetDashboard(c *gin.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication) {
	// Camera connectivity is tracked on the communication struct.
	cameraIsOnline := communication.CameraConnected
	// CloudTimestamp is updated on a successful ping to Kerberos Hub; a
	// strictly positive value means the cloud link answered recently.
	cloudIsOnline := false
	if communication.CloudTimestamp != nil && communication.CloudTimestamp.Load() != nil {
		if ts := communication.CloudTimestamp.Load().(int64); ts > 0 {
			cloudIsOnline = true
		}
	}
	// Count every MP4 stored in the recordings directory.
	recordingDirectory := configDirectory + "/data/recordings"
	numberOfRecordings := utils.NumberOfMP4sInDirectory(recordingDirectory)
	// Collect all recorded days plus the 5 most recent events; on a read
	// error both stay empty.
	days := []string{}
	latestEvents := []models.Media{}
	if files, err := utils.ReadDirectory(recordingDirectory); err == nil {
		events := utils.GetSortedDirectory(files)
		days = utils.GetDays(events, recordingDirectory, configuration)
		var eventFilter models.EventFilter
		eventFilter.NumberOfElements = 5 // only the 5 latest recordings
		latestEvents = utils.GetMediaFormatted(events, recordingDirectory, configuration, eventFilter)
	}
	c.JSON(200, gin.H{
		"offlineMode":        configuration.Config.Offline,
		"cameraOnline":       cameraIsOnline,
		"cloudOnline":        cloudIsOnline,
		"numberOfRecordings": numberOfRecordings,
		"days":               days,
		"latestEvents":       latestEvents,
	})
}
// GetLatestEvents godoc
// @Router /api/latest-events [post]
// @ID latest-events
// @Tags general
// @Param eventFilter body models.EventFilter true "Event filter"
// @Summary Get the latest recordings (events) from the recordings directory.
// @Description Get the latest recordings (events) from the recordings directory.
// @Success 200
func GetLatestEvents(c *gin.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication) {
	// Parse the filter from the request body; bail out on malformed JSON.
	var eventFilter models.EventFilter
	if err := c.BindJSON(&eventFilter); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Default to 10 if no limit is set.
	if eventFilter.NumberOfElements == 0 {
		eventFilter.NumberOfElements = 10
	}
	recordingDirectory := configDirectory + "/data/recordings"
	files, err := utils.ReadDirectory(recordingDirectory)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Return the recordings selected by the filter, newest first.
	events := utils.GetSortedDirectory(files)
	fileObjects := utils.GetMediaFormatted(events, recordingDirectory, configuration, eventFilter)
	c.JSON(200, gin.H{
		"events": fileObjects,
	})
}
// GetDays godoc
// @Router /api/days [get]
// @ID days
// @Tags general
// @Summary Get all days stored in the recordings directory.
// @Description Get all days stored in the recordings directory.
// @Success 200
func GetDays(c *gin.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication) {
	// List the recordings directory; a read failure is reported as 400.
	recordingDirectory := configDirectory + "/data/recordings"
	files, err := utils.ReadDirectory(recordingDirectory)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Derive the list of recorded days from the sorted directory contents.
	sortedEvents := utils.GetSortedDirectory(files)
	c.JSON(200, gin.H{
		"events": utils.GetDays(sortedEvents, recordingDirectory, configuration),
	})
}
// StopAgent godoc
// @Router /api/camera/stop [post]
// @ID camera-stop
// @Tags camera
// @Summary Stop the agent.
// @Description Stop the agent.
// @Success 200 {object} models.APIResponse
func StopAgent(c *gin.Context, communication *models.Communication) {
	log.Log.Info("components.Kerberos.StopAgent(): sending signal to stop agent, this will os.Exit(0).")
	// Hand the stop signal to the bootstrap loop; the process will exit.
	communication.HandleBootstrap <- "stop"
	response := gin.H{"stopped": true}
	c.JSON(200, response)
}
// RestartAgent godoc
// @Router /api/camera/restart [post]
// @ID camera-restart
// @Tags camera
// @Summary Restart the agent.
// @Description Restart the agent.
// @Success 200 {object} models.APIResponse
func RestartAgent(c *gin.Context, communication *models.Communication) {
	log.Log.Info("components.Kerberos.RestartAgent(): sending signal to restart agent.")
	// Hand the restart signal to the bootstrap loop which tears down and
	// re-initializes the streaming pipeline.
	communication.HandleBootstrap <- "restart"
	response := gin.H{"restarted": true}
	c.JSON(200, response)
}
// MakeRecording godoc
// @Router /api/camera/record [post]
// @ID camera-record
// @Tags camera
// @Summary Make a recording.
// @Description Make a recording.
// @Success 200 {object} models.APIResponse
func MakeRecording(c *gin.Context, communication *models.Communication) {
	log.Log.Info("components.Kerberos.MakeRecording(): sending signal to start recording.")
	// Push a synthetic motion event onto the motion channel. The huge
	// NumberOfChanges guarantees any pixel-change threshold is exceeded,
	// which forces the pipeline to record.
	motionEvent := models.MotionDataPartial{
		Timestamp:       time.Now().Unix(),
		NumberOfChanges: 100000000, // hack set the number of changes to a high number to force recording
	}
	communication.HandleMotion <- motionEvent
	c.JSON(200, gin.H{
		"recording": true,
	})
}
// GetSnapshotBase64 godoc
// @Router /api/camera/snapshot/base64 [get]
// @ID snapshot-base64
// @Tags camera
// @Summary Get a snapshot from the camera in base64.
// @Description Get a snapshot from the camera in base64.
// @Success 200
func GetSnapshotBase64(c *gin.Context, captureDevice *capture.Capture, configuration *models.Configuration, communication *models.Communication) {
	// Refresh the cached snapshot only when the camera yields a new frame;
	// otherwise the previously cached image is served.
	if snapshot := capture.Base64Image(captureDevice, communication); snapshot != "" {
		communication.Image = snapshot
	}
	c.JSON(200, gin.H{
		"base64": communication.Image,
	})
}
// GetSnapshotJpeg godoc
// @Router /api/camera/snapshot/jpeg [get]
// @ID snapshot-jpeg
// @Tags camera
// @Summary Get a snapshot from the camera in jpeg format.
// @Description Get a snapshot from the camera in jpeg format.
// @Success 200
// GetSnapshotRaw returns the latest camera frame encoded as image/jpeg bytes.
// BUG FIX: the original ignored the ImageToBytes error and could answer 200
// with an empty body; an encoding failure now returns a 500 with the error.
func GetSnapshotRaw(c *gin.Context, captureDevice *capture.Capture, configuration *models.Configuration, communication *models.Communication) {
	// We'll try to get a snapshot from the camera.
	image := capture.JpegImage(captureDevice, communication)
	// Encode image to jpeg bytes.
	bytes, err := utils.ImageToBytes(&image)
	if err != nil {
		c.JSON(500, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Return image/jpeg
	c.Data(200, "image/jpeg", bytes)
}
// GetConfig godoc
// @Router /api/config [get]
// @ID config
// @Tags config
// @Summary Get the current configuration.
// @Description Get the current configuration.
// @Success 200
func GetConfig(c *gin.Context, captureDevice *capture.Capture, configuration *models.Configuration, communication *models.Communication) {
	// Refresh the cached snapshot if the camera currently yields one, so
	// the config page shows a recent frame.
	if snapshot := capture.Base64Image(captureDevice, communication); snapshot != "" {
		communication.Image = snapshot
	}
	c.JSON(200, gin.H{
		"config":   configuration.Config,
		"custom":   configuration.CustomConfig,
		"global":   configuration.GlobalConfig,
		"snapshot": communication.Image,
	})
}
// UpdateConfig godoc
// @Router /api/config [post]
// @ID config
// @Tags config
// @Param config body models.Config true "Configuration"
// @Summary Update the current configuration.
// @Description Update the current configuration.
// @Success 200
// UpdateConfig parses a models.Config from the request body, persists it and
// triggers a reconfiguration.
// BUG FIX: the original returned the identical 200 "☄ Reconfiguring" payload
// in both the success and the SaveConfig-error branch, hiding save failures
// from the caller; a failed save now returns 400 with the error.
func UpdateConfig(c *gin.Context, configDirectory string, configuration *models.Configuration, communication *models.Communication) {
	var config models.Config
	if err := c.BindJSON(&config); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Persist and apply the new configuration.
	if err := configService.SaveConfig(configDirectory, config, configuration, communication); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, gin.H{
		"data": "☄ Reconfiguring",
	})
}

View File

@@ -1,25 +0,0 @@
package components
import (
"time"
"github.com/cedricve/go-onvif"
"github.com/kerberos-io/agent/machinery/src/log"
)
// Discover performs ONVIF WS-Discovery on the local network and logs the
// hostname of every device that answers within the given timeout.
// Note: timeout is multiplied by time.Second before use, so callers pass a
// plain number of seconds (e.g. 5), not a fully-scaled time.Duration.
func Discover(timeout time.Duration) {
	log.Log.Info("Discovering devices")
	log.Log.Info("Waiting for " + (timeout * time.Second).String())
	devices, err := onvif.StartDiscovery(timeout * time.Second)
	if err != nil {
		log.Log.Error(err.Error())
		return
	}
	for _, device := range devices {
		// The GetHostname error is deliberately ignored: a device that
		// answers discovery but refuses the hostname call just logs empty.
		hostname, _ := device.GetHostname()
		log.Log.Info(hostname.Name)
	}
	if len(devices) == 0 {
		// BUG FIX: typo "descovered" -> "discovered"; also dropped the stray
		// trailing "\n" which does not belong in a structured log message.
		log.Log.Info("No devices discovered")
	}
}

View File

@@ -1,83 +0,0 @@
package components
import (
"fmt"
"log"
"time"
"github.com/deepch/vdk/av"
"github.com/deepch/vdk/codec/h264parser"
"github.com/deepch/vdk/format/rtsp"
)
// Stream describes a single RTSP source that can be opened, read and closed.
type Stream struct {
Name string // human-readable identifier, used for logging only
Url string // full RTSP url to dial
Debug bool // when true, Open enables verbose RTSP protocol logging
Codecs string // NOTE(review): not referenced in this file — presumably a codec description; confirm against callers
}
// CreateStream builds a Stream for the given name and RTSP url.
// Debug and Codecs are left at their zero values.
func CreateStream(name string, url string) *Stream {
	stream := Stream{}
	stream.Name = name
	stream.Url = url
	return &stream
}
// Open dials the RTSP url and returns a connected session with a 10 second
// RTP keep-alive.
// BUG FIX: the original kept using the session after rtsp.Dial returned an
// error, dereferencing a nil pointer on RtpKeepAliveTimeout; a failed dial
// now returns nil after the backoff sleep, so callers must check the result.
func (s Stream) Open() *rtsp.Client {
	// Enable debugging
	if s.Debug {
		rtsp.DebugRtsp = true
	}
	fmt.Println("Dialing in to " + s.Url)
	session, err := rtsp.Dial(s.Url)
	if err != nil {
		log.Println("Something went wrong dialing into stream: ", err)
		time.Sleep(5 * time.Second)
		return nil
	}
	session.RtpKeepAliveTimeout = 10 * time.Second
	return session
}
// Close tears down the given RTSP session, logging any error it reports.
func (s Stream) Close(session *rtsp.Client) {
	fmt.Println("Closing RTSP session.")
	if err := session.Close(); err != nil {
		log.Println("Something went wrong while closing your RTSP session: ", err)
	}
}
// GetCodecs opens the stream, reads the codec list from the session and
// closes it again before returning. On a read error the (possibly nil)
// codec slice is still returned after a backoff sleep.
func (s Stream) GetCodecs() []av.CodecData {
	session := s.Open()
	defer s.Close(session)
	codecs, err := session.Streams()
	log.Println("Reading codecs from stream: ", codecs)
	if err != nil {
		log.Println("Something went wrong while reading codecs from stream: ", err)
		time.Sleep(5 * time.Second)
	}
	return codecs
}
// ReadPackets opens the stream and forwards packets onto packetChannel until
// a read fails, then closes the session.
func (s Stream) ReadPackets(packetChannel chan av.Packet) {
	session := s.Open()
	defer s.Close(session)
	fmt.Println("Start reading H264 packages from stream")
	for {
		pkt, err := session.ReadPacket()
		if err != nil {
			return
		}
		// Drop the packet instead of blocking when the consumer lags behind.
		if len(packetChannel) < cap(packetChannel) {
			packetChannel <- pkt
		}
	}
}
// GetSPSFromCodec extracts the H264 SPS and PPS parameter sets from the
// first codec entry. Panics if codecs is empty or its first entry is not
// h264parser.CodecData, exactly like the type assertions in the original.
func GetSPSFromCodec(codecs []av.CodecData) ([]byte, []byte) {
	h264 := codecs[0].(h264parser.CodecData)
	return h264.SPS(), h264.PPS()
}

View File

@@ -0,0 +1,96 @@
package components
import (
"bufio"
"fmt"
"os"
"time"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/packets"
"github.com/kerberos-io/joy4/av"
"github.com/pion/rtp"
"github.com/zaf/g711"
)
// GetBackChannelAudioCodec scans the streams for a PCM_MULAW audio codec
// that is flagged as a backchannel. When found, it marks HasBackChannel on
// the communication struct and returns the codec; otherwise it returns nil.
func GetBackChannelAudioCodec(streams []av.CodecData, communication *models.Communication) av.AudioCodecData {
	for _, stream := range streams {
		// Only PCM_MULAW audio streams can qualify as a backchannel here.
		if !stream.Type().IsAudio() || stream.Type().String() != "PCM_MULAW" {
			continue
		}
		pcmuCodec := stream.(av.AudioCodecData)
		if pcmuCodec.IsBackChannel() {
			communication.HasBackChannel = true
			return pcmuCodec
		}
	}
	return nil
}
// WriteAudioToBackchannel consumes PCM audio from communication.HandleAudio,
// encodes it to G.711 u-law and writes it as RTP packets to the camera's
// backchannel, pacing one packet every 128ms until the channel is closed.
func WriteAudioToBackchannel(communication *models.Communication, rtspClient capture.RTSPClient) {
	log.Log.Info("Audio.WriteAudioToBackchannel(): writing to backchannel audio codec")
	length := uint32(0)
	sequenceNumber := uint16(0)
	for audio := range communication.HandleAudio {
		// Encode PCM to MULAW: one output byte per input sample.
		bufferUlaw := make([]byte, 0, len(audio.Data))
		for _, v := range audio.Data {
			bufferUlaw = append(bufferUlaw, g711.EncodeUlawFrame(v))
		}
		pkt := packets.Packet{
			Packet: &rtp.Packet{
				Header: rtp.Header{
					Version:        2,
					Marker:         true, // should be true
					PayloadType:    0,    // will be overwritten by the RTSP client
					SequenceNumber: sequenceNumber,
					Timestamp:      length,
					SSRC:           1293847657,
				},
				Payload: bufferUlaw,
			},
		}
		if err := rtspClient.WritePacket(pkt); err != nil {
			log.Log.Error("Audio.WriteAudioToBackchannel(): error writing packet to backchannel")
		}
		// NOTE(review): the running timestamp is kept modulo 65536 as in the
		// original, although RTP timestamps are full 32-bit values — confirm.
		length = (length + uint32(len(bufferUlaw))) % 65536
		// BUG FIX: was "(sequenceNumber + 1) % 65535", which skipped sequence
		// number 65535 on wrap-around. RTP sequence numbers are 16-bit and
		// must wrap 65535 -> 0, which uint16 overflow does naturally.
		sequenceNumber++
		time.Sleep(128 * time.Millisecond)
	}
	log.Log.Info("Audio.WriteAudioToBackchannel(): finished")
}
// WriteFileToBackChannel streams ./audiofile.bye to the backchannel in 1KiB
// chunks, one chunk every 128ms, until the file is exhausted.
// BUG FIX: the original fell through after a failed os.Open, continuing with
// a nil *os.File; it now returns immediately on open failure.
func WriteFileToBackChannel(infile av.DemuxCloser) {
	// Do the warmup!
	file, err := os.Open("./audiofile.bye")
	if err != nil {
		fmt.Println("WriteFileToBackChannel: error opening audiofile.bye file")
		return
	}
	defer file.Close()
	// Read file into buffer
	reader := bufio.NewReader(file)
	buffer := make([]byte, 1024)
	count := 0
	for {
		if _, err := reader.Read(buffer); err != nil {
			break
		}
		// Send to backchannel; count is the running byte offset used as the
		// write timestamp.
		fmt.Println(buffer)
		infile.Write(buffer, 2, uint32(count))
		count = count + 1024
		time.Sleep(128 * time.Millisecond)
	}
}

View File

@@ -1,141 +1,71 @@
package computervision
import (
"fmt"
"image"
"io/ioutil"
"os"
"runtime"
"runtime/debug"
"sort"
"strconv"
"sync"
"time"
mqtt "github.com/eclipse/paho.mqtt.golang"
geo "github.com/kellydunn/golang-geo"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/conditions"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/joy4/av/pubsub"
geo "github.com/kellydunn/golang-geo"
"github.com/kerberos-io/joy4/av"
"github.com/kerberos-io/joy4/cgo/ffmpeg"
"gocv.io/x/gocv"
"github.com/kerberos-io/agent/machinery/src/packets"
)
// GetRGBImage decodes the packet and returns an 8-bit BGR mat downscaled to
// a quarter of the decoded size; on decode failure the zero-value mat is
// returned.
func GetRGBImage(pkt av.Packet, dec *ffmpeg.VideoDecoder, decoderMutex *sync.Mutex) gocv.Mat {
	var rgb gocv.Mat
	decoded, err := capture.DecodeImage(pkt, dec, decoderMutex)
	if err != nil || decoded == nil {
		return rgb
	}
	rgb, _ = ToRGB8(decoded.Image)
	gocv.Resize(rgb, &rgb, image.Pt(rgb.Cols()/4, rgb.Rows()/4), 0, 0, gocv.InterpolationArea)
	return rgb
}
func ProcessMotion(motionCursor *packets.QueueCursor, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, rtspClient capture.RTSPClient) {
// GetImage decodes the packet into a grayscale mat, downscaling by powers of
// two (at most three halvings, matching the original's three minify steps)
// until the width is at most 800 pixels. On decode failure the zero-value
// mat is returned.
func GetImage(pkt av.Packet, dec *ffmpeg.VideoDecoder, decoderMutex *sync.Mutex) gocv.Mat {
	var gray gocv.Mat
	img, err := capture.DecodeImage(pkt, dec, decoderMutex)
	if err == nil && img != nil {
		// Check if we need to scale down.
		width := img.Width()
		height := img.Height()
		newWidth := width
		newHeight := height
		scaleFactor := 1.0
		// BUG FIX: the original repeated "newWidth = width / 2" three times,
		// so the target size never dropped below half while scaleFactor kept
		// doubling; halve the running size instead.
		for i := 0; i < 3 && newWidth > 800; i++ {
			newWidth = newWidth / 2
			newHeight = newHeight / 2
			scaleFactor *= 2
		}
		im := img.Image
		rgb, _ := ToRGB8(im)
		img.Free()
		if scaleFactor > 1 {
			gocv.Resize(rgb, &rgb, image.Pt(newWidth, newHeight), 0, 0, gocv.InterpolationArea)
		}
		gray = gocv.NewMat()
		gocv.CvtColor(rgb, &gray, gocv.ColorBGRToGray)
		rgb.Close()
	}
	return gray
}
// ToRGB8 converts a YCbCr frame into an 8-bit 3-channel mat. Channels are
// written in B,G,R order to match gocv/OpenCV's BGR convention, despite the
// "RGB" in the name.
func ToRGB8(img image.YCbCr) (gocv.Mat, error) {
	bounds := img.Bounds()
	width := bounds.Dx()
	height := bounds.Dy()
	pixels := make([]byte, 0, width*height*3)
	for row := bounds.Min.Y; row < bounds.Max.Y; row++ {
		for col := bounds.Min.X; col < bounds.Max.X; col++ {
			// RGBA returns 16-bit channels; keep the high byte of each.
			r, g, b, _ := img.At(col, row).RGBA()
			pixels = append(pixels, byte(b>>8), byte(g>>8), byte(r>>8))
		}
	}
	return gocv.NewMatFromBytes(height, width, gocv.MatTypeCV8UC3, pixels)
}
func ProcessMotion(motionCursor *pubsub.QueueCursor, configuration *models.Configuration, communication *models.Communication, mqttClient mqtt.Client, decoder *ffmpeg.VideoDecoder, decoderMutex *sync.Mutex) { //, wg *sync.WaitGroup) {
log.Log.Debug("ProcessMotion: started")
log.Log.Debug("computervision.main.ProcessMotion(): start motion detection")
config := configuration.Config
loc, _ := time.LoadLocation(config.Timezone)
var isPixelChangeThresholdReached = false
var changesToReturn = 0
pixelThreshold := config.Capture.PixelChangeThreshold
// Might not be set in the config file, so set it to 150
if pixelThreshold == 0 {
pixelThreshold = 150
}
if config.Capture.Continuous == "true" {
log.Log.Info("ProcessMotion: Continuous recording, so no motion detection.")
log.Log.Info("computervision.main.ProcessMotion(): you've enabled continuous recording, so no motion detection required.")
} else {
log.Log.Info("ProcessMotion: Motion detection enabled.")
log.Log.Info("computervision.main.ProcessMotion(): motion detected is enabled, so starting the motion detection.")
key := ""
if config.Cloud == "s3" && config.S3.Publickey != "" {
key = config.S3.Publickey
} else if config.Cloud == "kstorage" && config.KStorage.CloudKey != "" {
key = config.KStorage.CloudKey
}
hubKey := config.HubKey
deviceKey := config.Key
// Initialise first 2 elements
var matArray [3]*gocv.Mat
var imageArray [3]*image.Gray
j := 0
//for pkt := range packets {
var cursorError error
var pkt av.Packet
var pkt packets.Packet
for cursorError == nil {
pkt, cursorError = motionCursor.ReadPacket()
// Check If valid package.
if len(pkt.Data) > 0 && pkt.IsKeyFrame {
rgb := GetImage(pkt, decoder, decoderMutex)
matArray[j] = &rgb
j++
grayImage, err := rtspClient.DecodePacketRaw(pkt)
if err == nil {
imageArray[j] = &grayImage
j++
}
}
if j == 2 {
if j == 3 {
break
}
}
img := matArray[0]
if img != nil {
// Calculate mask
var polyObjects []geo.Polygon
// Calculate mask
var polyObjects []geo.Polygon
if config.Region != nil {
for _, polygon := range config.Region.Polygon {
coords := polygon.Coordinates
poly := geo.Polygon{}
@@ -149,25 +79,33 @@ func ProcessMotion(motionCursor *pubsub.QueueCursor, configuration *models.Confi
}
polyObjects = append(polyObjects, poly)
}
}
rows := img.Rows()
cols := img.Cols()
var coordinatesToCheck [][]int
img := imageArray[0]
var coordinatesToCheck []int
if img != nil {
bounds := img.Bounds()
rows := bounds.Dy()
cols := bounds.Dx()
// Make fixed size array of uinty8
for y := 0; y < rows; y++ {
for x := 0; x < cols; x++ {
for _, poly := range polyObjects {
point := geo.NewPoint(float64(x), float64(y))
if poly.Contains(point) {
coordinatesToCheck = append(coordinatesToCheck, []int{x, y})
break
coordinatesToCheck = append(coordinatesToCheck, y*cols+x)
}
}
}
}
}
// If no region is set, we'll skip the motion detection
if len(coordinatesToCheck) > 0 {
// Start the motion detection
i := 0
loc, _ := time.LoadLocation(config.Timezone)
for cursorError == nil {
pkt, cursorError = motionCursor.ReadPacket()
@@ -177,112 +115,94 @@ func ProcessMotion(motionCursor *pubsub.QueueCursor, configuration *models.Confi
continue
}
rgb := GetImage(pkt, decoder, decoderMutex)
matArray[2] = &rgb
// Store snapshots (jpg) or hull.
if i%3 == 0 {
files, err := ioutil.ReadDir("./data/snapshots")
if err == nil {
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime().Before(files[j].ModTime())
})
if len(files) > 3 {
os.Remove("./data/snapshots/" + files[0].Name())
}
}
t := strconv.FormatInt(time.Now().Unix(), 10)
gocv.IMWrite("./data/snapshots/"+t+".png", rgb)
grayImage, err := rtspClient.DecodePacketRaw(pkt)
if err == nil {
imageArray[2] = &grayImage
}
// Check if continuous recording.
if config.Capture.Continuous == "true" {
// We might have different conditions enabled such as time window or uri response.
// We'll validate those conditions and if not valid we'll not do anything.
detectMotion, err := conditions.Validate(loc, configuration)
if !detectMotion && err != nil {
log.Log.Debug("computervision.main.ProcessMotion(): " + err.Error() + ".")
}
// Do not do anything! Just sleep as there is no
// motion detection needed
if config.Capture.Motion != "false" {
} else { // Do motion detection.
if detectMotion {
// Check if within time interval
detectMotion := true
now := time.Now().In(loc)
weekday := now.Weekday()
hour := now.Hour()
minute := now.Minute()
second := now.Second()
timeInterval := config.Timetable[int(weekday)]
if timeInterval != nil {
start1 := timeInterval.Start1
end1 := timeInterval.End1
start2 := timeInterval.Start2
end2 := timeInterval.End2
currentTimeInSeconds := hour*60*60 + minute*60 + second
if (currentTimeInSeconds >= start1 && currentTimeInSeconds <= end1) ||
(currentTimeInSeconds >= start2 && currentTimeInSeconds <= end2) {
// Remember additional information about the result of findmotion
isPixelChangeThresholdReached, changesToReturn = FindMotion(imageArray, coordinatesToCheck, pixelThreshold)
if isPixelChangeThresholdReached {
} else {
detectMotion = false
log.Log.Debug("ProcessMotion: Time interval not valid, disabling motion detection.")
// If offline mode is disabled, send a message to the hub
if config.Offline != "true" {
if mqttClient != nil {
if hubKey != "" {
message := models.Message{
Payload: models.Payload{
Action: "motion",
DeviceId: configuration.Config.Key,
Value: map[string]interface{}{
"timestamp": time.Now().Unix(),
},
},
}
payload, err := models.PackageMQTTMessage(configuration, message)
if err == nil {
mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, payload)
} else {
log.Log.Info("computervision.main.ProcessMotion(): failed to package MQTT message: " + err.Error())
}
} else {
mqttClient.Publish("kerberos/agent/"+deviceKey, 2, false, "motion")
}
}
}
if config.Capture.Recording != "false" {
dataToPass := models.MotionDataPartial{
Timestamp: time.Now().Unix(),
NumberOfChanges: changesToReturn,
}
communication.HandleMotion <- dataToPass //Save data to the channel
}
}
}
if detectMotion && FindMotion(matArray, coordinatesToCheck) {
mqttClient.Publish("kerberos/"+key+"/device/"+config.Key+"/motion", 2, false, "motion")
fmt.Println(key)
communication.HandleMotion <- time.Now().Unix()
}
imageArray[0] = imageArray[1]
imageArray[1] = imageArray[2]
i++
}
}
matArray[0].Close()
matArray[0] = matArray[1]
matArray[1] = matArray[2]
i++
runtime.GC()
debug.FreeOSMemory()
if img != nil {
img = nil
}
}
if img != nil {
img.Close()
}
runtime.GC()
debug.FreeOSMemory()
}
log.Log.Debug("ProcessMotion: finished")
log.Log.Debug("computervision.main.ProcessMotion(): stop the motion detection.")
}
func FindMotion(matArray [3]*gocv.Mat, coordinatesToCheck [][]int) bool {
h1 := gocv.NewMat()
gocv.AbsDiff(*matArray[2], *matArray[0], &h1)
h2 := gocv.NewMat()
gocv.AbsDiff(*matArray[2], *matArray[1], &h2)
and := gocv.NewMat()
gocv.BitwiseAnd(h1, h2, &and)
h1.Close()
h2.Close()
thresh := gocv.NewMat()
gocv.Threshold(and, &thresh, 30.0, 255.0, gocv.ThresholdBinary)
and.Close()
kernel := gocv.GetStructuringElement(gocv.MorphRect, image.Pt(3, 3))
eroded := gocv.NewMat()
gocv.Erode(thresh, &eroded, kernel)
thresh.Close()
kernel.Close()
// FindMotion compares the newest frame (index 2) against the two previous
// frames and reports whether the number of changed pixels exceeds
// pixelChangeThreshold, together with the change count itself.
func FindMotion(imageArray [3]*image.Gray, coordinatesToCheck []int, pixelChangeThreshold int) (thresholdReached bool, changesDetected int) {
	// Per-pixel intensity delta that counts as a change.
	const pixelDelta = 60
	changesDetected = AbsDiffBitwiseAndThreshold(imageArray[0], imageArray[1], imageArray[2], pixelDelta, coordinatesToCheck)
	thresholdReached = changesDetected > pixelChangeThreshold
	return
}
func AbsDiffBitwiseAndThreshold(img1 *image.Gray, img2 *image.Gray, img3 *image.Gray, threshold int, coordinatesToCheck []int) int {
changes := 0
for _, c := range coordinatesToCheck {
value := eroded.GetUCharAt(c[1], c[0])
if value > 0 {
for i := 0; i < len(coordinatesToCheck); i++ {
pixel := coordinatesToCheck[i]
diff := int(img3.Pix[pixel]) - int(img1.Pix[pixel])
diff2 := int(img3.Pix[pixel]) - int(img2.Pix[pixel])
if (diff > threshold || diff < -threshold) && (diff2 > threshold || diff2 < -threshold) {
changes++
}
}
eroded.Close()
log.Log.Info("FindMotion: Number of changes detected:" + strconv.Itoa(changes))
return changes > 75
return changes
}

View File

@@ -0,0 +1,28 @@
package conditions
import (
"errors"
"time"
"github.com/kerberos-io/agent/machinery/src/models"
)
// Validate evaluates every recording condition (time window, condition URI)
// and reports the first failing one through err. valid is true only when all
// conditions pass.
func Validate(loc *time.Location, configuration *models.Configuration) (valid bool, err error) {
	if !IsWithinTimeInterval(loc, configuration) {
		return false, errors.New("time interval not valid")
	}
	if !IsValidUriResponse(configuration) {
		return false, errors.New("uri response not valid")
	}
	return true, nil
}

View File

@@ -0,0 +1,39 @@
package conditions
import (
"time"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
)
// IsWithinTimeInterval reports whether the current wall-clock time (in loc)
// falls inside one of today's two configured recording windows. It returns
// true when time-based recording is disabled, when no timetable exists, or
// when today has no interval configured.
func IsWithinTimeInterval(loc *time.Location, configuration *models.Configuration) (enabled bool) {
	enabled = true
	config := configuration.Config
	if config.Time == "false" {
		return
	}
	if len(config.Timetable) == 0 {
		return
	}
	now := time.Now().In(loc)
	timeInterval := config.Timetable[int(now.Weekday())]
	if timeInterval == nil {
		return
	}
	// Seconds elapsed since local midnight, compared against both windows.
	secondsSinceMidnight := now.Hour()*60*60 + now.Minute()*60 + now.Second()
	inFirstWindow := secondsSinceMidnight >= timeInterval.Start1 && secondsSinceMidnight <= timeInterval.End1
	inSecondWindow := secondsSinceMidnight >= timeInterval.Start2 && secondsSinceMidnight <= timeInterval.End2
	if inFirstWindow || inSecondWindow {
		log.Log.Debug("conditions.timewindow.IsWithinTimeInterval(): time interval valid, enabling recording.")
	} else {
		log.Log.Info("conditions.timewindow.IsWithinTimeInterval(): time interval not valid, disabling recording.")
		enabled = false
	}
	return
}

View File

@@ -0,0 +1,59 @@
package conditions
import (
"bytes"
"crypto/tls"
"fmt"
"net/http"
"os"
"time"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
)
// IsValidUriResponse posts a small JSON payload describing this agent to the
// configured condition URI and reports whether that endpoint answered with
// HTTP 200. When no condition URI is configured, recording stays enabled.
func IsValidUriResponse(configuration *models.Configuration) (enabled bool) {
	config := configuration.Config
	conditionURI := config.ConditionURI
	enabled = true
	if conditionURI != "" {
		// We will send a POST request to the conditionURI, and expect a 200 response.
		// In the payload we will send some information, so the other end can decide
		// if it should enable or disable recording.
		var client *http.Client
		if os.Getenv("AGENT_TLS_INSECURE") == "true" {
			tr := &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			}
			// BUGFIX: give the client a timeout so an unresponsive endpoint
			// cannot block the recording pipeline indefinitely.
			client = &http.Client{Transport: tr, Timeout: 10 * time.Second}
		} else {
			client = &http.Client{Timeout: 10 * time.Second}
		}
		// BUGFIX: the payload previously had a trailing comma after the last
		// field, which is not valid JSON and is rejected by strict parsers.
		var object = fmt.Sprintf(`{
		"camera_id" : "%s",
		"camera_name" : "%s",
		"site_id" : "%s",
		"hub_key" : "%s",
		"timestamp" : "%s"
		}`, config.Key, config.FriendlyName, config.HubSite, config.HubKey, time.Now().Format("2006-01-02 15:04:05"))
		buffy := bytes.NewBuffer([]byte(object))
		// BUGFIX: the request-creation error used to be discarded; a
		// malformed condition URI now disables recording instead of being
		// passed on as a broken request.
		req, err := http.NewRequest("POST", conditionURI, buffy)
		if err != nil {
			log.Log.Info("conditions.uri.IsValidUriResponse(): response not 200, disabling recording.")
			return false
		}
		req.Header.Set("Content-Type", "application/json")
		resp, err := client.Do(req)
		if resp != nil {
			// Body is always closed so the transport can reuse the connection.
			resp.Body.Close()
		}
		if err == nil && resp.StatusCode == 200 {
			log.Log.Info("conditions.uri.IsValidUriResponse(): response 200, enabling recording.")
		} else {
			log.Log.Info("conditions.uri.IsValidUriResponse(): response not 200, disabling recording.")
			enabled = false
		}
	}
	return
}

View File

@@ -0,0 +1,536 @@
package config
import (
"context"
"encoding/json"
"errors"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/InVisionApp/conjungo"
"github.com/kerberos-io/agent/machinery/src/database"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"go.mongodb.org/mongo-driver/bson"
)
// ReadUserConfig Reads the user configuration of the Kerberos Open Source instance.
// This will return a models.User struct including the username, password,
// selected language, and if the installation was completed or not.
// It blocks (retrying every 5 seconds) until the file exists and contains
// valid JSON.
func ReadUserConfig(configDirectory string) (userConfig models.User) {
	userConfigPath := configDirectory + "/data/config/user.json"
	for {
		// BUGFIX: use ReadFile instead of Open/ReadAll; the previous version
		// could call Close on a nil *os.File when Open failed and split the
		// close responsibility across two branches.
		byteValue, err := ioutil.ReadFile(userConfigPath)
		if err != nil {
			log.Log.Error("Config file is not found " + userConfigPath + ", trying again in 5s: " + err.Error())
			time.Sleep(5 * time.Second)
			continue
		}
		log.Log.Info("Successfully Opened user.json")
		if err := json.Unmarshal(byteValue, &userConfig); err != nil {
			// Invalid JSON: keep retrying, the file may be mid-write.
			log.Log.Error("JSON file not valid: " + err.Error())
			time.Sleep(5 * time.Second)
			continue
		}
		return
	}
}
// OpenConfig loads the agent configuration into the given Configuration
// struct. For factory/kubernetes deployments it is read from MongoDB and a
// global config is merged with a per-deployment custom config; for
// stand-alone agents it is read from a local JSON file (retrying forever
// until the file is present and valid).
func OpenConfig(configDirectory string, configuration *models.Configuration) {
	// We are checking which deployment this is running, so we can load
	// into the configuration as expected.
	if os.Getenv("DEPLOYMENT") == "factory" || os.Getenv("MACHINERY_ENVIRONMENT") == "kubernetes" {
		// Factory deployment means that configuration is stored in MongoDB
		// Multiple agents have there configuration stored, and can benefit from
		// the concept of a global concept.
		// Write to mongodb
		client := database.New()
		db := client.Database(database.DatabaseName)
		collection := db.Collection("configuration")
		// The single "global" document is mandatory: without it the agent
		// cannot run, hence the panics below.
		var globalConfig models.Config
		res := collection.FindOne(context.Background(), bson.M{
			"type": "global",
		})
		if res.Err() != nil {
			log.Log.Error("Could not find global configuration, using default configuration.")
			panic("Could not find global configuration, using default configuration.")
		}
		err := res.Decode(&globalConfig)
		if err != nil {
			log.Log.Error("Could not find global configuration, using default configuration.")
			panic("Could not find global configuration, using default configuration.")
		}
		if globalConfig.Type != "global" {
			log.Log.Error("Could not find global configuration, might missed the mongodb connection.")
			panic("Could not find global configuration, might missed the mongodb connection.")
		}
		configuration.GlobalConfig = globalConfig
		// The per-deployment document (type "config", keyed by
		// DEPLOYMENT_NAME) overrides the global one. NOTE(review): a missing
		// document is only logged here, but the Type check right after still
		// panics — confirm that is the intended behavior.
		var customConfig models.Config
		deploymentName := os.Getenv("DEPLOYMENT_NAME")
		res = collection.FindOne(context.Background(), bson.M{
			"type": "config",
			"name": deploymentName,
		})
		if res.Err() != nil {
			log.Log.Error("Could not find configuration for " + deploymentName + ", using global configuration.")
		}
		err = res.Decode(&customConfig)
		if err != nil {
			log.Log.Error("Could not find configuration for " + deploymentName + ", using global configuration.")
		}
		if customConfig.Type != "config" {
			log.Log.Error("Could not find custom configuration, might missed the mongodb connection.")
			panic("Could not find custom configuration, might missed the mongodb connection.")
		}
		configuration.CustomConfig = customConfig
		// We will merge both configs in a single config file.
		// Read again from database but this store overwrite the same object.
		// For string fields a non-empty source value wins over the target.
		opts := conjungo.NewOptions()
		opts.SetTypeMergeFunc(
			reflect.TypeOf(""),
			func(t, s reflect.Value, o *conjungo.Options) (reflect.Value, error) {
				targetStr, _ := t.Interface().(string)
				sourceStr, _ := s.Interface().(string)
				finalStr := targetStr
				if sourceStr != "" {
					finalStr = sourceStr
				}
				return reflect.ValueOf(finalStr), nil
			},
		)
		// Reset main configuration Config.
		configuration.Config = models.Config{}
		// Merge the global settings in the main config
		conjungo.Merge(&configuration.Config, configuration.GlobalConfig, opts)
		// Now we might override some settings with the custom config
		conjungo.Merge(&configuration.Config, configuration.CustomConfig, opts)
		// Pointer sections are merged into fresh values so global and custom
		// settings combine field by field instead of replacing each other.
		// Merge Kerberos Vault settings
		var kerberosvault models.KStorage
		conjungo.Merge(&kerberosvault, configuration.GlobalConfig.KStorage, opts)
		conjungo.Merge(&kerberosvault, configuration.CustomConfig.KStorage, opts)
		configuration.Config.KStorage = &kerberosvault
		// Merge Kerberos S3 settings
		var s3 models.S3
		conjungo.Merge(&s3, configuration.GlobalConfig.S3, opts)
		conjungo.Merge(&s3, configuration.CustomConfig.S3, opts)
		configuration.Config.S3 = &s3
		// Merge Encryption settings
		var encryption models.Encryption
		conjungo.Merge(&encryption, configuration.GlobalConfig.Encryption, opts)
		conjungo.Merge(&encryption, configuration.CustomConfig.Encryption, opts)
		configuration.Config.Encryption = &encryption
		// Merge timetable manually because it's an array
		configuration.Config.Timetable = configuration.CustomConfig.Timetable
		// Cleanup
		opts = nil
	} else if os.Getenv("DEPLOYMENT") == "" || os.Getenv("DEPLOYMENT") == "agent" {
		// Local deployment means we do a stand-alone installation
		// Configuration is stored into a json file, and there is only 1 agent.
		// Open device config
		for {
			jsonFile, err := os.Open(configDirectory + "/data/config/config.json")
			if err != nil {
				log.Log.Error("Config file is not found " + configDirectory + "/data/config/config.json" + ", trying again in 5s.")
				time.Sleep(5 * time.Second)
			} else {
				log.Log.Info("Successfully Opened config.json from " + configuration.Name)
				byteValue, _ := ioutil.ReadAll(jsonFile)
				// The same bytes populate both Config and CustomConfig so a
				// later save can diff against the original custom settings.
				err = json.Unmarshal(byteValue, &configuration.Config)
				jsonFile.Close()
				if err != nil {
					log.Log.Error("JSON file not valid: " + err.Error())
				} else {
					err = json.Unmarshal(byteValue, &configuration.CustomConfig)
					if err != nil {
						log.Log.Error("JSON file not valid: " + err.Error())
					} else {
						break
					}
				}
				time.Sleep(5 * time.Second)
			}
			// NOTE(review): reached with a nil handle when Open failed, and a
			// second time after the success branch already closed the file;
			// (*os.File).Close tolerates both, but this is worth tidying up.
			jsonFile.Close()
		}
	}
	return
}
// OverrideWithEnvironmentVariables overrides the configuration with
// environment variables: every variable with the AGENT_ prefix maps onto a
// specific configuration field, which allows deploying the agent without
// editing the configuration file (e.g. in containers). Unknown AGENT_
// variables are ignored; values that fail to parse leave the existing
// setting untouched.
func OverrideWithEnvironmentVariables(configuration *models.Configuration) {
	for _, env := range os.Environ() {
		// BUGFIX: match the prefix explicitly instead of strings.Contains,
		// so variables that merely contain "AGENT_" somewhere are skipped.
		if !strings.HasPrefix(env, "AGENT_") {
			continue
		}
		key := strings.SplitN(env, "=", 2)[0]
		value := os.Getenv(key)
		switch key {
		/* General configuration */
		case "AGENT_KEY":
			configuration.Config.Key = value
		case "AGENT_NAME":
			configuration.Config.FriendlyName = value
		case "AGENT_TIMEZONE":
			configuration.Config.Timezone = value
		case "AGENT_OFFLINE":
			configuration.Config.Offline = value
		case "AGENT_AUTO_CLEAN":
			configuration.Config.AutoClean = value
		case "AGENT_AUTO_CLEAN_MAX_SIZE":
			if size, err := strconv.ParseInt(value, 10, 64); err == nil {
				configuration.Config.MaxDirectorySize = size
			}
		/* Camera configuration */
		case "AGENT_CAPTURE_IPCAMERA_RTSP":
			configuration.Config.Capture.IPCamera.RTSP = value
		case "AGENT_CAPTURE_IPCAMERA_SUB_RTSP":
			configuration.Config.Capture.IPCamera.SubRTSP = value
		/* ONVIF connnection settings */
		case "AGENT_CAPTURE_IPCAMERA_ONVIF":
			configuration.Config.Capture.IPCamera.ONVIF = value
		case "AGENT_CAPTURE_IPCAMERA_ONVIF_XADDR":
			configuration.Config.Capture.IPCamera.ONVIFXAddr = value
		case "AGENT_CAPTURE_IPCAMERA_ONVIF_USERNAME":
			configuration.Config.Capture.IPCamera.ONVIFUsername = value
		case "AGENT_CAPTURE_IPCAMERA_ONVIF_PASSWORD":
			configuration.Config.Capture.IPCamera.ONVIFPassword = value
		/* Recording mode */
		case "AGENT_CAPTURE_RECORDING":
			configuration.Config.Capture.Recording = value
		case "AGENT_CAPTURE_CONTINUOUS":
			configuration.Config.Capture.Continuous = value
		case "AGENT_CAPTURE_LIVEVIEW":
			configuration.Config.Capture.Liveview = value
		case "AGENT_CAPTURE_MOTION":
			configuration.Config.Capture.Motion = value
		case "AGENT_CAPTURE_SNAPSHOTS":
			configuration.Config.Capture.Snapshots = value
		case "AGENT_CAPTURE_PRERECORDING":
			if duration, err := strconv.ParseInt(value, 10, 64); err == nil {
				configuration.Config.Capture.PreRecording = duration
			}
		case "AGENT_CAPTURE_POSTRECORDING":
			if duration, err := strconv.ParseInt(value, 10, 64); err == nil {
				configuration.Config.Capture.PostRecording = duration
			}
		case "AGENT_CAPTURE_MAXLENGTH":
			if duration, err := strconv.ParseInt(value, 10, 64); err == nil {
				configuration.Config.Capture.MaxLengthRecording = duration
			}
		case "AGENT_CAPTURE_PIXEL_CHANGE":
			if count, err := strconv.Atoi(value); err == nil {
				configuration.Config.Capture.PixelChangeThreshold = count
			}
		case "AGENT_CAPTURE_FRAGMENTED":
			configuration.Config.Capture.Fragmented = value
		case "AGENT_CAPTURE_FRAGMENTED_DURATION":
			if duration, err := strconv.ParseInt(value, 10, 64); err == nil {
				configuration.Config.Capture.FragmentedDuration = duration
			}
		/* Conditions */
		case "AGENT_TIME":
			configuration.Config.Time = value
		case "AGENT_TIMETABLE":
			configuration.Config.Timetable = parseTimetable(value)
		case "AGENT_REGION_POLYGON":
			// A single polygon with ID "0" built from the coordinate list.
			ensureRegion(&configuration.Config).Polygon = []models.Polygon{
				{
					Coordinates: parsePolygonCoordinates(value),
					ID:          "0",
				},
			}
		/* MQTT settings for bi-directional communication */
		case "AGENT_MQTT_URI":
			configuration.Config.MQTTURI = value
		case "AGENT_MQTT_USERNAME":
			configuration.Config.MQTTUsername = value
		case "AGENT_MQTT_PASSWORD":
			configuration.Config.MQTTPassword = value
		/* WebRTC settings for live-streaming (remote) */
		case "AGENT_STUN_URI":
			configuration.Config.STUNURI = value
		case "AGENT_TURN_URI":
			configuration.Config.TURNURI = value
		case "AGENT_TURN_USERNAME":
			configuration.Config.TURNUsername = value
		case "AGENT_TURN_PASSWORD":
			configuration.Config.TURNPassword = value
		/* Cloud settings for persisting recordings */
		case "AGENT_CLOUD":
			configuration.Config.Cloud = value
		case "AGENT_REMOVE_AFTER_UPLOAD":
			configuration.Config.RemoveAfterUpload = value
		/* When connected and storing in Kerberos Hub (SAAS) */
		case "AGENT_HUB_ENCRYPTION":
			configuration.Config.HubEncryption = value
		case "AGENT_HUB_URI":
			configuration.Config.HubURI = value
		case "AGENT_HUB_KEY":
			configuration.Config.HubKey = value
		case "AGENT_HUB_PRIVATE_KEY":
			configuration.Config.HubPrivateKey = value
		case "AGENT_HUB_SITE":
			configuration.Config.HubSite = value
		case "AGENT_HUB_REGION":
			ensureS3(&configuration.Config).Region = value
		/* When storing in a Kerberos Vault */
		case "AGENT_KERBEROSVAULT_URI":
			ensureKStorage(&configuration.Config).URI = value
		case "AGENT_KERBEROSVAULT_ACCESS_KEY":
			ensureKStorage(&configuration.Config).AccessKey = value
		case "AGENT_KERBEROSVAULT_SECRET_KEY":
			ensureKStorage(&configuration.Config).SecretAccessKey = value
		case "AGENT_KERBEROSVAULT_PROVIDER":
			ensureKStorage(&configuration.Config).Provider = value
		case "AGENT_KERBEROSVAULT_DIRECTORY":
			ensureKStorage(&configuration.Config).Directory = value
		/* When storing in dropbox */
		case "AGENT_DROPBOX_ACCESS_TOKEN":
			ensureDropbox(&configuration.Config).AccessToken = value
		case "AGENT_DROPBOX_DIRECTORY":
			ensureDropbox(&configuration.Config).Directory = value
		/* When encryption is enabled */
		case "AGENT_ENCRYPTION":
			ensureEncryption(&configuration.Config).Enabled = value
		case "AGENT_ENCRYPTION_RECORDINGS":
			ensureEncryption(&configuration.Config).Recordings = value
		case "AGENT_ENCRYPTION_FINGERPRINT":
			ensureEncryption(&configuration.Config).Fingerprint = value
		case "AGENT_ENCRYPTION_PRIVATE_KEY":
			// Environment variables cannot carry raw newlines; restore them.
			ensureEncryption(&configuration.Config).PrivateKey = strings.ReplaceAll(value, "\\n", "\n")
		case "AGENT_ENCRYPTION_SYMMETRIC_KEY":
			ensureEncryption(&configuration.Config).SymmetricKey = value
		}
	}
}

// ensureRegion lazily allocates the optional Region section.
// BUGFIX: the env overrides used to dereference these optional pointer
// sections without a nil check, panicking whenever the stored configuration
// omitted the corresponding JSON section.
func ensureRegion(c *models.Config) *models.Region {
	if c.Region == nil {
		c.Region = &models.Region{}
	}
	return c.Region
}

// ensureS3 lazily allocates the optional S3 section (see ensureRegion).
func ensureS3(c *models.Config) *models.S3 {
	if c.S3 == nil {
		c.S3 = &models.S3{}
	}
	return c.S3
}

// ensureKStorage lazily allocates the optional Kerberos Vault section.
func ensureKStorage(c *models.Config) *models.KStorage {
	if c.KStorage == nil {
		c.KStorage = &models.KStorage{}
	}
	return c.KStorage
}

// ensureDropbox lazily allocates the optional Dropbox section.
func ensureDropbox(c *models.Config) *models.Dropbox {
	if c.Dropbox == nil {
		c.Dropbox = &models.Dropbox{}
	}
	return c.Dropbox
}

// ensureEncryption lazily allocates the optional Encryption section.
func ensureEncryption(c *models.Config) *models.Encryption {
	if c.Encryption == nil {
		c.Encryption = &models.Encryption{}
	}
	return c.Encryption
}

// parseTimetable converts the AGENT_TIMETABLE value into timetable entries.
// Days are separated by ';' and the four values per day (start1, end1,
// start2, end2, in seconds since midnight) by ',', e.g.
// "0,43199,43200,86400;0,43199,43200,86400;..." (order: su;mo;tu;we;th;fr;sa).
// Malformed day entries are skipped.
func parseTimetable(value string) []*models.Timetable {
	var timetable []*models.Timetable
	for _, dayString := range strings.Split(value, ";") {
		timeString := strings.Split(dayString, ",")
		if len(timeString) != 4 {
			continue
		}
		numbers := make([]int, 0, 4)
		valid := true
		for _, ts := range timeString {
			number, err := strconv.Atoi(ts)
			if err != nil {
				valid = false
				break
			}
			numbers = append(numbers, number)
		}
		if !valid {
			continue
		}
		timetable = append(timetable, &models.Timetable{
			Start1: numbers[0],
			End1:   numbers[1],
			Start2: numbers[2],
			End2:   numbers[3],
		})
	}
	return timetable
}

// parsePolygonCoordinates converts "x,y;x,y;..." (e.g. "0,0;1,1;2,2;3,3")
// into a coordinate list; malformed pairs are skipped.
func parsePolygonCoordinates(value string) []models.Coordinate {
	var coordinates []models.Coordinate
	for _, coordinateString := range strings.Split(value, ";") {
		pair := strings.Split(coordinateString, ",")
		if len(pair) != 2 {
			continue
		}
		x, errX := strconv.ParseFloat(pair[0], 64)
		y, errY := strconv.ParseFloat(pair[1], 64)
		if errX != nil || errY != nil {
			continue
		}
		coordinates = append(coordinates, models.Coordinate{X: x, Y: y})
	}
	return coordinates
}
// SaveConfig persists the given config (via StoreConfig) while holding the
// IsConfiguring flag, and — when a camera is connected — nudges the
// bootstrap loop to restart so the new settings take effect. It refuses to
// run when another reconfiguration is already in progress.
func SaveConfig(configDirectory string, config models.Config, configuration *models.Configuration, communication *models.Communication) error {
	if communication.IsConfiguring.IsSet() {
		return errors.New("☄ Already reconfiguring")
	}
	communication.IsConfiguring.Set()
	defer communication.IsConfiguring.UnSet()

	if err := StoreConfig(configDirectory, config); err != nil {
		return err
	}
	if communication.CameraConnected {
		// Non-blocking send: if a restart is already pending, skip it.
		select {
		case communication.HandleBootstrap <- "restart":
		default:
		}
	}
	return nil
}
// StoreConfig writes the configuration to its backing store: MongoDB for
// factory/kubernetes deployments, a local JSON file for stand-alone agents.
// An unrecognized deployment type yields an error.
func StoreConfig(configDirectory string, config models.Config) error {
	// Encryption key can be set wrong: restore real newlines that were
	// escaped as "\n" sequences.
	if config.Encryption != nil {
		config.Encryption.PrivateKey = strings.ReplaceAll(config.Encryption.PrivateKey, "\\n", "\n")
	}

	deployment := os.Getenv("DEPLOYMENT")
	switch {
	case deployment == "factory" || os.Getenv("MACHINERY_ENVIRONMENT") == "kubernetes":
		// Save into database: update this deployment's "config" document.
		client := database.New()
		collection := client.Database(database.DatabaseName).Collection("configuration")
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		_, err := collection.UpdateOne(ctx, bson.M{
			"type": "config",
			"name": os.Getenv("DEPLOYMENT_NAME"),
		}, bson.M{"$set": config})
		return err
	case deployment == "" || deployment == "agent":
		// Save into file.
		res, _ := json.MarshalIndent(config, "", "\t")
		return ioutil.WriteFile(configDirectory+"/data/config/config.json", res, 0644)
	}
	return errors.New("Not able to update config")
}

View File

@@ -1,45 +1,55 @@
package database
import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"
"gopkg.in/mgo.v2"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
type DB struct {
Session *mgo.Session
Client *mongo.Client
}
var _init_ctx sync.Once
var _instance *DB
var DatabaseName = "KerberosFactory"
func New() *mgo.Session {
func New() *mongo.Client {
host := os.Getenv("MONGODB_HOST")
database := os.Getenv("MONGODB_DATABASE_CREDENTIALS")
databaseCredentials := os.Getenv("MONGODB_DATABASE_CREDENTIALS")
replicaset := os.Getenv("MONGODB_REPLICASET")
username := os.Getenv("MONGODB_USERNAME")
password := os.Getenv("MONGODB_PASSWORD")
authentication := "SCRAM-SHA-256"
_init_ctx.Do(func() {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
_instance = new(DB)
mongoDBDialInfo := &mgo.DialInfo{
Addrs: strings.Split(host, ","),
Timeout: 3 * time.Second,
Database: database,
Username: username,
Password: password,
mongodbURI := fmt.Sprintf("mongodb://%s:%s@%s", username, password, host)
if replicaset != "" {
mongodbURI = fmt.Sprintf("%s/?replicaSet=%s", mongodbURI, replicaset)
}
session, err := mgo.DialWithInfo(mongoDBDialInfo)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(mongodbURI).SetAuth(options.Credential{
AuthMechanism: authentication,
AuthSource: databaseCredentials,
Username: username,
Password: password,
}))
if err != nil {
fmt.Printf("Error en mongo: %+v\n", err)
fmt.Printf("Error setting up mongodb connection: %+v\n", err)
os.Exit(1)
}
_instance.Session = session
_instance.Client = client
})
return _instance.Session
return _instance.Client
}

View File

@@ -0,0 +1,126 @@
package encryption
import (
"bytes"
"crypto"
"crypto/aes"
"crypto/cipher"
"crypto/md5"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"errors"
"hash"
)
func DecryptWithPrivateKey(ciphertext string, privateKey *rsa.PrivateKey) ([]byte, error) {
cipheredValue, _ := base64.StdEncoding.DecodeString(ciphertext)
out, err := rsa.DecryptPKCS1v15(nil, privateKey, cipheredValue)
return out, err
}
func SignWithPrivateKey(data []byte, privateKey *rsa.PrivateKey) ([]byte, error) {
hashed := sha256.Sum256(data)
signature, err := rsa.SignPKCS1v15(nil, privateKey, crypto.SHA256, hashed[:])
return signature, err
}
// AesEncrypt encrypts content with AES-256-CBC in the OpenSSL/CryptoJS
// compatible layout: "Salted__" + 8-byte random salt + ciphertext, with the
// key and IV derived from the password via EVP_BytesToKey (MD5, 1 iteration).
func AesEncrypt(content []byte, password string) ([]byte, error) {
	salt := make([]byte, 8)
	if _, err := rand.Read(salt); err != nil {
		return nil, err
	}
	key, iv, err := DefaultEvpKDF([]byte(password), salt)
	// BUGFIX: this error was previously shadowed by the aes.NewCipher
	// assignment below and never checked.
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	// Pad to a whole number of AES blocks, then encrypt in place.
	cipherBytes := PKCS5Padding(content, aes.BlockSize)
	mode.CryptBlocks(cipherBytes, cipherBytes)
	// Assemble the OpenSSL-style envelope.
	cipherText := make([]byte, 16+len(cipherBytes))
	copy(cipherText[:8], []byte("Salted__"))
	copy(cipherText[8:16], salt)
	copy(cipherText[16:], cipherBytes)
	return cipherText, nil
}
// AesDecrypt decrypts an OpenSSL/CryptoJS style AES-256-CBC payload
// ("Salted__" + 8-byte salt + ciphertext) produced by AesEncrypt, deriving
// the key and IV from the password via EVP_BytesToKey (MD5, 1 iteration).
func AesDecrypt(cipherText []byte, password string) ([]byte, error) {
	// BUGFIX: guard the length; slicing cipherText[:8]/[8:16] used to panic
	// for inputs shorter than 16 bytes.
	if len(cipherText) < 16 || string(cipherText[:8]) != "Salted__" {
		return nil, errors.New("invalid crypto js aes encryption")
	}
	salt := cipherText[8:16]
	cipherBytes := cipherText[16:]
	// BUGFIX: CryptBlocks panics on input that is not a whole number of
	// blocks; reject it up front instead.
	if len(cipherBytes) == 0 || len(cipherBytes)%aes.BlockSize != 0 {
		return nil, errors.New("invalid crypto js aes encryption")
	}
	key, iv, err := DefaultEvpKDF([]byte(password), salt)
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	mode := cipher.NewCBCDecrypter(block, iv)
	// Decrypt in place, then strip the PKCS#5 padding.
	mode.CryptBlocks(cipherBytes, cipherBytes)
	result := PKCS5UnPadding(cipherBytes)
	return result, nil
}
// EvpKDF implements OpenSSL's EVP_BytesToKey derivation: it repeatedly
// hashes (previous block || password || salt) until keySize*4 bytes of key
// material have been produced, running the digest `iterations` times per
// block. Only the "md5" hash algorithm is supported.
func EvpKDF(password []byte, salt []byte, keySize int, iterations int, hashAlgorithm string) ([]byte, error) {
	var hasher hash.Hash
	switch hashAlgorithm {
	case "md5":
		hasher = md5.New()
	default:
		return []byte{}, errors.New("not implement hasher algorithm")
	}

	target := keySize * 4
	derived := make([]byte, 0, target)
	var block []byte
	for len(derived) < target {
		// Chain the previous block into the next digest (skipped on the
		// first round, when block is still empty).
		if len(block) > 0 {
			hasher.Write(block)
		}
		hasher.Write(password)
		hasher.Write(salt)
		block = hasher.Sum(nil)
		hasher.Reset()
		// Optional extra stretching rounds.
		for i := 1; i < iterations; i++ {
			hasher.Write(block)
			block = hasher.Sum(nil)
			hasher.Reset()
		}
		derived = append(derived, block...)
	}
	return derived[:target], nil
}
// DefaultEvpKDF derives a 32-byte AES-256 key and a 16-byte IV from the
// password and salt using EvpKDF with MD5 and a single iteration — the
// parameters OpenSSL and CryptoJS use by default.
func DefaultEvpKDF(password []byte, salt []byte) (key []byte, iv []byte, err error) {
	const keySize = 256 / 32 // key length in 32-bit words
	const ivSize = 128 / 32  // IV length in 32-bit words
	derived, err := EvpKDF(password, salt, keySize+ivSize, 1, "md5")
	if err != nil {
		return []byte{}, []byte{}, err
	}
	key = derived[:keySize*4]
	iv = derived[keySize*4:]
	return key, iv, nil
}
// PKCS5UnPadding strips PKCS#5/PKCS#7 padding from src, returning the
// unpadded payload. BUGFIX: it used to panic on empty input and on a
// padding byte larger than the input length; such invalid inputs now
// return src unchanged.
func PKCS5UnPadding(src []byte) []byte {
	length := len(src)
	if length == 0 {
		return src
	}
	unpadding := int(src[length-1])
	if unpadding > length {
		// Invalid padding byte — returning the input unchanged is safer
		// than slicing out of range.
		return src
	}
	return src[:(length - unpadding)]
}
// PKCS5Padding appends PKCS#5/PKCS#7 padding to src so its length becomes a
// multiple of blockSize. Each padding byte equals the number of bytes
// added; an already-aligned input gains a full extra block.
func PKCS5Padding(src []byte, blockSize int) []byte {
	padLen := blockSize - len(src)%blockSize
	filler := bytes.Repeat([]byte{byte(padLen)}, padLen)
	return append(src, filler...)
}

View File

@@ -2,6 +2,7 @@ package log
import (
"os"
"time"
"github.com/op/go-logging"
"github.com/sirupsen/logrus"
@@ -11,7 +12,6 @@ import (
// The logging library being used everywhere.
var Log = Logging{
Logger: "logrus",
Level: "debug",
}
// -----------------
@@ -20,7 +20,7 @@ var Log = Logging{
var gologging = logging.MustGetLogger("gologger")
func ConfigureGoLogging() {
func ConfigureGoLogging(configDirectory string, timezone *time.Location) {
// Logging
var format = logging.MustStringFormatter(
`%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,
@@ -31,7 +31,7 @@ func ConfigureGoLogging() {
stdBackend := logging.NewLogBackend(os.Stderr, "", 0)
stdBackendLeveled := logging.NewBackendFormatter(stdBackend, format)
fileBackend := logging.NewLogBackend(&lumberjack.Logger{
Filename: "./data/log/machinery.txt",
Filename: configDirectory + "/data/log/machinery.txt",
MaxSize: 2, // megabytes
Compress: true, // disabled by default
}, "", 0)
@@ -44,38 +44,67 @@ func ConfigureGoLogging() {
// This a logrus
// -> github.com/sirupsen/logrus
func ConfigureLogrus() {
// Log as JSON instead of the default ASCII formatter.
logrus.SetFormatter(&logrus.JSONFormatter{})
func ConfigureLogrus(level string, output string, timezone *time.Location) {
if output == "json" {
// Log as JSON instead of the default ASCII formatter.
logrus.SetFormatter(LocalTimeZoneFormatter{
Timezone: timezone,
Formatter: &logrus.JSONFormatter{},
})
} else if output == "text" {
// Log as text with colors.
formatter := logrus.TextFormatter{
ForceColors: true,
FullTimestamp: true,
}
logrus.SetFormatter(LocalTimeZoneFormatter{
Timezone: timezone,
Formatter: &formatter,
})
}
// Use local timezone for providing datetime in logs!
// Output to stdout instead of the default stderr
// Can be any io.Writer, see below for File example
logrus.SetOutput(os.Stdout)
// Only log the warning severity or above.
logrus.SetLevel(logrus.InfoLevel)
logLevel := logrus.InfoLevel
if level == "error" {
logLevel = logrus.ErrorLevel
} else if level == "debug" {
logLevel = logrus.DebugLevel
logrus.SetReportCaller(true)
} else if level == "fatal" {
logLevel = logrus.FatalLevel
} else if level == "warning" {
logLevel = logrus.WarnLevel
} // Add this line for logging filename and line number!
logrus.SetLevel(logLevel)
}
func NewLogger(logger string, level string) *Logging {
loggy := Logging{
Logger: logger,
Level: level,
}
loggy.Init()
return &loggy
type LocalTimeZoneFormatter struct {
Timezone *time.Location
Formatter logrus.Formatter
}
func (u LocalTimeZoneFormatter) Format(e *logrus.Entry) ([]byte, error) {
e.Time = e.Time.In(u.Timezone)
return u.Formatter.Format(e)
}
type Logging struct {
Logger string
Level string
}
func (self *Logging) Init() {
func (self *Logging) Init(level string, logoutput string, configDirectory string, timezone *time.Location) {
switch self.Logger {
case "go-logging":
ConfigureGoLogging()
ConfigureGoLogging(configDirectory, timezone)
case "logrus":
ConfigureLogrus()
ConfigureLogrus(level, logoutput, timezone)
default:
}
}

View File

@@ -1,5 +0,0 @@
package models
// APIResponse is the generic envelope for HTTP API responses: the actual
// payload is wrapped in a single "data" field.
type APIResponse struct {
	// Data holds the response payload; any JSON-serializable value.
	Data interface{} `json:"data" bson:"data"`
}

View File

@@ -0,0 +1,6 @@
package models
// AudioDataPartial carries a chunk of 16-bit audio samples together with a
// capture timestamp.
type AudioDataPartial struct {
	Timestamp int64   `json:"timestamp" bson:"timestamp"` // capture time; presumably unix epoch — TODO confirm unit/resolution
	Data      []int16 `json:"data" bson:"data"`           // raw 16-bit samples — encoding/endianness not shown here
}

View File

@@ -0,0 +1,15 @@
package models
import "github.com/kerberos-io/joy4/av"
// Camera describes a camera's streams and the properties detected from them.
type Camera struct {
	Width       int          // frame width
	Height      int          // frame height
	Num         int          // presumably frame-rate numerator — confirm against capture code
	Denum       int          // presumably frame-rate denominator — confirm against capture code
	Framerate   float64      // effective frames per second
	RTSP        string       // main stream RTSP URL
	SubRTSP     string       // secondary (sub) stream RTSP URL
	Codec       av.CodecType // video codec type from the joy4 av package
	Initialized bool         // whether the camera has been set up successfully
}

View File

@@ -1,24 +1,41 @@
package models
import (
"context"
"sync/atomic"
"github.com/kerberos-io/agent/machinery/src/packets"
"github.com/tevino/abool"
)
// The communication struct that is managing
// all the communication between the different goroutines.
// Channels hand events between the capture, motion, upload and live-view
// goroutines; the atomic values and booleans expose shared state.
type Communication struct {
	Context            *context.Context    // shared lifetime context for the camera goroutines
	CancelContext      *context.CancelFunc // cancels Context to stop those goroutines
	PackageCounter     *atomic.Value       // main-stream packet counter
	LastPacketTimer    *atomic.Value       // time of the last main-stream packet
	PackageCounterSub  *atomic.Value       // sub-stream packet counter
	LastPacketTimerSub *atomic.Value       // time of the last sub-stream packet
	CloudTimestamp     *atomic.Value
	HandleBootstrap    chan string // "restart"/control messages for the bootstrap loop
	HandleStream       chan string
	// NOTE(review): HandleMotion is declared twice below (chan int64 and
	// chan MotionDataPartial). This looks like interleaved old/new lines
	// from a diff view — only one declaration can compile; the
	// MotionDataPartial variant appears to be the newer one. Confirm and
	// remove the stale line.
	HandleMotion    chan int64
	HandleSubStream chan string
	HandleMotion    chan MotionDataPartial
	HandleAudio     chan AudioDataPartial
	HandleUpload    chan string
	HandleHeartBeat chan string
	HandleLiveSD    chan int64
	HandleLiveHDKeepalive chan string
	// NOTE(review): HandleLiveHDHandshake is also declared twice (SDPPayload
	// vs RequestHDStreamPayload) — same diff artifact; keep only one.
	HandleLiveHDHandshake chan SDPPayload
	HandleLiveHDHandshake chan RequestHDStreamPayload
	HandleLiveHDPeers     chan string
	HandleONVIF           chan OnvifAction
	IsConfiguring         *abool.AtomicBool // set while SaveConfig is rewriting the configuration
	Queue                 *packets.Queue    // main-stream packet queue
	SubQueue              *packets.Queue    // sub-stream packet queue
	Image                 string
	CameraConnected       bool
	MainStreamConnected   bool
	SubStreamConnected    bool
	HasBackChannel        bool
}

View File

@@ -9,83 +9,99 @@ type Configuration struct {
GlobalConfig Config
}
//Config is the highlevel struct which contains all the configuration of
//your Kerberos Open Source instance.
// Config is the highlevel struct which contains all the configuration of
// your Kerberos Open Source instance.
type Config struct {
Type string `json:"type" binding:"required"`
Key string `json:"key"`
Name string `json:"name"`
Time string `json:"time,omitempty" bson:"time"`
Timezone string `json:"timezone,omitempty" bson:"timezone,omitempty"`
Capture Capture `json:"capture"`
Timetable []*Timetable `json:"timetable"`
Region *Region `json:"region"`
Cloud string `json:"cloud,omitempty" bson:"cloud,omitempty"`
S3 *S3 `json:"s3,omitempty" bson:"s3,omitempty"`
KStorage *KStorage `json:"kstorage,omitempty" bson:"kstorage,omitempty"`
MQTTURI string `json:"mqtturi,omitempty" bson:"mqtturi,omitempty"`
MQTTUsername string `json:"mqtt_username,omitempty" bson:"mqtt_username"`
MQTTPassword string `json:"mqtt_password,omitempty" bson:"mqtt_password"`
STUNURI string `json:"stunuri,omitempty" bson:"stunuri"`
TURNURI string `json:"turnuri,omitempty" bson:"turnuri"`
TURNUsername string `json:"turn_username,omitempty" bson:"turn_username"`
TURNPassword string `json:"turn_password,omitempty" bson:"turn_password"`
HeartbeatURI string `json:"heartbeaturi,omitempty" bson:"heartbeaturi"` /*obsolete*/
HubURI string `json:"hub_uri,omitempty" bson:"hub_uri"`
HubKey string `json:"hub_key,omitempty" bson:"hub_key"`
HubPrivateKey string `json:"hub_private_key,omitempty" bson:"hub_private_key"`
HubSite string `json:"hub_site,omitempty" bson:"hub_site"`
ConditionURI string `json:"condition_uri,omitempty" bson:"condition_uri"`
Type string `json:"type"`
Key string `json:"key"`
Name string `json:"name"`
FriendlyName string `json:"friendly_name"`
Time string `json:"time" bson:"time"`
Offline string `json:"offline"`
AutoClean string `json:"auto_clean"`
RemoveAfterUpload string `json:"remove_after_upload"`
MaxDirectorySize int64 `json:"max_directory_size"`
Timezone string `json:"timezone"`
Capture Capture `json:"capture"`
Timetable []*Timetable `json:"timetable"`
Region *Region `json:"region"`
Cloud string `json:"cloud" bson:"cloud"`
S3 *S3 `json:"s3,omitempty" bson:"s3,omitempty"`
KStorage *KStorage `json:"kstorage,omitempty" bson:"kstorage,omitempty"`
Dropbox *Dropbox `json:"dropbox,omitempty" bson:"dropbox,omitempty"`
MQTTURI string `json:"mqtturi" bson:"mqtturi,omitempty"`
MQTTUsername string `json:"mqtt_username" bson:"mqtt_username"`
MQTTPassword string `json:"mqtt_password" bson:"mqtt_password"`
STUNURI string `json:"stunuri" bson:"stunuri"`
TURNURI string `json:"turnuri" bson:"turnuri"`
TURNUsername string `json:"turn_username" bson:"turn_username"`
TURNPassword string `json:"turn_password" bson:"turn_password"`
HeartbeatURI string `json:"heartbeaturi" bson:"heartbeaturi"` /*obsolete*/
HubEncryption string `json:"hub_encryption" bson:"hub_encryption"`
HubURI string `json:"hub_uri" bson:"hub_uri"`
HubKey string `json:"hub_key" bson:"hub_key"`
HubPrivateKey string `json:"hub_private_key" bson:"hub_private_key"`
HubSite string `json:"hub_site" bson:"hub_site"`
ConditionURI string `json:"condition_uri" bson:"condition_uri"`
Encryption *Encryption `json:"encryption,omitempty" bson:"encryption,omitempty"`
}
//Capture defines which camera type (Id) you are using (IP, USB or Raspberry Pi camera),
//and also contains recording specific parameters.
// Capture defines which camera type (Id) you are using (IP, USB or Raspberry Pi camera),
// and also contains recording specific parameters.
type Capture struct {
Name string `json:"name"`
IPCamera IPCamera `json:"ipcamera"`
USBCamera USBCamera `json:"usbcamera"`
RaspiCamera RaspiCamera `json:"raspicamera"`
Recording string `json:"recording,omitempty"`
Snapshots string `json:"snapshots,omitempty"`
Motion string `json:"motion,omitempty"`
Liveview string `json:"liveview,omitempty"`
Continuous string `json:"continuous,omitempty"`
PostRecording int64 `json:"postrecording"`
PreRecording int `json:"prerecording"`
PreRecording int64 `json:"prerecording"`
MaxLengthRecording int64 `json:"maxlengthrecording"`
TranscodingWebRTC string `json:"transcodingwebrtc"`
TranscodingResolution int64 `json:"transcodingresolution"`
ForwardWebRTC string `json:"forwardwebrtc"`
Fragmented string `json:"fragmented,omitempty" bson:"fragmented,omitempty"`
FragmentedDuration int64 `json:"fragmentedduration,omitempty" bson:"fragmentedduration,omitempty"`
PixelChangeThreshold int `json:"pixelChangeThreshold,omitempty"`
}
//IPCamera configuration, such as the RTSP url of the IPCamera and the FPS.
//Also includes ONVIF integration
// IPCamera configuration, such as the RTSP url of the IPCamera and the FPS.
// Also includes ONVIF integration
type IPCamera struct {
RTSP string `json:"rtsp"`
Width int `json:"width"`
Height int `json:"height"`
FPS string `json:"fps"`
ONVIF bool `json:"onvif,omitempty" bson:"onvif"`
ONVIFXAddr string `json:"onvif_xaddr,omitempty" bson:"onvif_xaddr"`
ONVIFUsername string `json:"onvif_username,omitempty" bson:"onvif_username"`
ONVIFPassword string `json:"onvif_password,omitempty" bson:"onvif_password"`
RTSP string `json:"rtsp"`
SubRTSP string `json:"sub_rtsp"`
ONVIF string `json:"onvif,omitempty" bson:"onvif"`
ONVIFXAddr string `json:"onvif_xaddr" bson:"onvif_xaddr"`
ONVIFUsername string `json:"onvif_username" bson:"onvif_username"`
ONVIFPassword string `json:"onvif_password" bson:"onvif_password"`
}
//USBCamera configuration, such as the device path (/dev/video*)
// USBCamera configuration, such as the device path (/dev/video*)
type USBCamera struct {
Device string `json:"device"`
}
//RaspiCamera configuration, such as the device path (/dev/video*)
// RaspiCamera configuration, such as the device path (/dev/video*)
type RaspiCamera struct {
Device string `json:"device"`
}
//Region specifies the type (Id) of Region Of Interest (ROI), you
//would like to use.
// Region specifies the type (Id) of Region Of Interest (ROI), you
// would like to use.
type Region struct {
Name string `json:"name"`
Rectangle Rectangle `json:"rectangle"`
Polygon []Polygon `json:"polygon"`
}
//Rectangle is defined by a starting point, left top (x1,y1) and end point (x2,y2).
// Rectangle is defined by a starting point, left top (x1,y1) and end point (x2,y2).
type Rectangle struct {
X1 int `json:"x1"`
Y1 int `json:"y1"`
@@ -93,22 +109,22 @@ type Rectangle struct {
Y2 int `json:"y2"`
}
// Polygon is a sequence of coordinates (x,y). The ID specifies a unique
// identifier, as multiple polygons can be defined.
type Polygon struct {
	ID          string       `json:"id"`
	Coordinates []Coordinate `json:"coordinates"`
}
// Coordinate is a single (x,y) point belonging to a Polygon.
type Coordinate struct {
	X float64 `json:"x"`
	Y float64 `json:"y"`
}
//Timetable allows you to set a Time Of Intterest (TOI), which limits recording or
//detection to a predefined time interval. Two tracks can be set, which allows you
//to give some flexibility.
// Timetable allows you to set a Time Of Intterest (TOI), which limits recording or
// detection to a predefined time interval. Two tracks can be set, which allows you
// to give some flexibility.
type Timetable struct {
Start1 int `json:"start1"`
End1 int `json:"end1"`
@@ -116,7 +132,7 @@ type Timetable struct {
End2 int `json:"end2"`
}
//S3 integration
// S3 integration
type S3 struct {
Proxy string `json:"proxy,omitempty" bson:"proxy,omitempty"`
ProxyURI string `json:"proxyuri,omitempty" bson:"proxyuri,omitempty"`
@@ -127,13 +143,28 @@ type S3 struct {
Secretkey string `json:"secretkey,omitempty" bson:"secretkey,omitempty"`
}
//KStorage contains the credentials of the Kerberos Storage/Kerberos Cloud instance.
//By defining KStorage you can make your recordings available in the cloud, at a centrel place.
// KStorage contains the credentials of the Kerberos Storage/Kerberos Cloud instance.
// By defining KStorage you can make your recordings available in the cloud, at a centrel place.
type KStorage struct {
URI string `json:"uri,omitempty" bson:"uri,omitempty"`
CloudKey string `json:"cloud_key,omitempty" bson:"cloud_key,omitempty"`
CloudKey string `json:"cloud_key,omitempty" bson:"cloud_key,omitempty"` /* old way, remove this */
AccessKey string `json:"access_key,omitempty" bson:"access_key,omitempty"`
SecretAccessKey string `json:"secret_access_key,omitempty" bson:"secret_access_key,omitempty"`
Provider string `json:"provider,omitempty" bson:"provider,omitempty"`
Directory string `json:"directory,omitempty" bson:"directory,omitempty"`
}
// Dropbox contains the credentials for the Dropbox integration: an access
// token and the target directory recordings are uploaded to.
type Dropbox struct {
	AccessToken string `json:"access_token,omitempty" bson:"access_token,omitempty"`
	Directory   string `json:"directory,omitempty" bson:"directory,omitempty"`
}
// Encryption holds the end-to-end encryption settings. Enabled and
// Recordings are "true"/"false" string flags. PrivateKey is a PEM-encoded
// private key used for signing (parsed as PKCS#8 RSA elsewhere), and
// SymmetricKey is the AES key used to encrypt payloads and recordings.
type Encryption struct {
	Enabled      string `json:"enabled" bson:"enabled"`
	Recordings   string `json:"recordings" bson:"recordings"`
	Fingerprint  string `json:"fingerprint" bson:"fingerprint"`
	PrivateKey   string `json:"private_key" bson:"private_key"`
	SymmetricKey string `json:"symmetric_key" bson:"symmetric_key"`
}

View File

@@ -0,0 +1,190 @@
package models
import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"io"
"strings"
"time"
"github.com/gofrs/uuid"
"github.com/kerberos-io/agent/machinery/src/encryption"
"github.com/kerberos-io/agent/machinery/src/log"
)
// PackageMQTTMessage prepares a models.Message for publication over MQTT:
// it assigns a fresh message id (UUID v4), timestamp and device id, and —
// when encryption is enabled in the configuration — replaces the payload
// Value with an AES-encrypted, base64-encoded envelope signed with the
// configured RSA private key.
//
// It returns the JSON-marshalled message and any marshalling error.
func PackageMQTTMessage(configuration *Configuration, msg Message) ([]byte, error) {
	// Create a Version 4 UUID used as the unique message id (mid).
	u2, err := uuid.NewV4()
	if err != nil {
		// A zero UUID is still serialisable; log and continue as before.
		log.Log.Error("failed to generate UUID: " + err.Error())
	}
	msg.Mid = u2.String()
	msg.DeviceId = msg.Payload.DeviceId
	msg.Timestamp = time.Now().Unix()

	// We'll hide the message (by default in latest version)
	// We will encrypt using the Kerberos Hub private key if set.
	/*msg.Hidden = false
	if configuration.Config.HubPrivateKey != "" {
		msg.Hidden = true
		pload := msg.Payload
		// Pload to base64
		data, err := json.Marshal(pload)
		if err != nil {
			msg.Hidden = false
		} else {
			k := configuration.Config.Encryption.SymmetricKey
			encryptedValue, err := encryption.AesEncrypt(data, k)
			if err == nil {
				data := base64.StdEncoding.EncodeToString(encryptedValue)
				msg.Payload.HiddenValue = data
				msg.Payload.Value = make(map[string]interface{})
			}
		}
	}*/

	// Next to hiding the message, we can also encrypt it using your own
	// private key, which is not stored in a remote environment (hence you
	// are the only one owning it).
	msg.Encrypted = configuration.Config.Encryption != nil && configuration.Config.Encryption.Enabled == "true"
	msg.PublicKey = ""
	msg.Fingerprint = ""

	if msg.Encrypted {
		// Marshal the payload first; if this fails there is nothing to
		// encrypt, so skip the whole encryption branch (the previous code
		// continued and encrypted a nil slice).
		data, err := json.Marshal(msg.Payload)
		if err != nil {
			log.Log.Error("models.mqtt.PackageMQTTMessage(): failed to marshal payload: " + err.Error())
		} else {
			// Decode the PEM-encoded private key from the configuration.
			privateKey := configuration.Config.Encryption.PrivateKey
			r := strings.NewReader(privateKey)
			pemBytes, _ := io.ReadAll(r)
			block, _ := pem.Decode(pemBytes)
			if block == nil {
				log.Log.Error("models.mqtt.PackageMQTTMessage(): error decoding PEM block containing private key")
			} else {
				// Parse the PKCS#8 private key and make sure it is RSA. The
				// previous code ignored the type-assertion result, which
				// could hand a nil key to SignWithPrivateKey.
				key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
				if err != nil {
					log.Log.Error("models.mqtt.PackageMQTTMessage(): error parsing private key: " + err.Error())
				} else if rsaKey, ok := key.(*rsa.PrivateKey); !ok {
					log.Log.Error("models.mqtt.PackageMQTTMessage(): private key is not an RSA key")
				} else {
					// AES-encrypt the payload with the symmetric key, then
					// sign the base64 representation with the RSA key.
					k := configuration.Config.Encryption.SymmetricKey
					encryptedValue, err := encryption.AesEncrypt(data, k)
					if err == nil {
						encoded := base64.StdEncoding.EncodeToString(encryptedValue)
						signature, err := encryption.SignWithPrivateKey([]byte(encoded), rsaKey)
						if err == nil {
							msg.Payload.EncryptedValue = encoded
							msg.Payload.Signature = base64.StdEncoding.EncodeToString(signature)
							msg.Payload.Value = make(map[string]interface{})
						}
					}
				}
			}
		}
	}
	return json.Marshal(msg)
}
// Message is the envelope used to send and receive messages over the MQTT
// broker. It is identified by Mid and may carry its payload in clear text,
// hidden, or end-to-end encrypted (see PackageMQTTMessage).
type Message struct {
	Mid         string  `json:"mid"`         // unique message id (UUID v4)
	DeviceId    string  `json:"device_id"`   // id of the originating device
	Timestamp   int64   `json:"timestamp"`   // unix time the message was packaged
	Encrypted   bool    `json:"encrypted"`   // payload is AES-encrypted and RSA-signed
	Hidden      bool    `json:"hidden"`      // payload is hidden (hub-key path, currently disabled)
	PublicKey   string  `json:"public_key"`
	Fingerprint string  `json:"fingerprint"`
	Payload     Payload `json:"payload"`
}
// Payload is the inner content of a Message. When the message is encrypted
// or hidden, Value is emptied and the data moves to EncryptedValue or
// HiddenValue (base64 encoded).
type Payload struct {
	Action         string                 `json:"action"`          // requested action identifier
	DeviceId       string                 `json:"device_id"`       // target/source device id
	Signature      string                 `json:"signature"`       // base64 RSA signature of EncryptedValue
	EncryptedValue string                 `json:"encrypted_value"` // base64 AES-encrypted payload
	HiddenValue    string                 `json:"hidden_value"`    // base64 hidden payload (hub-key path)
	Value          map[string]interface{} `json:"value"`           // clear-text payload
}
// AudioPayload carries a chunk of raw audio samples received as input.
type AudioPayload struct {
	Timestamp int64   `json:"timestamp"` // timestamp of the recording request.
	Data      []int16 `json:"data"`      // raw PCM samples
}

// RecordPayload is a recording request, forwarded to the motion handler.
type RecordPayload struct {
	Timestamp int64 `json:"timestamp"` // timestamp of the recording request.
}

// PTZPositionPayload is a preset position request; the position is queried
// through ONVIF and sent back.
type PTZPositionPayload struct {
	Timestamp int64 `json:"timestamp"` // timestamp of the preset request.
}

// RequestConfigPayload asks for the current configuration, which is fetched
// and sent back.
type RequestConfigPayload struct {
	Timestamp int64 `json:"timestamp"` // timestamp of the request.
}

// UpdateConfigPayload carries a new configuration to apply; a confirmation
// is sent back after updating.
type UpdateConfigPayload struct {
	Timestamp int64  `json:"timestamp"` // timestamp of the request.
	Config    Config `json:"config"`    // the configuration to apply
}

// RequestSDStreamPayload asks for the SD (sub) stream.
type RequestSDStreamPayload struct {
	Timestamp int64 `json:"timestamp"` // timestamp
}

// RequestHDStreamPayload asks for the HD (main) stream over WebRTC.
type RequestHDStreamPayload struct {
	Timestamp          int64  `json:"timestamp"`           // timestamp
	HubKey             string `json:"hub_key"`             // hub key
	SessionID          string `json:"session_id"`          // session id
	SessionDescription string `json:"session_description"` // session description
}

// ReceiveHDCandidatesPayload carries an ICE candidate for an HD session.
type ReceiveHDCandidatesPayload struct {
	Timestamp int64  `json:"timestamp"`  // timestamp
	SessionID string `json:"session_id"` // session id
	Candidate string `json:"candidate"`  // candidate
}

// NavigatePTZPayload requests a PTZ navigation action on a device.
type NavigatePTZPayload struct {
	Timestamp int64  `json:"timestamp"` // timestamp
	DeviceId  string `json:"device_id"` // device id
	Action    string `json:"action"`    // action
}

// TriggerRelay requests toggling a relay on a device.
type TriggerRelay struct {
	Timestamp int64  `json:"timestamp"` // timestamp
	DeviceId  string `json:"device_id"` // device id
	Token     string `json:"token"`     // token
}

View File

@@ -0,0 +1,18 @@
package models
// Media describes a single stored recording and the camera it belongs to.
type Media struct {
	Key        string `json:"key"`        // storage key of the recording
	Path       string `json:"path"`       // path to the recording file
	Day        string `json:"day"`        // day the recording was made
	ShortDay   string `json:"short_day"`  // short/display form of the day
	Time       string `json:"time"`       // time of day of the recording
	Timestamp  string `json:"timestamp"`  // timestamp of the recording
	CameraName string `json:"camera_name"`
	CameraKey  string `json:"camera_key"`
}

// EventFilter restricts an event query to a timestamp window and limits the
// number of returned elements.
type EventFilter struct {
	TimestampOffsetStart int64 `json:"timestamp_offset_start"`
	TimestampOffsetEnd   int64 `json:"timestamp_offset_end"`
	NumberOfElements     int   `json:"number_of_elements"`
}

View File

@@ -6,9 +6,19 @@ type OnvifAction struct {
}
type OnvifActionPTZ struct {
Left int `json:"left" bson:"left"`
Right int `json:"right" bson:"right"`
Up int `json:"up" bson:"up"`
Down int `json:"down" bson:"down"`
Center int `json:"center" bson:"center"`
Left int `json:"left" bson:"left"`
Right int `json:"right" bson:"right"`
Up int `json:"up" bson:"up"`
Down int `json:"down" bson:"down"`
Center int `json:"center" bson:"center"`
Zoom float64 `json:"zoom" bson:"zoom"`
X float64 `json:"x" bson:"x"`
Y float64 `json:"y" bson:"y"`
Z float64 `json:"z" bson:"z"`
Preset string `json:"preset" bson:"preset"`
}
// OnvifActionPreset identifies a stored ONVIF PTZ preset by name and token.
type OnvifActionPreset struct {
	Name  string `json:"name" bson:"name"`
	Token string `json:"token" bson:"token"`
}

View File

@@ -0,0 +1,17 @@
package models
// System is a snapshot of host and process information reported by the agent.
type System struct {
	// NOTE(review): the field name suggests a CPU identifier but the tag is
	// "cpu_idle" — confirm which meaning is intended.
	CPUId             string   `json:"cpu_idle" bson:"cpu_idle"`
	Hostname          string   `json:"hostname" bson:"hostname"`
	Version           string   `json:"version" bson:"version"`
	Release           string   `json:"release" bson:"release"`
	BootTime          uint64   `json:"boot_time" bson:"boot_time"`
	KernelVersion     string   `json:"kernel_version" bson:"kernel_version"`
	MACs              []string `json:"macs" bson:"macs"`
	IPs               []string `json:"ips" bson:"ips"`
	Architecture      string   `json:"architecture" bson:"architecture"`
	UsedMemory        uint64   `json:"used_memory" bson:"used_memory"`
	TotalMemory       uint64   `json:"total_memory" bson:"total_memory"`
	FreeMemory        uint64   `json:"free_memory" bson:"free_memory"`
	ProcessUsedMemory uint64   `json:"process_used_memory" bson:"process_used_memory"`
}

View File

@@ -7,3 +7,16 @@ type User struct {
Role string `json:"role" bson:"role"`
Language string `json:"language" bson:"language"`
}
// Authentication is the credentials body posted to the login endpoint.
type Authentication struct {
	Username string `json:"username" bson:"username"`
	Password string `json:"password" bson:"password"`
}

// Authorization is the login response: a JWT token with its expiry plus the
// authenticated user's identity and role.
type Authorization struct {
	Code     int    `json:"code" bson:"code"`       // HTTP-style status code
	Token    string `json:"token" bson:"token"`     // JWT bearer token
	Expire   string `json:"expire" bson:"expire"`   // token expiry
	Username string `json:"username" bson:"username"`
	Role     string `json:"role" bson:"role"`
}

View File

@@ -0,0 +1,36 @@
package models
// APIResponse is the generic response wrapper returned by the HTTP API,
// optionally carrying PTZ capability information for camera endpoints.
type APIResponse struct {
	Data         interface{} `json:"data" bson:"data"`
	Message      interface{} `json:"message" bson:"message"`
	PTZFunctions interface{} `json:"ptz_functions" bson:"ptz_functions"`
	CanZoom      bool        `json:"can_zoom" bson:"can_zoom"`
	CanPanTilt   bool        `json:"can_pan_tilt" bson:"can_pan_tilt"`
}

// OnvifCredentials bundles the connection details of an ONVIF camera.
type OnvifCredentials struct {
	ONVIFXAddr    string `json:"onvif_xaddr,omitempty" bson:"onvif_xaddr"`
	ONVIFUsername string `json:"onvif_username,omitempty" bson:"onvif_username"`
	ONVIFPassword string `json:"onvif_password,omitempty" bson:"onvif_password"`
}

// CameraStreams holds the main and sub RTSP stream URLs of a camera.
type CameraStreams struct {
	RTSP    string `json:"rtsp"`
	SubRTSP string `json:"sub_rtsp"`
}

// OnvifPanTilt is the request body for a pan/tilt operation.
type OnvifPanTilt struct {
	OnvifCredentials OnvifCredentials `json:"onvif_credentials,omitempty" bson:"onvif_credentials"`
	Pan              float64          `json:"pan,omitempty" bson:"pan"`
	Tilt             float64          `json:"tilt,omitempty" bson:"tilt"`
}

// OnvifZoom is the request body for a zoom operation.
type OnvifZoom struct {
	OnvifCredentials OnvifCredentials `json:"onvif_credentials,omitempty" bson:"onvif_credentials"`
	Zoom             float64          `json:"zoom,omitempty" bson:"zoom"`
}

// OnvifPreset is the request body to move the camera to a stored preset.
type OnvifPreset struct {
	OnvifCredentials OnvifCredentials `json:"onvif_credentials,omitempty" bson:"onvif_credentials"`
	Preset           string           `json:"preset,omitempty" bson:"preset"`
}

View File

@@ -0,0 +1,16 @@
package models
// MotionDataPartial is the minimal motion event: when it happened and how
// many pixels changed.
type MotionDataPartial struct {
	Timestamp       int64 `json:"timestamp" bson:"timestamp"`
	NumberOfChanges int   `json:"numberOfChanges" bson:"numberOfChanges"`
}

// MotionDataFull is the complete motion event, including the device,
// triggering region and processing metrics.
type MotionDataFull struct {
	Timestamp       int64   `json:"timestamp" bson:"timestamp"`
	Size            float64 `json:"size" bson:"size"`
	Microseconds    float64 `json:"microseconds" bson:"microseconds"`
	DeviceName      string  `json:"deviceName" bson:"deviceName"`
	Region          string  `json:"region" bson:"region"`
	NumberOfChanges int     `json:"numberOfChanges" bson:"numberOfChanges"`
	Token           int     `json:"token" bson:"token"`
}

View File

@@ -0,0 +1,15 @@
package models
import "time"
// The OutputMessage contains the relevant information
// to specify the type of triggers we want to execute.
type OutputMessage struct {
	Name      string    // display name of the event
	Outputs   []string  // output integrations to trigger (e.g. "slack", "webhook")
	Trigger   string    // what triggered the event
	Timestamp time.Time // when the event occurred
	File      string    // associated recording file, if any
	CameraId  string    // id of the camera that produced the event
	SiteId    string    // id of the site the camera belongs to
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,59 @@
package outputs
import (
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
)
// Output is the contract every output integration implements.
//
// NOTE(review): the concrete outputs in this package (SlackOutput,
// WebhookOutput, OnvifRelayOutput, ScriptOutput) all declare Trigger with a
// *models.OutputMessage pointer parameter, so they do not satisfy this
// interface as written — confirm whether this parameter should be a pointer.
type Output interface {
	// Triggers the integration
	Trigger(message models.OutputMessage) error
}
// Execute dispatches an OutputMessage to every integration listed in
// message.Outputs. A failing output is logged and does not prevent the
// remaining outputs from running; as in the original implementation, the
// function itself always returns nil.
func Execute(message *models.OutputMessage) error {
	// All concrete outputs share this trigger shape; declaring it locally
	// avoids depending on the package-level Output interface (whose
	// parameter is a value, not a pointer).
	type trigger interface {
		Trigger(message *models.OutputMessage) error
	}
	for _, output := range message.Outputs {
		// Resolve the handler and the label used in log messages; unknown
		// output identifiers are skipped silently, as before.
		var (
			label   string
			handler trigger
		)
		switch output {
		case "slack":
			label, handler = "slack", &SlackOutput{}
		case "webhook":
			label, handler = "webhook", &WebhookOutput{}
		case "onvif_relay":
			label, handler = "onvif", &OnvifRelayOutput{}
		case "script":
			label, handler = "script", &ScriptOutput{}
		default:
			continue
		}
		if err := handler.Trigger(message); err != nil {
			log.Log.Error("outputs.main.Execute(" + label + "): " + err.Error())
		} else {
			log.Log.Debug("outputs.main.Execute(" + label + "): message was processed by output.")
		}
	}
	return nil
}

View File

@@ -0,0 +1,12 @@
package outputs
import "github.com/kerberos-io/agent/machinery/src/models"
// OnvifRelayOutput triggers an ONVIF relay as an output integration.
type OnvifRelayOutput struct {
	Output
}

// Trigger is a stub: the ONVIF relay integration is not implemented yet, so
// every message is accepted and success is reported.
func (o *OnvifRelayOutput) Trigger(message *models.OutputMessage) error {
	return nil
}

View File

@@ -0,0 +1,12 @@
package outputs
import "github.com/kerberos-io/agent/machinery/src/models"
// ScriptOutput runs a user-provided script as an output integration.
type ScriptOutput struct {
	Output
}

// Trigger is a stub: the script integration is not implemented yet, so
// every message is accepted and success is reported.
func (scr *ScriptOutput) Trigger(message *models.OutputMessage) error {
	return nil
}

View File

@@ -0,0 +1,12 @@
package outputs
import "github.com/kerberos-io/agent/machinery/src/models"
// SlackOutput posts event notifications to Slack as an output integration.
type SlackOutput struct {
	Output
}

// Trigger is a stub: the Slack integration is not implemented yet, so every
// message is accepted and success is reported.
func (s *SlackOutput) Trigger(message *models.OutputMessage) error {
	return nil
}

View File

@@ -0,0 +1,12 @@
package outputs
import "github.com/kerberos-io/agent/machinery/src/models"
// WebhookOutput calls a user-configured webhook as an output integration.
type WebhookOutput struct {
	Output
}

// Trigger is a stub: the webhook integration is not implemented yet, so
// every message is accepted and success is reported.
func (w *WebhookOutput) Trigger(message *models.OutputMessage) error {
	return nil
}

View File

@@ -0,0 +1,69 @@
package packets
// Buf is a ring buffer of packets addressed by monotonically increasing
// absolute positions (BufPos). The backing slice always has a power-of-two
// length so a position maps to a slot with a simple bitmask.
type Buf struct {
	Head, Tail BufPos   // oldest buffered position and next write position
	pkts       []Packet // backing ring storage (length is a power of two)
	Size       int      // total bytes of packet data currently buffered
	Count      int      // number of packets currently buffered
}

// NewBuf returns an empty ring buffer with an initial capacity of 64 packets.
func NewBuf() *Buf {
	return &Buf{pkts: make([]Packet, 64)}
}

// Pop removes and returns the oldest packet. It panics if the buffer is empty.
func (b *Buf) Pop() Packet {
	if b.Count == 0 {
		panic("pktque.Buf: Pop() when count == 0")
	}
	slot := int(b.Head) & (len(b.pkts) - 1)
	pkt := b.pkts[slot]
	b.pkts[slot] = Packet{} // drop references so the data can be collected
	b.Size -= len(pkt.Data)
	b.Head++
	b.Count--
	return pkt
}

// grow doubles the backing storage, re-mapping every live position into the
// larger ring.
func (b *Buf) grow() {
	grown := make([]Packet, len(b.pkts)*2)
	for pos := b.Head; pos.LT(b.Tail); pos++ {
		grown[int(pos)&(len(grown)-1)] = b.pkts[int(pos)&(len(b.pkts)-1)]
	}
	b.pkts = grown
}

// Push appends a packet at the tail, growing the ring when it is full.
func (b *Buf) Push(pkt Packet) {
	if b.Count == len(b.pkts) {
		b.grow()
	}
	b.pkts[int(b.Tail)&(len(b.pkts)-1)] = pkt
	b.Tail++
	b.Count++
	b.Size += len(pkt.Data)
}

// Get returns the packet stored at pos without removing it.
func (b *Buf) Get(pos BufPos) Packet {
	return b.pkts[int(pos)&(len(b.pkts)-1)]
}

// IsValidPos reports whether pos refers to a currently buffered packet.
func (b *Buf) IsValidPos(pos BufPos) bool {
	return pos.GE(b.Head) && pos.LT(b.Tail)
}

// BufPos is an absolute packet position within a Buf.
type BufPos int

// LT reports whether p is before pos.
func (p BufPos) LT(pos BufPos) bool {
	return p-pos < 0
}

// GE reports whether p is at or after pos.
func (p BufPos) GE(pos BufPos) bool {
	return p-pos >= 0
}

// GT reports whether p is after pos.
func (p BufPos) GT(pos BufPos) bool {
	return p-pos > 0
}

View File

@@ -0,0 +1,20 @@
package packets
import (
"time"
"github.com/pion/rtp"
)
// Packet represents a demuxed media packet, wrapping the underlying RTP
// packet together with stream and timing metadata.
type Packet struct {
	Packet          *rtp.Packet   // the underlying RTP packet
	IsAudio         bool          // packet is audio
	IsVideo         bool          // packet is video
	IsKeyFrame      bool          // video packet is key frame
	Idx             int8          // stream index in container format
	Codec           string        // codec name
	CompositionTime time.Duration // packet presentation time minus decode time for H264 B-Frame
	Time            time.Duration // packet decode time
	Data            []byte        // packet data
}

View File

@@ -0,0 +1,225 @@
// Packege pubsub implements publisher-subscribers model used in multi-channel streaming.
package packets
import (
"io"
"sync"
"time"
)
// time
// ----------------->
//
// V-A-V-V-A-V-V-A-V-V
// | |
// 0 5 10
// head tail
// oldest latest
//
// One publisher and multiple subscribers thread-safe packet buffer queue.
// Queue is a one-publisher, many-subscribers thread-safe packet buffer.
// Packets are written once and read concurrently through QueueCursor values,
// which never consume from the underlying buffer.
type Queue struct {
	buf *Buf // ring buffer holding the packets
	// NOTE(review): head/tail appear unused in this file — confirm before
	// relying on them.
	head, tail               int
	lock                     *sync.RWMutex // guards all fields of the queue
	cond                     *sync.Cond    // wakes readers on writes and on close
	curgopcount, maxgopcount int           // buffered GOP count and its limit
	streams                  []Stream      // stream headers, set by WriteHeader
	videoidx                 int           // index of the video stream, -1 if none
	closed                   bool          // set by Close; readers then get io.EOF
}
// NewQueue returns an empty queue that keeps at most two GOPs buffered by
// default (see SetMaxGopCount).
func NewQueue() *Queue {
	q := &Queue{}
	q.buf = NewBuf()
	q.maxgopcount = 2
	q.lock = &sync.RWMutex{}
	// Readers wait on the read side of the RWMutex.
	q.cond = sync.NewCond(q.lock.RLocker())
	q.videoidx = -1
	return q
}

// SetMaxGopCount changes how many GOPs may stay buffered before old packets
// are evicted by WritePacket.
func (self *Queue) SetMaxGopCount(n int) {
	self.lock.Lock()
	self.maxgopcount = n
	self.lock.Unlock()
	return
}

// WriteHeader stores the stream headers and records which stream carries
// video (the last video stream wins), then wakes any readers blocked in
// Streams().
func (self *Queue) WriteHeader(streams []Stream) error {
	self.lock.Lock()
	self.streams = streams
	for i, stream := range streams {
		if stream.IsVideo {
			self.videoidx = i
		}
	}
	self.cond.Broadcast()
	self.lock.Unlock()
	return nil
}

// WriteTrailer is a no-op; it exists to satisfy the muxer-style interface.
func (self *Queue) WriteTrailer() error {
	return nil
}
// Close marks the queue as closed and drains all buffered packets so their
// data can be garbage collected. After Close() is called, all QueueCursor's
// ReadPacket will return io.EOF.
func (self *Queue) Close() (err error) {
	self.lock.Lock()
	self.closed = true
	self.cond.Broadcast()
	// Drain the buffer completely. The previous implementation iterated up
	// to buf.Size, which is the buffered byte count, not the packet count —
	// it stopped early (or not at all, for zero-length packets) and left
	// packets buffered. Count is the actual number of buffered packets.
	for self.buf.Count > 0 {
		self.buf.Pop()
	}
	self.lock.Unlock()
	return
}
// GetSize returns the number of buffered packets.
// NOTE(review): despite the name, this returns the packet count
// (buf.Count), not the buffered byte size (buf.Size) — confirm callers
// expect a count.
func (self *Queue) GetSize() int {
	return self.buf.Count
}
// WritePacket puts a packet into the buffer; old packets are discarded once
// more than maxgopcount GOPs are buffered. Waiting cursors are woken up.
func (self *Queue) WritePacket(pkt Packet) (err error) {
	self.lock.Lock()
	self.buf.Push(pkt)
	// A keyframe on the video stream starts a new GOP.
	if pkt.Idx == int8(self.videoidx) && pkt.IsKeyFrame {
		self.curgopcount++
	}
	// Evict from the head until we are back under the GOP limit, always
	// keeping at least one packet buffered.
	for self.curgopcount >= self.maxgopcount && self.buf.Count > 1 {
		pkt := self.buf.Pop()
		if pkt.Idx == int8(self.videoidx) && pkt.IsKeyFrame {
			self.curgopcount--
		}
		if self.curgopcount < self.maxgopcount {
			break
		}
	}
	//println("shrink", self.curgopcount, self.maxgopcount, self.buf.Head, self.buf.Tail, "count", self.buf.Count, "size", self.buf.Size)
	self.cond.Broadcast()
	self.lock.Unlock()
	return
}
// QueueCursor is a read position into a Queue. Reading advances only the
// cursor, never the queue itself, so multiple cursors can read concurrently.
type QueueCursor struct {
	que    *Queue // the queue being read
	pos    BufPos // current absolute read position
	gotpos bool   // whether pos has been initialized
	// init computes the starting position on first read.
	init func(buf *Buf, videoidx int) BufPos
}

// newCursor returns a cursor bound to this queue with no start position yet.
func (self *Queue) newCursor() *QueueCursor {
	return &QueueCursor{
		que: self,
	}
}

// Latest creates a cursor positioned at the latest packet (it will only see
// packets written after its first read).
func (self *Queue) Latest() *QueueCursor {
	cursor := self.newCursor()
	cursor.init = func(buf *Buf, videoidx int) BufPos {
		return buf.Tail
	}
	return cursor
}

// Oldest creates a cursor positioned at the oldest buffered packet.
func (self *Queue) Oldest() *QueueCursor {
	cursor := self.newCursor()
	cursor.init = func(buf *Buf, videoidx int) BufPos {
		return buf.Head
	}
	return cursor
}
// DelayedTime creates a cursor positioned roughly dur behind the newest
// buffered packet, by walking backwards from the tail until the decode-time
// gap exceeds dur.
func (self *Queue) DelayedTime(dur time.Duration) *QueueCursor {
	cursor := self.newCursor()
	cursor.init = func(buf *Buf, videoidx int) BufPos {
		i := buf.Tail - 1
		if buf.IsValidPos(i) {
			end := buf.Get(i)
			for buf.IsValidPos(i) {
				if end.Time-buf.Get(i).Time > dur {
					break
				}
				i--
			}
		}
		return i
	}
	return cursor
}
// DelayedGopCount creates a cursor positioned n GOPs behind the newest
// buffered packet, by walking backwards from the tail counting keyframes on
// the video stream. With no video stream the cursor starts at the tail.
func (self *Queue) DelayedGopCount(n int) *QueueCursor {
	cursor := self.newCursor()
	cursor.init = func(buf *Buf, videoidx int) BufPos {
		i := buf.Tail - 1
		if videoidx != -1 {
			for gop := 0; buf.IsValidPos(i) && gop < n; i-- {
				pkt := buf.Get(i)
				if pkt.Idx == int8(self.videoidx) && pkt.IsKeyFrame {
					gop++
				}
			}
		}
		return i
	}
	return cursor
}
// Streams blocks until the queue's stream headers are available (set by
// WriteHeader) and returns them; it returns io.EOF if the queue is closed
// before any headers arrive.
func (self *QueueCursor) Streams() (streams []Stream, err error) {
	self.que.cond.L.Lock()
	for self.que.streams == nil && !self.que.closed {
		self.que.cond.Wait()
	}
	if self.que.streams != nil {
		streams = self.que.streams
	} else {
		err = io.EOF
	}
	self.que.cond.L.Unlock()
	return
}
// ReadPacket returns the packet at the cursor position and advances the
// cursor. It will not consume packets in the Queue — it's just a cursor.
// If the cursor has fallen behind (packets evicted) it is clamped to the
// buffer head; it blocks until a packet is available or the queue is closed,
// in which case io.EOF is returned.
func (self *QueueCursor) ReadPacket() (pkt Packet, err error) {
	self.que.cond.L.Lock()
	buf := self.que.buf
	if !self.gotpos {
		// Lazily compute the start position on first read.
		self.pos = self.init(buf, self.que.videoidx)
		self.gotpos = true
	}
	for {
		// Clamp the cursor into the currently buffered range.
		if self.pos.LT(buf.Head) {
			self.pos = buf.Head
		} else if self.pos.GT(buf.Tail) {
			self.pos = buf.Tail
		}
		if buf.IsValidPos(self.pos) {
			pkt = buf.Get(self.pos)
			self.pos++
			break
		}
		if self.que.closed {
			err = io.EOF
			break
		}
		// Nothing to read yet: wait for the writer to broadcast.
		self.que.cond.Wait()
	}
	self.que.cond.L.Unlock()
	return
}

View File

@@ -0,0 +1,42 @@
package packets
// Stream describes a single media stream (video, audio or back channel)
// together with its codec parameter sets.
type Stream struct {
	// The name of the stream.
	Name string
	// The URL of the stream.
	URL string
	// Is the stream a video stream.
	IsVideo bool
	// Is the stream an audio stream.
	IsAudio bool
	// The width of the stream.
	Width int
	// The height of the stream.
	Height int
	// Num is the numerator of the framerate.
	Num int
	// Denum is the denominator of the framerate.
	Denum int
	// FPS is the framerate of the stream.
	FPS float64
	// For H264, this is the sps.
	SPS []byte
	// For H264, this is the pps.
	PPS []byte
	// For H265, this is the vps.
	VPS []byte
	// IsBackChannel is true if this stream is a back channel.
	IsBackChannel bool
}

View File

@@ -0,0 +1,60 @@
package packets
import (
"time"
)
/*
pop push
seg seg seg
|--------| |---------| |---|
20ms 40ms 5ms
----------------- time -------------------->
headtm tailtm
*/
// tlSeg is a single timeline segment: a start timestamp and its duration.
type tlSeg struct {
	tm, dur time.Duration
}

// Timeline tracks a sequence of non-overlapping media segments. Push
// appends a segment (shifting it forward if it would overlap the previous
// tail) and Pop consumes a duration from the head, returning the timestamp
// at which the consumed span started.
type Timeline struct {
	segs   []tlSeg       // pending segments, oldest first
	headtm time.Duration // cumulative time already consumed
}

// Push appends a segment starting at tm and lasting dur. If tm would
// overlap the end of the last segment, the segment is moved forward so the
// timeline stays monotonic.
func (t *Timeline) Push(tm time.Duration, dur time.Duration) {
	if n := len(t.segs); n > 0 {
		last := t.segs[n-1]
		if end := last.tm + last.dur; tm < end {
			tm = end
		}
	}
	t.segs = append(t.segs, tlSeg{tm: tm, dur: dur})
}

// Pop consumes dur from the head of the timeline and returns the timestamp
// at which consumption started. On an empty timeline it returns the running
// head time.
func (t *Timeline) Pop(dur time.Duration) (tm time.Duration) {
	if len(t.segs) == 0 {
		return t.headtm
	}
	tm = t.segs[0].tm
	for dur > 0 && len(t.segs) > 0 {
		head := &t.segs[0]
		sub := head.dur
		if dur < sub {
			sub = dur
		}
		head.dur -= sub
		head.tm += sub
		t.headtm += sub
		dur -= sub
		if head.dur == 0 {
			// Segment fully consumed: drop it from the front.
			t.segs = t.segs[1:]
		}
	}
	return
}

View File

@@ -1,103 +0,0 @@
package http
import (
"encoding/json"
"io/ioutil"
"os"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"gopkg.in/mgo.v2/bson"
"github.com/kerberos-io/agent/machinery/src/components"
"github.com/kerberos-io/agent/machinery/src/database"
"github.com/kerberos-io/agent/machinery/src/models"
)
// AddRoutes registers the configuration and lifecycle endpoints on the gin
// engine and returns the authenticated /api route group:
//   - GET  /config      : current config, custom/global config and snapshot
//   - POST /config      : persist a new configuration and trigger a restart
//   - POST /api/login   : obtain a JWT
//   - GET  /api/config  : same as /config
//   - GET  /api/restart : restart the machinery
//   - GET  /api/stop    : stop the machinery
func AddRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware, configuration *models.Configuration, communication *models.Communication) *gin.RouterGroup {
	r.GET("/config", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"config":   configuration.Config,
			"custom":   configuration.CustomConfig,
			"global":   configuration.GlobalConfig,
			"snapshot": components.GetSnapshot(),
		})
	})

	r.POST("/config", func(c *gin.Context) {
		if !communication.IsConfiguring.IsSet() {
			communication.IsConfiguring.Set()
			// Parse the new configuration. The previous implementation
			// ignored the BindJSON error, which persisted a zero-valued
			// config whenever the payload was invalid.
			var conf models.Config
			if err := c.BindJSON(&conf); err != nil {
				communication.IsConfiguring.UnSet()
				c.JSON(400, gin.H{
					"data": "invalid configuration payload: " + err.Error(),
				})
				return
			}
			if os.Getenv("DEPLOYMENT") == "factory" || os.Getenv("MACHINERY_ENVIRONMENT") == "kubernetes" {
				// Write to mongodb
				session := database.New().Copy()
				defer session.Close()
				db := session.DB(database.DatabaseName)
				collection := db.C("configuration")
				collection.Update(bson.M{
					"type": "config",
					"name": os.Getenv("DEPLOYMENT_NAME"),
				}, &conf)
			} else if os.Getenv("DEPLOYMENT") == "" || os.Getenv("DEPLOYMENT") == "agent" {
				// Save into the local config file.
				res, _ := json.MarshalIndent(conf, "", "\t")
				ioutil.WriteFile("./data/config/config.json", res, 0644)
			}
			// Non-blocking restart signal: if a restart is already pending,
			// do not block the request handler.
			select {
			case communication.HandleBootstrap <- "restart":
			default:
			}
			communication.IsConfiguring.UnSet()
			c.JSON(200, gin.H{
				"data": "☄ Reconfiguring",
			})
		} else {
			c.JSON(200, gin.H{
				"data": "☄ Already reconfiguring",
			})
		}
	})

	api := r.Group("/api")
	{
		api.POST("/login", authMiddleware.LoginHandler)
		api.GET("/config", func(c *gin.Context) {
			c.JSON(200, gin.H{
				"config":   configuration.Config,
				"custom":   configuration.CustomConfig,
				"global":   configuration.GlobalConfig,
				"snapshot": components.GetSnapshot(),
			})
		})
		api.GET("/restart", func(c *gin.Context) {
			communication.HandleBootstrap <- "restart"
			c.JSON(200, gin.H{
				"restarted": true,
			})
		})
		api.GET("/stop", func(c *gin.Context) {
			communication.HandleBootstrap <- "stop"
			c.JSON(200, gin.H{
				"stopped": true,
			})
		})
		api.Use(authMiddleware.MiddlewareFunc())
		{
			// Secured endpoints..
		}
	}
	return api
}

View File

@@ -1,6 +1,10 @@
package http
import (
"io"
"os"
"strconv"
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-contrib/pprof"
"github.com/gin-gonic/contrib/static"
@@ -10,6 +14,8 @@ import (
"log"
_ "github.com/kerberos-io/agent/machinery/docs"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/encryption"
"github.com/kerberos-io/agent/machinery/src/models"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
@@ -33,7 +39,10 @@ import (
// @in header
// @name Authorization
func StartServer(configuration *models.Configuration, communication *models.Communication) {
func StartServer(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
// Set release mode
gin.SetMode(gin.ReleaseMode)
// Initialize REST API
r := gin.Default()
@@ -55,14 +64,28 @@ func StartServer(configuration *models.Configuration, communication *models.Comm
}
// Add all routes
AddRoutes(r, authMiddleware, configuration, communication)
AddRoutes(r, authMiddleware, configDirectory, configuration, communication, captureDevice)
// Update environment variables
environmentVariables := configDirectory + "/www/env.js"
if os.Getenv("AGENT_MODE") == "demo" {
demoEnvironmentVariables := configDirectory + "/www/env.demo.js"
// Move demo environment variables to environment variables
err := os.Rename(demoEnvironmentVariables, environmentVariables)
if err != nil {
log.Fatal(err)
}
}
// Add static routes to UI
r.Use(static.Serve("/", static.LocalFile("./www", true)))
r.Use(static.Serve("/dashboard", static.LocalFile("./www", true)))
r.Use(static.Serve("/media", static.LocalFile("./www", true)))
r.Use(static.Serve("/settings", static.LocalFile("./www", true)))
r.Use(static.Serve("/login", static.LocalFile("./www", true)))
r.Use(static.Serve("/", static.LocalFile(configDirectory+"/www", true)))
r.Use(static.Serve("/dashboard", static.LocalFile(configDirectory+"/www", true)))
r.Use(static.Serve("/media", static.LocalFile(configDirectory+"/www", true)))
r.Use(static.Serve("/settings", static.LocalFile(configDirectory+"/www", true)))
r.Use(static.Serve("/login", static.LocalFile(configDirectory+"/www", true)))
r.Handle("GET", "/file/*filepath", func(c *gin.Context) {
Files(c, configDirectory, configuration)
})
// Run the api on port
err = r.Run(":" + configuration.Port)
@@ -70,3 +93,52 @@ func StartServer(configuration *models.Configuration, communication *models.Comm
log.Fatal(err)
}
}
// Files serves a recording file from the configuration directory. When
// recording encryption is enabled (Encryption.Recordings == "true" and a
// symmetric key is configured) the file is decrypted in memory before being
// sent. It responds 404 when the file is missing or cannot be decrypted.
func Files(c *gin.Context, configDirectory string, configuration *models.Configuration) {
	// Read the requested recording in one step. The previous implementation
	// first called os.Open purely as an existence probe and never closed
	// the returned handle, leaking a file descriptor on every request.
	filePath := configDirectory + "/data/recordings" + c.Param("filepath")
	contents, err := os.ReadFile(filePath)
	if err != nil {
		c.JSON(404, gin.H{"error": "File not found"})
		return
	}

	// Decrypt when recordings are stored encrypted. Encryption is a pointer
	// elsewhere in this codebase (it is nil-checked before use), so guard
	// against a missing encryption section here as well.
	if enc := configuration.Config.Encryption; enc != nil && enc.Recordings == "true" && enc.SymmetricKey != "" {
		contents, err = encryption.AesDecrypt(contents, enc.SymmetricKey)
		if err != nil {
			c.JSON(404, gin.H{"error": "File not found"})
			return
		}
	}

	// Stream the (possibly decrypted) recording to the client.
	c.Header("Access-Control-Allow-Origin", "*")
	c.Header("Content-Disposition", "attachment; filename="+filePath)
	c.Header("Content-Type", "video/mp4")
	c.Header("Content-Length", strconv.Itoa(len(contents)))
	io.WriteString(c.Writer, string(contents))
}

View File

@@ -2,6 +2,7 @@ package http
import (
"net/http"
"os"
"time"
jwt "github.com/appleboy/gin-jwt/v2"
@@ -45,9 +46,18 @@ func JWTMiddleWare() jwt.GinJWTMiddleware {
username := loginVals.Username
password := loginVals.Password
usernameENV := "root"
passwordENV := "root"
if username == usernameENV && password == passwordENV {
// Get username from ENV
usernameFromConfig := os.Getenv("AGENT_USERNAME")
if usernameFromConfig == "" {
usernameFromConfig = "root"
}
// Get password from ENV
passwordFromConfig := os.Getenv("AGENT_PASSWORD")
if passwordFromConfig == "" {
passwordFromConfig = "root"
}
if username == usernameFromConfig && password == passwordFromConfig {
return &models.User{
Username: username,
Role: "admin",

View File

@@ -0,0 +1,570 @@
package http
import (
"github.com/gin-gonic/gin"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/onvif"
)
// Login godoc
// @Router /api/login [post]
// @ID login
// @Tags authentication
// @Summary Get Authorization token.
// @Description Get Authorization token.
// @Param credentials body models.Authentication true "Credentials"
// @Success 200 {object} models.Authorization
//
// Login is a swagger documentation stub only; the actual login is handled
// by the JWT middleware's LoginHandler registered on /api/login.
func Login() {}
// LoginToOnvif godoc
// @Router /api/camera/onvif/login [post]
// @ID camera-onvif-login
// @Tags onvif
// @Param config body models.OnvifCredentials true "OnvifCredentials"
// @Summary Try to login into ONVIF supported camera.
// @Description Try to login into ONVIF supported camera.
// @Success 200 {object} models.APIResponse
func LoginToOnvif(c *gin.Context) {
	var onvifCredentials models.OnvifCredentials
	err := c.BindJSON(&onvifCredentials)
	// Guard clause: BindJSON can succeed while the xaddr is still empty.
	// The previous code called err.Error() on a nil error in that case,
	// which panicked; build the message defensively instead.
	if err != nil || onvifCredentials.ONVIFXAddr == "" {
		message := "no ONVIF xaddr provided"
		if err != nil {
			message = err.Error()
		}
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + message,
		})
		return
	}
	configuration := &models.Configuration{
		Config: models.Config{
			Capture: models.Capture{
				IPCamera: models.IPCamera{
					ONVIFXAddr:    onvifCredentials.ONVIFXAddr,
					ONVIFUsername: onvifCredentials.ONVIFUsername,
					ONVIFPassword: onvifCredentials.ONVIFPassword,
				},
			},
		},
	}
	cameraConfiguration := configuration.Config.Capture.IPCamera
	device, capabilities, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get token from the first profile
	token, err := onvif.GetTokenFromProfile(device, 0)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, gin.H{
		"device":       device,
		"capabilities": capabilities,
		"token":        token,
	})
}
// GetOnvifCapabilities godoc
// @Router /api/camera/onvif/capabilities [post]
// @ID camera-onvif-capabilities
// @Tags onvif
// @Param config body models.OnvifCredentials true "OnvifCredentials"
// @Summary Will return the ONVIF capabilities for the specific camera.
// @Description Will return the ONVIF capabilities for the specific camera.
// @Success 200 {object} models.APIResponse
func GetOnvifCapabilities(c *gin.Context) {
	var onvifCredentials models.OnvifCredentials
	err := c.BindJSON(&onvifCredentials)
	// Guard clause: BindJSON can succeed while the xaddr is still empty.
	// The previous code called err.Error() on a nil error in that case,
	// which panicked; build the message defensively instead.
	if err != nil || onvifCredentials.ONVIFXAddr == "" {
		message := "no ONVIF xaddr provided"
		if err != nil {
			message = err.Error()
		}
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + message,
		})
		return
	}
	configuration := &models.Configuration{
		Config: models.Config{
			Capture: models.Capture{
				IPCamera: models.IPCamera{
					ONVIFXAddr:    onvifCredentials.ONVIFXAddr,
					ONVIFUsername: onvifCredentials.ONVIFUsername,
					ONVIFPassword: onvifCredentials.ONVIFPassword,
				},
			},
		},
	}
	cameraConfiguration := configuration.Config.Capture.IPCamera
	_, capabilities, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, gin.H{
		"capabilities": capabilities,
	})
}
// DoOnvifPanTilt godoc
// @Router /api/camera/onvif/pantilt [post]
// @ID camera-onvif-pantilt
// @Tags onvif
// @Param panTilt body models.OnvifPanTilt true "OnvifPanTilt"
// @Summary Panning or/and tilting the camera.
// @Description Panning or/and tilting the camera using a direction (x,y).
// @Success 200 {object} models.APIResponse
// DoOnvifPanTilt connects to the camera, resolves the first profile token
// and its PTZ configurations, then issues a continuous pan/tilt move with
// the posted (pan, tilt) direction.
func DoOnvifPanTilt(c *gin.Context) {
	var onvifPanTilt models.OnvifPanTilt
	if err := c.BindJSON(&onvifPanTilt); err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	// Guard against a missing xaddr: the original code dereferenced a nil
	// error in this case, which panicked.
	if onvifPanTilt.OnvifCredentials.ONVIFXAddr == "" {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: no ONVIF xaddr was provided",
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifPanTilt.OnvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifPanTilt.OnvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifPanTilt.OnvifCredentials.ONVIFPassword,
	}
	device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get token from the first profile.
	token, err := onvif.GetTokenFromProfile(device, 0)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get the PTZ configurations from the device.
	ptzConfigurations, err := onvif.GetPTZConfigurationsFromDevice(device)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	if err := onvif.ContinuousPanTilt(device, ptzConfigurations, token, onvifPanTilt.Pan, onvifPanTilt.Tilt); err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, models.APIResponse{
		Message: "Successfully pan/tilted the camera",
	})
}
// DoOnvifZoom godoc
// @Router /api/camera/onvif/zoom [post]
// @ID camera-onvif-zoom
// @Tags onvif
// @Param zoom body models.OnvifZoom true "OnvifZoom"
// @Summary Zooming in or out the camera.
// @Description Zooming in or out the camera.
// @Success 200 {object} models.APIResponse
// DoOnvifZoom connects to the camera, resolves the first profile token and
// its PTZ configurations, then issues a continuous zoom with the posted
// zoom value.
func DoOnvifZoom(c *gin.Context) {
	var onvifZoom models.OnvifZoom
	if err := c.BindJSON(&onvifZoom); err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	// Guard against a missing xaddr: the original code dereferenced a nil
	// error in this case, which panicked.
	if onvifZoom.OnvifCredentials.ONVIFXAddr == "" {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: no ONVIF xaddr was provided",
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifZoom.OnvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifZoom.OnvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifZoom.OnvifCredentials.ONVIFPassword,
	}
	device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get token from the first profile.
	token, err := onvif.GetTokenFromProfile(device, 0)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get the PTZ configurations from the device.
	ptzConfigurations, err := onvif.GetPTZConfigurationsFromDevice(device)
	if err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	if err := onvif.ContinuousZoom(device, ptzConfigurations, token, onvifZoom.Zoom); err != nil {
		c.JSON(400, models.APIResponse{
			Message: "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, models.APIResponse{
		Message: "Successfully zoomed the camera",
	})
}
// GetOnvifPresets godoc
// @Router /api/camera/onvif/presets [post]
// @ID camera-onvif-presets
// @Tags onvif
// @Param config body models.OnvifCredentials true "OnvifCredentials"
// @Summary Will return the ONVIF presets for the specific camera.
// @Description Will return the ONVIF presets for the specific camera.
// @Success 200 {object} models.APIResponse
// GetOnvifPresets connects to the camera with the posted credentials and
// returns the PTZ presets stored on the device.
func GetOnvifPresets(c *gin.Context) {
	var onvifCredentials models.OnvifCredentials
	if err := c.BindJSON(&onvifCredentials); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Guard against a missing xaddr: the original code dereferenced a nil
	// error in this case, which panicked.
	if onvifCredentials.ONVIFXAddr == "" {
		c.JSON(400, gin.H{
			"data": "Something went wrong: no ONVIF xaddr was provided",
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifCredentials.ONVIFPassword,
	}
	device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	presets, err := onvif.GetPresetsFromDevice(device)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, gin.H{
		"presets": presets,
	})
}
// GoToOnvifPreset godoc
// @Router /api/camera/onvif/gotopreset [post]
// @ID camera-onvif-gotopreset
// @Tags onvif
// @Param config body models.OnvifPreset true "OnvifPreset"
// @Summary Will activate the desired ONVIF preset.
// @Description Will activate the desired ONVIF preset.
// @Success 200 {object} models.APIResponse
// GoToOnvifPreset connects to the camera and moves it to the preset named
// in the request body.
func GoToOnvifPreset(c *gin.Context) {
	var onvifPreset models.OnvifPreset
	if err := c.BindJSON(&onvifPreset); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Guard against a missing xaddr: the original code dereferenced a nil
	// error in this case, which panicked.
	if onvifPreset.OnvifCredentials.ONVIFXAddr == "" {
		c.JSON(400, gin.H{
			"data": "Something went wrong: no ONVIF xaddr was provided",
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifPreset.OnvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifPreset.OnvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifPreset.OnvifCredentials.ONVIFPassword,
	}
	device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	if err := onvif.GoToPresetFromDevice(device, onvifPreset.Preset); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	c.JSON(200, gin.H{
		"data": "Camera preset activated: " + onvifPreset.Preset,
	})
}
// DoGetDigitalInputs godoc
// @Router /api/camera/onvif/inputs [post]
// @ID get-digital-inputs
// @Security Bearer
// @securityDefinitions.apikey Bearer
// @in header
// @name Authorization
// @Tags onvif
// @Param config body models.OnvifCredentials true "OnvifCredentials"
// @Summary Will get the digital inputs from the ONVIF device.
// @Description Will get the digital inputs from the ONVIF device.
// @Success 200 {object} models.APIResponse
// DoGetDigitalInputs verifies connectivity with the posted credentials and
// returns only the events of type "input" reported by onvif.GetInputOutputs.
func DoGetDigitalInputs(c *gin.Context) {
	var onvifCredentials models.OnvifCredentials
	if err := c.BindJSON(&onvifCredentials); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Guard against a missing xaddr: the original code dereferenced a nil
	// error in this case, which panicked.
	if onvifCredentials.ONVIFXAddr == "" {
		c.JSON(400, gin.H{
			"data": "Something went wrong: no ONVIF xaddr was provided",
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifCredentials.ONVIFPassword,
	}
	// The connection is only verified; GetInputOutputs does not take the
	// device handle. (The original code also had a duplicated, redundant
	// "if err == nil" check here, which has been removed.)
	if _, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get the digital inputs and outputs from the device.
	inputOutputs, err := onvif.GetInputOutputs()
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Keep only the digital inputs.
	var inputs []onvif.ONVIFEvents
	for _, event := range inputOutputs {
		if event.Type == "input" {
			inputs = append(inputs, event)
		}
	}
	c.JSON(200, gin.H{
		"data": inputs,
	})
}
// DoGetRelayOutputs godoc
// @Router /api/camera/onvif/outputs [post]
// @ID get-relay-outputs
// @Security Bearer
// @securityDefinitions.apikey Bearer
// @in header
// @name Authorization
// @Tags onvif
// @Param config body models.OnvifCredentials true "OnvifCredentials"
// @Summary Will get the relay outputs from the ONVIF device.
// @Description Will get the relay outputs from the ONVIF device.
// @Success 200 {object} models.APIResponse
// DoGetRelayOutputs verifies connectivity with the posted credentials and
// returns only the events of type "output" reported by onvif.GetInputOutputs.
func DoGetRelayOutputs(c *gin.Context) {
	var onvifCredentials models.OnvifCredentials
	if err := c.BindJSON(&onvifCredentials); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Guard against a missing xaddr: the original code dereferenced a nil
	// error in this case, which panicked.
	if onvifCredentials.ONVIFXAddr == "" {
		c.JSON(400, gin.H{
			"data": "Something went wrong: no ONVIF xaddr was provided",
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifCredentials.ONVIFPassword,
	}
	// The connection is only verified; GetInputOutputs does not take the
	// device handle. (The original code also had a duplicated, redundant
	// "if err == nil" check here, which has been removed.)
	if _, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration); err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Get the digital inputs and outputs from the device.
	inputOutputs, err := onvif.GetInputOutputs()
	if err != nil {
		c.JSON(400, gin.H{
			"data": "Something went wrong: " + err.Error(),
		})
		return
	}
	// Keep only the relay outputs.
	var outputs []onvif.ONVIFEvents
	for _, event := range inputOutputs {
		if event.Type == "output" {
			outputs = append(outputs, event)
		}
	}
	c.JSON(200, gin.H{
		"data": outputs,
	})
}
// DoTriggerRelayOutput godoc
// @Router /api/camera/onvif/outputs/{output} [post]
// @ID trigger-relay-output
// @Security Bearer
// @securityDefinitions.apikey Bearer
// @in header
// @name Authorization
// @Tags onvif
// @Param config body models.OnvifCredentials true "OnvifCredentials"
// @Param output path string true "Output"
// @Summary Will trigger the relay output from the ONVIF device.
// @Description Will trigger the relay output from the ONVIF device.
// @Success 200 {object} models.APIResponse
// DoTriggerRelayOutput connects to the camera with the posted credentials
// and triggers the relay output named in the URL path.
func DoTriggerRelayOutput(c *gin.Context) {
	var onvifCredentials models.OnvifCredentials
	err := c.BindJSON(&onvifCredentials)
	// Get the output from the url.
	output := c.Param("output")
	if err != nil || onvifCredentials.ONVIFXAddr == "" || output == "" {
		// Distinguish a JSON binding failure from missing fields; the
		// original code called err.Error() even when err was nil, which
		// panicked the handler.
		msg := "something went wrong: missing ONVIF xaddr or output"
		if err != nil {
			msg = "something went wrong: " + err.Error()
		}
		log.Log.Error("routers.http.methods.DoTriggerRelayOutput(): " + msg)
		c.JSON(400, gin.H{
			"data": msg,
		})
		return
	}
	cameraConfiguration := models.IPCamera{
		ONVIFXAddr:    onvifCredentials.ONVIFXAddr,
		ONVIFUsername: onvifCredentials.ONVIFUsername,
		ONVIFPassword: onvifCredentials.ONVIFPassword,
	}
	device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		msg := "something went wrong: " + err.Error()
		log.Log.Error("routers.http.methods.DoTriggerRelayOutput(): " + msg)
		c.JSON(400, gin.H{
			"data": msg,
		})
		return
	}
	if err := onvif.TriggerRelayOutput(device, output); err != nil {
		msg := "something went wrong: " + err.Error()
		log.Log.Error("routers.http.methods.DoTriggerRelayOutput(): " + msg)
		c.JSON(400, gin.H{
			"data": msg,
		})
		return
	}
	msg := "relay output triggered: " + output
	log.Log.Info("routers.http.methods.DoTriggerRelayOutput(): " + msg)
	c.JSON(200, gin.H{
		"data": msg,
	})
}

View File

@@ -0,0 +1,111 @@
package http
import (
jwt "github.com/appleboy/gin-jwt/v2"
"github.com/gin-gonic/gin"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/components"
"github.com/kerberos-io/agent/machinery/src/onvif"
"github.com/kerberos-io/agent/machinery/src/routers/websocket"
"github.com/kerberos-io/agent/machinery/src/cloud"
"github.com/kerberos-io/agent/machinery/src/models"
)
// AddRoutes registers every HTTP endpoint on the given gin engine — the
// websocket handler, two legacy /config routes, and the /api group — and
// returns the /api router group so callers can extend it.
func AddRoutes(r *gin.Engine, authMiddleware *jwt.GinJWTMiddleware, configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) *gin.RouterGroup {
	// Bi-directional websocket used by the UI for live updates.
	r.GET("/ws", func(c *gin.Context) {
		websocket.WebsocketHandler(c, communication, captureDevice)
	})
	// This is legacy should be removed in future! Now everything
	// lives under the /api prefix.
	r.GET("/config", func(c *gin.Context) {
		components.GetConfig(c, captureDevice, configuration, communication)
	})
	// This is legacy should be removed in future! Now everything
	// lives under the /api prefix.
	r.POST("/config", func(c *gin.Context) {
		components.UpdateConfig(c, configDirectory, configuration, communication)
	})
	api := r.Group("/api")
	{
		// Authentication endpoint provided by the gin-jwt middleware.
		api.POST("/login", authMiddleware.LoginHandler)
		api.GET("/dashboard", func(c *gin.Context) {
			components.GetDashboard(c, configDirectory, configuration, communication)
		})
		api.POST("/latest-events", func(c *gin.Context) {
			components.GetLatestEvents(c, configDirectory, configuration, communication)
		})
		api.GET("/days", func(c *gin.Context) {
			components.GetDays(c, configDirectory, configuration, communication)
		})
		api.GET("/config", func(c *gin.Context) {
			components.GetConfig(c, captureDevice, configuration, communication)
		})
		api.POST("/config", func(c *gin.Context) {
			components.UpdateConfig(c, configDirectory, configuration, communication)
		})
		// Will verify the current hub settings.
		api.POST("/hub/verify", func(c *gin.Context) {
			cloud.VerifyHub(c)
		})
		// Will verify the current persistence settings.
		api.POST("/persistence/verify", func(c *gin.Context) {
			cloud.VerifyPersistence(c, configDirectory)
		})
		// Camera specific methods. Doesn't require any authorization.
		// These are available for anyone, but require the agent, to reach
		// the camera.
		api.POST("/camera/restart", func(c *gin.Context) {
			components.RestartAgent(c, communication)
		})
		api.POST("/camera/stop", func(c *gin.Context) {
			components.StopAgent(c, communication)
		})
		api.POST("/camera/record", func(c *gin.Context) {
			components.MakeRecording(c, communication)
		})
		api.GET("/camera/snapshot/jpeg", func(c *gin.Context) {
			components.GetSnapshotRaw(c, captureDevice, configuration, communication)
		})
		api.GET("/camera/snapshot/base64", func(c *gin.Context) {
			components.GetSnapshotBase64(c, captureDevice, configuration, communication)
		})
		// Onvif specific methods. Doesn't require any authorization.
		// Will verify the current onvif settings.
		api.POST("/camera/onvif/verify", onvif.VerifyOnvifConnection)
		api.POST("/camera/onvif/login", LoginToOnvif)
		api.POST("/camera/onvif/capabilities", GetOnvifCapabilities)
		api.POST("/camera/onvif/presets", GetOnvifPresets)
		api.POST("/camera/onvif/gotopreset", GoToOnvifPreset)
		api.POST("/camera/onvif/pantilt", DoOnvifPanTilt)
		api.POST("/camera/onvif/zoom", DoOnvifZoom)
		api.POST("/camera/onvif/inputs", DoGetDigitalInputs)
		api.POST("/camera/onvif/outputs", DoGetRelayOutputs)
		api.POST("/camera/onvif/outputs/:output", DoTriggerRelayOutput)
		api.POST("/camera/verify/:streamType", capture.VerifyCamera)
		// Secured endpoints..
		// NOTE(review): this group is currently empty, and api.Use() runs
		// after the routes above were registered, so none of them pass
		// through the JWT middleware — confirm this is intentional.
		api.Use(authMiddleware.MiddlewareFunc())
		{
		}
	}
	return api
}

View File

@@ -1,10 +1,11 @@
package routers
import (
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/routers/http"
)
func StartWebserver(configuration *models.Configuration, communication *models.Communication) {
http.StartServer(configuration, communication)
func StartWebserver(configDirectory string, configuration *models.Configuration, communication *models.Communication, captureDevice *capture.Capture) {
http.StartServer(configDirectory, configuration, communication, captureDevice)
}

View File

@@ -1,191 +1,549 @@
package mqtt
import (
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io/ioutil"
"math/rand"
"strconv"
"strings"
"time"
mqtt "github.com/eclipse/paho.mqtt.golang"
configService "github.com/kerberos-io/agent/machinery/src/config"
"github.com/kerberos-io/agent/machinery/src/encryption"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/onvif"
"github.com/kerberos-io/agent/machinery/src/webrtc"
)
func ConfigureMQTT(configuration *models.Configuration, communication *models.Communication) mqtt.Client {
// We'll cache the MQTT settings to know if we need to reinitialize the MQTT client connection.
// If we update the configuration but no new MQTT settings are provided, we don't need to restart it.
var PREV_MQTTURI string
var PREV_MQTTUsername string
var PREV_MQTTPassword string
var PREV_HubKey string
var PREV_AgentKey string
config := configuration.Config
opts := mqtt.NewClientOptions()
// We will set the MQTT endpoint to which we want to connect
// and share and receive messages to/from.
mqttURL := config.MQTTURI
opts.AddBroker(mqttURL)
log.Log.Info("ConfigureMQTT: Set broker uri " + mqttURL)
// Our MQTT broker can have username/password credentials
// to protect it from the outside.
mqtt_username := config.MQTTUsername
mqtt_password := config.MQTTPassword
if mqtt_username != "" || mqtt_password != "" {
opts.SetUsername(mqtt_username)
opts.SetPassword(mqtt_password)
log.Log.Info("ConfigureMQTT: Set username " + mqtt_username)
log.Log.Info("ConfigureMQTT: Set password " + mqtt_password)
func HasMQTTClientModified(configuration *models.Configuration) bool {
MTTURI := configuration.Config.MQTTURI
MTTUsername := configuration.Config.MQTTUsername
MQTTPassword := configuration.Config.MQTTPassword
HubKey := configuration.Config.HubKey
AgentKey := configuration.Config.Key
if PREV_MQTTURI != MTTURI || PREV_MQTTUsername != MTTUsername || PREV_MQTTPassword != MQTTPassword || PREV_HubKey != HubKey || PREV_AgentKey != AgentKey {
log.Log.Info("HasMQTTClientModified: MQTT settings have been modified, restarting MQTT client.")
return true
}
// Some extra options to make sure the connection behaves
// properly. More information here: github.com/eclipse/paho.mqtt.golang.
opts.SetCleanSession(true)
opts.SetConnectRetry(true)
opts.SetAutoReconnect(true)
opts.SetConnectTimeout(30 * time.Second)
hubKey := ""
// This is the old way ;)
if config.Cloud == "s3" && config.S3.Publickey != "" {
hubKey = config.S3.Publickey
} else if config.Cloud == "kstorage" && config.KStorage.CloudKey != "" {
hubKey = config.KStorage.CloudKey
}
// This is the new way ;)
if config.HubKey != "" {
hubKey = config.HubKey
}
if hubKey != "" {
rand.Seed(time.Now().UnixNano())
random := rand.Intn(100)
mqttClientID := config.Key + strconv.Itoa(random) // this random int is to avoid conflicts.
// This is a worked-around.
// current S3 (Kerberos Hub SAAS) is using a secured MQTT, where the client id,
// should match the kerberos hub key.
if config.Cloud == "s3" {
mqttClientID = config.Key
}
opts.SetClientID(mqttClientID)
log.Log.Info("ConfigureMQTT: Set ClientID " + mqttClientID)
rand.Seed(time.Now().UnixNano())
webrtc.CandidateArrays = make(map[string](chan string))
opts.OnConnect = func(c mqtt.Client) {
// We managed to connect to the MQTT broker, hurray!
log.Log.Info("ConfigureMQTT: " + mqttClientID + " connected to " + mqttURL)
// Create a subscription to know if send out a livestream or not.
MQTTListenerHandleLiveSD(c, hubKey, configuration, communication)
// Create a subscription for the WEBRTC livestream.
MQTTListenerHandleLiveHDHandshake(c, hubKey, configuration, communication)
// Create a subscription for keeping alive the WEBRTC livestream.
MQTTListenerHandleLiveHDKeepalive(c, hubKey, configuration, communication)
// Create a subscription to listen to the number of WEBRTC peers.
MQTTListenerHandleLiveHDPeers(c, hubKey, configuration, communication)
// Create a subscription to listen for WEBRTC candidates.
MQTTListenerHandleLiveHDCandidates(c, hubKey, configuration, communication)
// Create a susbcription to listen for ONVIF actions: e.g. PTZ, Zoom, etc.
MQTTListenerHandleONVIF(c, hubKey, configuration, communication)
}
}
mqc := mqtt.NewClient(opts)
if token := mqc.Connect(); token.WaitTimeout(3 * time.Second) {
if token.Error() != nil {
log.Log.Error("ConfigureMQTT: unable to establish mqtt broker connection, error was: " + token.Error().Error())
}
}
return mqc
return false
}
func MQTTListenerHandleLiveSD(mqttClient mqtt.Client, hubKey string, configuration *models.Configuration, communication *models.Communication) {
// Configuring MQTT to subscribe for various bi-directional messaging
// Listen and reply (a generic method to share and retrieve information)
//
// - [SUBSCRIPTION] kerberos/agent/{hubkey} (hub -> agent)
// - [PUBLISH] kerberos/hub/{hubkey} (agent -> hub)
//
// !!! LEGACY METHODS BELOW, WE SHOULD LEVERAGE THE ABOVE METHOD!
// [PUBlISH]
// Next to subscribing to various topics, we'll also publish messages to various topics, find a list of available Publish methods.
// - kerberos/{hubkey}/device/{devicekey}/motion: a motion signal
func ConfigureMQTT(configDirectory string, configuration *models.Configuration, communication *models.Communication) mqtt.Client {
config := configuration.Config
topicRequest := "kerberos/" + hubKey + "/device/" + config.Key + "/request-live"
mqttClient.Subscribe(topicRequest, 0, func(c mqtt.Client, msg mqtt.Message) {
select {
case communication.HandleLiveSD <- time.Now().Unix():
default:
// Set the MQTT settings.
PREV_MQTTURI = configuration.Config.MQTTURI
PREV_MQTTUsername = configuration.Config.MQTTUsername
PREV_MQTTPassword = configuration.Config.MQTTPassword
PREV_HubKey = configuration.Config.HubKey
PREV_AgentKey = configuration.Config.Key
if config.Offline == "true" {
log.Log.Info("routers.mqtt.main.ConfigureMQTT(): not starting as running in Offline mode.")
} else {
opts := mqtt.NewClientOptions()
// We will set the MQTT endpoint to which we want to connect
// and share and receive messages to/from.
mqttURL := config.MQTTURI
opts.AddBroker(mqttURL)
log.Log.Debug("routers.mqtt.main.ConfigureMQTT(): Set broker uri " + mqttURL)
// Our MQTT broker can have username/password credentials
// to protect it from the outside.
mqtt_username := config.MQTTUsername
mqtt_password := config.MQTTPassword
if mqtt_username != "" || mqtt_password != "" {
opts.SetUsername(mqtt_username)
opts.SetPassword(mqtt_password)
log.Log.Debug("routers.mqtt.main.ConfigureMQTT(): Set username " + mqtt_username)
log.Log.Debug("routers.mqtt.main.ConfigureMQTT(): Set password " + mqtt_password)
}
log.Log.Info("MQTTListenerHandleLiveSD: received request to livestream.")
msg.Ack()
})
}
func MQTTListenerHandleLiveHDHandshake(mqttClient mqtt.Client, hubKey string, configuration *models.Configuration, communication *models.Communication) {
config := configuration.Config
topicRequestWebRtc := config.Key + "/register"
mqttClient.Subscribe(topicRequestWebRtc, 0, func(c mqtt.Client, msg mqtt.Message) {
log.Log.Info("MQTTListenerHandleLiveHDHandshake: received request to setup webrtc.")
var sdp models.SDPPayload
json.Unmarshal(msg.Payload(), &sdp)
select {
case communication.HandleLiveHDHandshake <- sdp:
default:
// Some extra options to make sure the connection behaves
// properly. More information here: github.com/eclipse/paho.mqtt.golang.
opts.SetCleanSession(true)
opts.SetConnectRetry(true)
//opts.SetAutoReconnect(true)
opts.SetConnectTimeout(30 * time.Second)
hubKey := ""
// This is the old way ;)
if config.Cloud == "s3" && config.S3 != nil && config.S3.Publickey != "" {
hubKey = config.S3.Publickey
} else if config.Cloud == "kstorage" && config.KStorage != nil && config.KStorage.CloudKey != "" {
hubKey = config.KStorage.CloudKey
}
// This is the new way ;)
if config.HubKey != "" {
hubKey = config.HubKey
}
msg.Ack()
})
}
func MQTTListenerHandleLiveHDKeepalive(mqttClient mqtt.Client, hubKey string, configuration *models.Configuration, communication *models.Communication) {
config := configuration.Config
topicKeepAlive := fmt.Sprintf("kerberos/webrtc/keepalivehub/%s", config.Key)
mqttClient.Subscribe(topicKeepAlive, 0, func(c mqtt.Client, msg mqtt.Message) {
alive := string(msg.Payload())
communication.HandleLiveHDKeepalive <- alive
log.Log.Info("MQTTListenerHandleLiveHDKeepalive: Received keepalive: " + alive)
})
}
if hubKey != "" {
func MQTTListenerHandleLiveHDPeers(mqttClient mqtt.Client, hubKey string, configuration *models.Configuration, communication *models.Communication) {
config := configuration.Config
topicPeers := fmt.Sprintf("kerberos/webrtc/peers/%s", config.Key)
mqttClient.Subscribe(topicPeers, 0, func(c mqtt.Client, msg mqtt.Message) {
peerCount := string(msg.Payload())
communication.HandleLiveHDPeers <- peerCount
log.Log.Info("MQTTListenerHandleLiveHDPeers: Number of peers listening: " + peerCount)
})
}
rand.Seed(time.Now().UnixNano())
random := rand.Intn(100)
mqttClientID := config.Key + strconv.Itoa(random) // this random int is to avoid conflicts.
func MQTTListenerHandleLiveHDCandidates(mqttClient mqtt.Client, hubKey string, configuration *models.Configuration, communication *models.Communication) {
config := configuration.Config
topicCandidates := "candidate/cloud"
mqttClient.Subscribe(topicCandidates, 0, func(c mqtt.Client, msg mqtt.Message) {
var candidate models.Candidate
json.Unmarshal(msg.Payload(), &candidate)
if candidate.CloudKey == config.Key {
key := candidate.CloudKey + "/" + candidate.Cuuid
candidatesExists := false
var channel chan string
for !candidatesExists {
webrtc.CandidatesMutex.Lock()
channel, candidatesExists = webrtc.CandidateArrays[key]
webrtc.CandidatesMutex.Unlock()
// This is a worked-around.
// current S3 (Kerberos Hub SAAS) is using a secured MQTT, where the client id,
// should match the kerberos hub key.
if config.Cloud == "s3" {
mqttClientID = config.Key
}
opts.SetClientID(mqttClientID)
log.Log.Info("routers.mqtt.main.ConfigureMQTT(): Set ClientID " + mqttClientID)
rand.Seed(time.Now().UnixNano())
webrtc.CandidateArrays = make(map[string](chan string))
opts.OnConnect = func(c mqtt.Client) {
// We managed to connect to the MQTT broker, hurray!
log.Log.Info("routers.mqtt.main.ConfigureMQTT(): " + mqttClientID + " connected to " + mqttURL)
// Create a susbcription for listen and reply
MQTTListenerHandler(c, hubKey, configDirectory, configuration, communication)
}
log.Log.Info("MQTTListenerHandleLiveHDCandidates: " + string(msg.Payload()))
channel <- string(msg.Payload())
}
})
mqc := mqtt.NewClient(opts)
if token := mqc.Connect(); token.WaitTimeout(3 * time.Second) {
if token.Error() != nil {
log.Log.Error("routers.mqtt.main.ConfigureMQTT(): unable to establish mqtt broker connection, error was: " + token.Error().Error())
}
}
return mqc
}
return nil
}
func MQTTListenerHandleONVIF(mqttClient mqtt.Client, hubKey string, configuration *models.Configuration, communication *models.Communication) {
config := configuration.Config
topicOnvif := fmt.Sprintf("kerberos/onvif/%s", config.Key)
mqttClient.Subscribe(topicOnvif, 0, func(c mqtt.Client, msg mqtt.Message) {
var onvifAction models.OnvifAction
json.Unmarshal(msg.Payload(), &onvifAction)
communication.HandleONVIF <- onvifAction
log.Log.Info("MQTTListenerHandleONVIF: Received an action - " + onvifAction.Action)
})
// MQTTListenerHandler subscribes to kerberos/agent/{hubkey} (QoS 1) and
// dispatches each hub message, addressed to this device, to the matching
// Handle* function based on payload.Action. Encrypted payloads carry two
// parts joined by ":::": an RSA-encrypted symmetric key followed by the
// AES-encrypted, base64-encoded value.
func MQTTListenerHandler(mqttClient mqtt.Client, hubKey string, configDirectory string, configuration *models.Configuration, communication *models.Communication) {
	if hubKey == "" {
		log.Log.Info("routers.mqtt.main.MQTTListenerHandler(): no hub key provided, not subscribing to kerberos/hub/{hubkey}")
	} else {
		agentListener := fmt.Sprintf("kerberos/agent/%s", hubKey)
		mqttClient.Subscribe(agentListener, 1, func(c mqtt.Client, msg mqtt.Message) {
			// Decode the message, we are expecting following format.
			// {
			//     mid: string, "unique id for the message"
			//     timestamp: int64, "unix timestamp when the message was generated"
			//     encrypted: boolean,
			//     fingerprint: string, "fingerprint of the message to validate authenticity"
			//     payload: Payload, "a json object which might be encrypted"
			// }
			// NOTE(review): the Unmarshal error is ignored; a malformed
			// message simply fails the field checks below.
			var message models.Message
			json.Unmarshal(msg.Payload(), &message)
			// We will receive all messages from our hub, so we'll need to filter to the relevant device.
			if message.Mid != "" && message.Timestamp != 0 && message.DeviceId == configuration.Config.Key {
				// Messages might be encrypted, if so we'll
				// need to decrypt them.
				var payload models.Payload
				if message.Encrypted && configuration.Config.Encryption != nil && configuration.Config.Encryption.Enabled == "true" {
					encryptedValue := message.Payload.EncryptedValue
					if len(encryptedValue) > 0 {
						symmetricKey := configuration.Config.Encryption.SymmetricKey
						privateKey := configuration.Config.Encryption.PrivateKey
						// The private key is stored as PEM text in the configuration.
						r := strings.NewReader(privateKey)
						pemBytes, _ := ioutil.ReadAll(r)
						block, _ := pem.Decode(pemBytes)
						if block == nil {
							log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decoding PEM block containing private key")
							return
						} else {
							// Parse the PKCS#8 private key.
							b := block.Bytes
							key, err := x509.ParsePKCS8PrivateKey(b)
							if err != nil {
								log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error parsing private key: " + err.Error())
								return
							} else {
								// Convert key to *rsa.PrivateKey.
								rsaKey, _ := key.(*rsa.PrivateKey)
								// Get encrypted key from message, delimited by :::
								// NOTE(review): a value without ":::" would panic on
								// the [1] index — presumably the hub always sends
								// both parts; confirm upstream.
								encryptedKey := strings.Split(encryptedValue, ":::")[0]   // encrypted with RSA
								encryptedValue := strings.Split(encryptedValue, ":::")[1] // encrypted with AES
								// Recover the symmetric key with our RSA private key.
								decryptedKey, err := encryption.DecryptWithPrivateKey(encryptedKey, rsaKey)
								if decryptedKey != nil {
									// Only proceed when the recovered key matches our configured one.
									if string(decryptedKey) == symmetricKey {
										// Decrypt value with decryptedKey.
										data, err := base64.StdEncoding.DecodeString(encryptedValue)
										if err != nil {
											return
										}
										decryptedValue, err := encryption.AesDecrypt(data, string(decryptedKey))
										if err != nil {
											log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message: " + err.Error())
											return
										}
										json.Unmarshal(decryptedValue, &payload)
									} else {
										log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message, assymetric keys do not match.")
										return
									}
								} else if err != nil {
									log.Log.Error("routers.mqtt.main.MQTTListenerHandler(): error decrypting message: " + err.Error())
									return
								}
							}
						}
					}
				} else {
					// Plaintext message: use the payload as-is.
					payload = message.Payload
				}
				// We'll find out which message we received, and act accordingly.
				log.Log.Info("routers.mqtt.main.MQTTListenerHandler(): received message with action: " + payload.Action)
				// Each action is handled in its own goroutine so a slow
				// handler doesn't block the MQTT callback.
				switch payload.Action {
				case "record":
					go HandleRecording(mqttClient, hubKey, payload, configuration, communication)
				case "get-audio-backchannel":
					go HandleAudio(mqttClient, hubKey, payload, configuration, communication)
				case "get-ptz-position":
					go HandleGetPTZPosition(mqttClient, hubKey, payload, configuration, communication)
				case "update-ptz-position":
					go HandleUpdatePTZPosition(mqttClient, hubKey, payload, configuration, communication)
				case "navigate-ptz":
					go HandleNavigatePTZ(mqttClient, hubKey, payload, configuration, communication)
				case "request-config":
					go HandleRequestConfig(mqttClient, hubKey, payload, configuration, communication)
				case "update-config":
					go HandleUpdateConfig(mqttClient, hubKey, payload, configDirectory, configuration, communication)
				case "request-sd-stream":
					go HandleRequestSDStream(mqttClient, hubKey, payload, configuration, communication)
				case "request-hd-stream":
					go HandleRequestHDStream(mqttClient, hubKey, payload, configuration, communication)
				case "receive-hd-candidates":
					go HandleReceiveHDCandidates(mqttClient, hubKey, payload, configuration, communication)
				case "trigger-relay":
					go HandleTriggerRelay(mqttClient, hubKey, payload, configuration, communication)
				}
			}
		})
	}
}
func DisconnectMQTT(mqttClient mqtt.Client) {
mqttClient.Disconnect(1000)
// HandleRecording processes a "record" request coming from the hub.
// The loosely-typed payload value is decoded into a RecordPayload and,
// when it carries a valid timestamp, a motion event is pushed onto the
// motion channel so a recording is started.
func HandleRecording(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value so it can be decoded
	// into the concrete RecordPayload struct.
	encoded, _ := json.Marshal(payload.Value)
	var record models.RecordPayload
	json.Unmarshal(encoded, &record)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if record.Timestamp == 0 {
		return
	}
	communication.HandleMotion <- models.MotionDataPartial{
		Timestamp: record.Timestamp,
	}
}
// HandleAudio processes a "get-audio-backchannel" request from the hub.
// The payload value is decoded into an AudioPayload; when it carries a
// valid timestamp the audio data is forwarded to the audio channel.
func HandleAudio(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var audio models.AudioPayload
	json.Unmarshal(encoded, &audio)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if audio.Timestamp == 0 {
		return
	}
	communication.HandleAudio <- models.AudioDataPartial{
		Timestamp: audio.Timestamp,
		Data:      audio.Data,
	}
}
// HandleGetPTZPosition handles a "get-ptz-position" request: it queries the
// ONVIF device for its current PTZ position and publishes the result back
// to the hub on the kerberos/hub/<hubKey> topic as a "ptz-position" message.
func HandleGetPTZPosition(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	value := payload.Value
	// Convert map[string]interface{} to PTZPositionPayload
	jsonData, _ := json.Marshal(value)
	var positionPayload models.PTZPositionPayload
	json.Unmarshal(jsonData, &positionPayload)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if positionPayload.Timestamp != 0 {
		// Get Position from device
		pos, err := onvif.GetPositionFromDevice(*configuration)
		if err != nil {
			log.Log.Error("routers.mqtt.main.HandleGetPTZPosition(): error getting position from device: " + err.Error())
			return
		}
		// The position needs to be wrapped in a generic payload before publishing.
		posString := fmt.Sprintf("%f,%f,%f", pos.PanTilt.X, pos.PanTilt.Y, pos.Zoom.X)
		message := models.Message{
			Payload: models.Payload{
				Action:   "ptz-position",
				DeviceId: configuration.Config.Key,
				Value: map[string]interface{}{
					"timestamp": positionPayload.Timestamp,
					"position":  posString,
				},
			},
		}
		// Fix: the previous code logged string(payload) on failure, but
		// PackageMQTTMessage returns nil bytes together with the error, so
		// the message was empty. Log the error itself, at Error level, and
		// use the actual function name in the log prefix.
		packed, err := models.PackageMQTTMessage(configuration, message)
		if err != nil {
			log.Log.Error("routers.mqtt.main.HandleGetPTZPosition(): something went wrong while sending position to hub: " + err.Error())
			return
		}
		mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, packed)
	}
}
// HandleUpdatePTZPosition handles an "update-ptz-position" request by
// decoding the payload value into an OnvifAction and forwarding it to the
// ONVIF handler, but only when a camera is currently connected.
func HandleUpdatePTZPosition(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var action models.OnvifAction
	json.Unmarshal(encoded, &action)
	// An empty action means the payload could not be decoded; ignore it.
	if action.Action == "" {
		return
	}
	if !communication.CameraConnected {
		log.Log.Info("routers.mqtt.main.MQTTListenerHandleONVIF(): received action, but camera is not connected.")
		return
	}
	communication.HandleONVIF <- action
	log.Log.Info("routers.mqtt.main.MQTTListenerHandleONVIF(): Received an action - " + action.Action)
}
// HandleRequestConfig handles a "request-config" message: it serializes the
// current device configuration (minus the encryption section) and publishes
// it back to the hub as a "receive-config" message.
func HandleRequestConfig(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	value := payload.Value
	// Convert map[string]interface{} to RequestConfigPayload
	jsonData, _ := json.Marshal(value)
	var configPayload models.RequestConfigPayload
	json.Unmarshal(jsonData, &configPayload)
	if configPayload.Timestamp != 0 {
		// Get Config from the device
		key := configuration.Config.Key
		name := configuration.Config.Name
		if key != "" && name != "" {
			// Copy the config, as we don't want to share the encryption part.
			deepCopy := configuration.Config
			var configMap map[string]interface{}
			inrec, _ := json.Marshal(deepCopy)
			json.Unmarshal(inrec, &configMap)
			// Unset encryption part.
			delete(configMap, "encryption")
			message := models.Message{
				Payload: models.Payload{
					Action:   "receive-config",
					DeviceId: configuration.Config.Key,
					Value:    configMap,
				},
			}
			// Fix: the previous code logged string(payload) on failure, but
			// PackageMQTTMessage returns nil bytes together with the error,
			// so the message was empty. Log the error itself at Error level.
			packed, err := models.PackageMQTTMessage(configuration, message)
			if err == nil {
				mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, packed)
			} else {
				log.Log.Error("routers.mqtt.main.HandleRequestConfig(): something went wrong while sending config to hub: " + err.Error())
			}
		} else {
			log.Log.Info("routers.mqtt.main.HandleRequestConfig(): no config available")
		}
		log.Log.Info("routers.mqtt.main.HandleRequestConfig(): Received a request for the config")
	}
}
// HandleUpdateConfig handles an "update-config" message: it persists the
// configuration received from the hub to disk and, on success, publishes an
// "acknowledge-update-config" message back to the hub.
func HandleUpdateConfig(mqttClient mqtt.Client, hubKey string, payload models.Payload, configDirectory string, configuration *models.Configuration, communication *models.Communication) {
	value := payload.Value
	// Convert map[string]interface{} to UpdateConfigPayload
	jsonData, _ := json.Marshal(value)
	var configPayload models.UpdateConfigPayload
	json.Unmarshal(jsonData, &configPayload)
	if configPayload.Timestamp != 0 {
		config := configPayload.Config
		// Keep the local encryption settings: the config sent out to the hub
		// has its encryption section stripped (see HandleRequestConfig), so
		// restore the current one before saving. (The old comment claimed the
		// section was being removed, which was misleading.)
		config.Encryption = configuration.Config.Encryption
		err := configService.SaveConfig(configDirectory, config, configuration, communication)
		if err == nil {
			log.Log.Info("routers.mqtt.main.HandleUpdateConfig(): Config updated")
			message := models.Message{
				Payload: models.Payload{
					Action:   "acknowledge-update-config",
					DeviceId: configuration.Config.Key,
				},
			}
			// Fix: the previous code logged string(payload) on failure, but
			// PackageMQTTMessage returns nil bytes together with the error,
			// so the message was empty. Log the error itself at Error level.
			packed, err := models.PackageMQTTMessage(configuration, message)
			if err == nil {
				mqttClient.Publish("kerberos/hub/"+hubKey, 0, false, packed)
			} else {
				log.Log.Error("routers.mqtt.main.HandleUpdateConfig(): something went wrong while sending acknowledge config to hub: " + err.Error())
			}
		} else {
			log.Log.Info("routers.mqtt.main.HandleUpdateConfig(): Config update failed")
		}
	}
}
// HandleRequestSDStream handles a "request-sd-stream" message: when a
// camera is connected it signals the SD livestream channel. The send is
// non-blocking so repeated requests while a stream is busy are dropped.
func HandleRequestSDStream(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var request models.RequestSDStreamPayload
	json.Unmarshal(encoded, &request)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if request.Timestamp == 0 {
		return
	}
	if !communication.CameraConnected {
		log.Log.Info("routers.mqtt.main.HandleRequestSDStream(): received request to livestream, but camera is not connected.")
		return
	}
	// Non-blocking send: drop the signal if the channel is already full.
	select {
	case communication.HandleLiveSD <- time.Now().Unix():
	default:
	}
	log.Log.Info("routers.mqtt.main.HandleRequestSDStream(): received request to livestream.")
}
// HandleRequestHDStream handles a "request-hd-stream" message: when a
// camera is connected it forwards the WebRTC handshake request (tagged with
// the hub key so the answer can be routed back) to the HD handshake
// channel. The send is non-blocking so concurrent requests are dropped.
func HandleRequestHDStream(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var request models.RequestHDStreamPayload
	json.Unmarshal(encoded, &request)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if request.Timestamp == 0 {
		return
	}
	if !communication.CameraConnected {
		log.Log.Info("routers.mqtt.main.HandleRequestHDStream(): received request to setup webrtc, but camera is not connected.")
		return
	}
	// Set the Hub key, so we can send back the answer.
	request.HubKey = hubKey
	// Non-blocking send: drop the request if the channel is already full.
	select {
	case communication.HandleLiveHDHandshake <- request:
	default:
	}
	log.Log.Info("routers.mqtt.main.HandleRequestHDStream(): received request to setup webrtc.")
}
// HandleReceiveHDCandidates handles a "receive-hd-candidates" message: it
// registers the received WebRTC ICE candidates for the matching session,
// keyed by device key and session id.
func HandleReceiveHDCandidates(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var candidates models.ReceiveHDCandidatesPayload
	json.Unmarshal(encoded, &candidates)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if candidates.Timestamp == 0 {
		return
	}
	if !communication.CameraConnected {
		log.Log.Info("routers.mqtt.main.HandleReceiveHDCandidates(): received candidate, but camera is not connected.")
		return
	}
	// Register candidate channel
	sessionKey := configuration.Config.Key + "/" + candidates.SessionID
	go webrtc.RegisterCandidates(sessionKey, candidates)
}
// HandleNavigatePTZ handles a "navigate-ptz" request: the embedded action
// (itself a JSON document carried as a string) is decoded into an
// OnvifAction and forwarded to the ONVIF handler when a camera is
// connected.
func HandleNavigatePTZ(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var navigate models.NavigatePTZPayload
	json.Unmarshal(encoded, &navigate)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if navigate.Timestamp == 0 {
		return
	}
	if !communication.CameraConnected {
		log.Log.Info("routers.mqtt.main.HandleNavigatePTZ(): received action, but camera is not connected.")
		return
	}
	// The action field is itself nested JSON.
	var onvifAction models.OnvifAction
	json.Unmarshal([]byte(navigate.Action), &onvifAction)
	communication.HandleONVIF <- onvifAction
	log.Log.Info("routers.mqtt.main.HandleNavigatePTZ(): Received an action - " + onvifAction.Action)
}
// HandleTriggerRelay handles a "trigger-relay" request: it connects to the
// ONVIF device and triggers the relay output identified by the token in
// the payload.
func HandleTriggerRelay(mqttClient mqtt.Client, hubKey string, payload models.Payload, configuration *models.Configuration, communication *models.Communication) {
	// Re-marshal the map[string]interface{} value into the concrete struct.
	encoded, _ := json.Marshal(payload.Value)
	var trigger models.TriggerRelay
	json.Unmarshal(encoded, &trigger)
	// A zero timestamp means the payload could not be decoded; ignore it.
	if trigger.Timestamp == 0 {
		return
	}
	if !communication.CameraConnected {
		log.Log.Info("routers.mqtt.main.HandleTriggerRelay(): received trigger, but camera is not connected.")
		return
	}
	// Get token (name of relay)
	token := trigger.Token
	// Connect to Onvif device
	cameraConfiguration := configuration.Config.Capture.IPCamera
	device, _, err := onvif.ConnectToOnvifDevice(&cameraConfiguration)
	if err != nil {
		log.Log.Error("routers.mqtt.main.HandleTriggerRelay(): error connecting to device: " + err.Error())
		return
	}
	// Trigger relay output
	if err := onvif.TriggerRelayOutput(device, token); err != nil {
		log.Log.Error("routers.mqtt.main.HandleTriggerRelay(): error triggering relay: " + err.Error())
	} else {
		log.Log.Info("routers.mqtt.main.HandleTriggerRelay(): trigger (" + token + ") relay output.")
	}
}
// DisconnectMQTT unsubscribes from the agent topic and tears down the MQTT
// connection, waiting up to one second for in-flight work to complete.
// The config parameter is part of the established call signature and is
// not used here.
func DisconnectMQTT(mqttClient mqtt.Client, config *models.Config) {
	if mqttClient != nil {
		// Cleanup all subscriptions
		// New methods
		mqttClient.Unsubscribe("kerberos/agent/" + PREV_HubKey)
		mqttClient.Disconnect(1000)
		// Fix: the previous `mqttClient = nil` only cleared the local copy of
		// the parameter and had no effect for the caller, so it was removed.
		log.Log.Info("routers.mqtt.main.DisconnectMQTT(): MQTT client disconnected.")
	}
}

View File

@@ -0,0 +1,200 @@
package websocket
import (
"context"
"encoding/base64"
"image"
"net/http"
"sync"
"github.com/gin-gonic/gin"
"github.com/gorilla/websocket"
"github.com/kerberos-io/agent/machinery/src/capture"
"github.com/kerberos-io/agent/machinery/src/log"
"github.com/kerberos-io/agent/machinery/src/models"
"github.com/kerberos-io/agent/machinery/src/packets"
"github.com/kerberos-io/agent/machinery/src/utils"
)
// Message is the envelope exchanged with websocket clients: it identifies
// the client, carries a message-type discriminator, and a free-form
// string map body whose keys depend on the message type.
type Message struct {
	// ClientID identifies the websocket client this message belongs to.
	ClientID string `json:"client_id" bson:"client_id"`
	// MessageType discriminates the body, e.g. "hello", "hello-back",
	// "stream-sd", "stop-sd", "image".
	MessageType string `json:"message_type" bson:"message_type"`
	// Message is the body; keys depend on MessageType (e.g. "base64" for
	// "image" frames).
	Message map[string]string `json:"message" bson:"message"`
}
// Connection wraps a websocket connection with a write mutex and the set
// of cancel functions for streams currently running for this client.
type Connection struct {
	// Socket is the underlying gorilla websocket connection.
	Socket *websocket.Conn
	// mu serializes writes to the socket (see WriteJson/WriteMessage).
	mu sync.Mutex
	// Cancels maps a stream name (e.g. "stream-sd") to the context cancel
	// function that stops the corresponding forwarding goroutine.
	Cancels map[string]context.CancelFunc
}
// Concurrency handling - sending messages

// WriteJson sends message as a JSON frame over the socket, holding the
// write lock so concurrent senders do not interleave frames.
func (c *Connection) WriteJson(message Message) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.Socket.WriteJSON(message)
}
// WriteMessage sends raw bytes as a text frame over the socket, holding
// the write lock so concurrent senders do not interleave frames.
func (c *Connection) WriteMessage(bytes []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.Socket.WriteMessage(websocket.TextMessage, bytes)
}
// sockets tracks all connected websocket clients by client id.
// NOTE(review): this map is read and written from handler goroutines
// without a lock — confirm handlers are serialized upstream or guard it
// with a mutex.
var sockets = make(map[string]*Connection)

// upgrader upgrades incoming HTTP requests to websocket connections;
// CheckOrigin accepts every origin.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}
// WebsocketHandler upgrades the HTTP request to a websocket connection,
// registers the client under the id carried by its first message, and then
// serves messages in a loop until the client disconnects.
//
// Supported message types:
//   - "hello":     replies with a "hello-back" greeting.
//   - "stream-sd": starts forwarding the low-resolution stream (at most
//     one per client) via ForwardSDStream.
//   - "stop-sd":   cancels an active low-resolution stream.
func WebsocketHandler(c *gin.Context, communication *models.Communication, captureDevice *capture.Capture) {
	w := c.Writer
	r := c.Request
	conn, err := upgrader.Upgrade(w, r, nil)
	// error handling here
	if err == nil {
		defer conn.Close()
		// The first message identifies the client (ClientID).
		var message Message
		err = conn.ReadJSON(&message)
		if err != nil {
			log.Log.Error("routers.websocket.main.WebsocketHandler(): " + err.Error())
			return
		}
		clientID := message.ClientID
		// Register the connection on first contact.
		// NOTE(review): sockets is a shared map accessed without a lock;
		// confirm concurrent handlers cannot race here.
		if sockets[clientID] == nil {
			connection := new(Connection)
			connection.Socket = conn
			sockets[clientID] = connection
			sockets[clientID].Cancels = make(map[string]context.CancelFunc)
			log.Log.Info("routers.websocket.main.WebsocketHandler(): " + clientID + ": connected.")
		}
		// Continuously read messages
		for {
			// Dispatch on the current message, then block on the next read
			// at the bottom of the loop. Unknown types are silently ignored.
			switch message.MessageType {
			case "hello":
				m := message.Message
				bePolite := Message{
					ClientID:    clientID,
					MessageType: "hello-back",
					Message: map[string]string{
						"message": "Hello " + m["client_id"] + "!",
					},
				}
				sockets[clientID].WriteJson(bePolite)
			case "stop-sd":
				// Cancel the running SD stream, if any.
				_, exists := sockets[clientID].Cancels["stream-sd"]
				if exists {
					sockets[clientID].Cancels["stream-sd"]()
				} else {
					log.Log.Error("routers.websocket.main.WebsocketHandler(): streaming sd does not exists for " + clientID)
				}
			case "stream-sd":
				if communication.CameraConnected {
					// Only one SD stream per client: the presence of a cancel
					// func marks an active stream.
					_, exists := sockets[clientID].Cancels["stream-sd"]
					if exists {
						log.Log.Debug("routers.websocket.main.WebsocketHandler(): already streaming sd for " + clientID)
					} else {
						startStream := Message{
							ClientID:    clientID,
							MessageType: "stream-sd",
							Message: map[string]string{
								"message": "Start streaming low resolution",
							},
						}
						sockets[clientID].WriteJson(startStream)
						// The cancel func lets "stop-sd" (or teardown) end the
						// forwarding goroutine.
						ctx, cancel := context.WithCancel(context.Background())
						sockets[clientID].Cancels["stream-sd"] = cancel
						go ForwardSDStream(ctx, clientID, sockets[clientID], communication, captureDevice)
					}
				}
			}
			// Block until the next message; a read error means the client
			// disconnected.
			err = conn.ReadJSON(&message)
			if err != nil {
				break
			}
		}
		// If clientID is in sockets
		_, exists := sockets[clientID]
		if exists {
			delete(sockets, clientID)
			log.Log.Info("routers.websocket.main.WebsocketHandler(): " + clientID + ": terminated and disconnected websocket connection.")
		}
	}
}
// ForwardSDStream reads packets from the (sub) stream queue, decodes key
// frames into images, and pushes them base64-encoded over the websocket
// as "image" messages until the context is cancelled or a read/write
// error occurs. On exit it deregisters the client's "stream-sd" cancel
// function.
func ForwardSDStream(ctx context.Context, clientID string, connection *Connection, communication *models.Communication, captureDevice *capture.Capture) {
	var queue *packets.Queue
	var cursor *packets.QueueCursor
	// Prefer the sub (low resolution) stream; fall back to the main one.
	rtspClient := captureDevice.RTSPSubClient
	if rtspClient != nil {
		queue = communication.SubQueue
		cursor = queue.Latest()
	} else {
		rtspClient = captureDevice.RTSPClient
		queue = communication.Queue
		cursor = queue.Latest()
	}
logreader:
	for {
		var encodedImage string
		if queue != nil && cursor != nil && rtspClient != nil {
			pkt, err := cursor.ReadPacket()
			if err != nil {
				log.Log.Error("routers.websocket.main.ForwardSDStream():" + err.Error())
				break logreader
			}
			// Only key frames can be decoded standalone; skip the rest.
			if !pkt.IsKeyFrame {
				continue
			}
			var img image.YCbCr
			img, err = (*rtspClient).DecodePacket(pkt)
			if err == nil {
				raw, _ := utils.ImageToBytes(&img)
				encodedImage = base64.StdEncoding.EncodeToString(raw)
			}
		}
		frame := Message{
			ClientID:    clientID,
			MessageType: "image",
			Message: map[string]string{
				"base64": encodedImage,
			},
		}
		if writeErr := connection.WriteJson(frame); writeErr != nil {
			log.Log.Error("routers.websocket.main.ForwardSDStream():" + writeErr.Error())
			break logreader
		}
		// Stop as soon as the client (or handler teardown) cancels us.
		select {
		case <-ctx.Done():
			break logreader
		default:
		}
	}
	// Deregister the cancel func so a new "stream-sd" request can start.
	if _, exists := connection.Cancels["stream-sd"]; exists {
		delete(connection.Cancels, "stream-sd")
	} else {
		log.Log.Error("routers.websocket.main.ForwardSDStream(): streaming sd does not exists for " + clientID)
	}
	// Send stop streaming message
	log.Log.Info("routers.websocket.main.ForwardSDStream(): stop sending streaming over websocket")
}

Some files were not shown because too many files have changed in this diff Show More