mirror of
https://github.com/outbackdingo/nDPId.git
synced 2026-01-28 02:19:37 +00:00
Compare commits
258 Commits
add/tls-pr
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d629fda779 | ||
|
|
643aa49d34 | ||
|
|
8dfaa7c86c | ||
|
|
59caa5231e | ||
|
|
9c0f5141bc | ||
|
|
e8ef267e0a | ||
|
|
2651833c58 | ||
|
|
bd7df393fe | ||
|
|
88cfecdf95 | ||
|
|
a91aab493c | ||
|
|
fe42e998d0 | ||
|
|
22e44c1e0b | ||
|
|
d8cad33a70 | ||
|
|
37989db0bb | ||
|
|
19f80ba163 | ||
|
|
c8c58e0b16 | ||
|
|
6d3dc99fad | ||
|
|
b8d3cf9e8f | ||
|
|
510b03cbcd | ||
|
|
66aca303b6 | ||
|
|
0e7e5216d8 | ||
|
|
7ab7bb3772 | ||
|
|
a47bc9caa3 | ||
|
|
7d94632811 | ||
|
|
2c81f116bf | ||
|
|
49b058d2d3 | ||
|
|
fea52d98ca | ||
|
|
02b686241e | ||
|
|
2cb0d7941b | ||
|
|
97e60ad7ec | ||
|
|
eea5a49638 | ||
|
|
a9934e9c9e | ||
|
|
644fa2dfb3 | ||
|
|
1a6b1feda9 | ||
|
|
648dedc7ba | ||
|
|
19036951c7 | ||
|
|
4e7e361d84 | ||
|
|
9809ae4ea0 | ||
|
|
97387d0f1c | ||
|
|
46ef266139 | ||
|
|
ae6864d4e4 | ||
|
|
f3c8ffe6c1 | ||
|
|
07d6018109 | ||
|
|
dd909adeb8 | ||
|
|
8848420a72 | ||
|
|
f8181d7f6a | ||
|
|
b747255a5d | ||
|
|
a52a37ef78 | ||
|
|
ae95c95617 | ||
|
|
42c54d3755 | ||
|
|
bb870cb98f | ||
|
|
e262227d65 | ||
|
|
899e5a80d6 | ||
|
|
053818b242 | ||
|
|
4048a8c300 | ||
|
|
09b246dbfa | ||
|
|
471ea83493 | ||
|
|
064bd3aefa | ||
|
|
acd9e871b6 | ||
|
|
b9465c09d8 | ||
|
|
3a4b7b0860 | ||
|
|
34f01b90e3 | ||
|
|
7b91ad8458 | ||
|
|
442900bc14 | ||
|
|
0a4f3cb0c8 | ||
|
|
4bed2a791f | ||
|
|
1aa7d9bdb6 | ||
|
|
bd269c9ead | ||
|
|
7e4c69635a | ||
|
|
9105b393e1 | ||
|
|
9efdecf4ef | ||
|
|
8c114e4916 | ||
|
|
a733d536ad | ||
|
|
9fc35e7a7e | ||
|
|
ce9752af16 | ||
|
|
f7933d0fdb | ||
|
|
d5a84ce630 | ||
|
|
ce5f448d3b | ||
|
|
2b48eb0514 | ||
|
|
ddc96ba614 | ||
|
|
7b2cd268bf | ||
|
|
817559ffa7 | ||
|
|
25944e2089 | ||
|
|
5423797267 | ||
|
|
7e126c205e | ||
|
|
7d58703bdb | ||
|
|
ae36f8df6c | ||
|
|
8c5ee1f7bb | ||
|
|
9969f955dc | ||
|
|
2c771c54b0 | ||
|
|
fb1dcc71de | ||
|
|
071a9bcb91 | ||
|
|
9a14454d3c | ||
|
|
f9d9849300 | ||
|
|
efed6f196e | ||
|
|
3e2ce661f0 | ||
|
|
76e1ea0598 | ||
|
|
0e792ba301 | ||
|
|
9ef17b7bd8 | ||
|
|
1c9aa85485 | ||
|
|
aef9d629f0 | ||
|
|
f97b3880b6 | ||
|
|
c55429c131 | ||
|
|
7bebd7b2c7 | ||
|
|
335708d3e3 | ||
|
|
2a0161c1bb | ||
|
|
adb8fe96f5 | ||
|
|
4efe7e43a2 | ||
|
|
5e4005162b | ||
|
|
a230eaf061 | ||
|
|
68e0c1f280 | ||
|
|
8271f15e25 | ||
|
|
f6f3a4daab | ||
|
|
762e6d36bf | ||
|
|
930aaf9276 | ||
|
|
165b18c829 | ||
|
|
1fbfd46fe8 | ||
|
|
5290f76b5f | ||
|
|
f4d0f80711 | ||
|
|
187ebeb4df | ||
|
|
71d2fcc491 | ||
|
|
86aaf0e808 | ||
|
|
e822bb6145 | ||
|
|
4c91038274 | ||
|
|
53126a0af9 | ||
|
|
15608bb571 | ||
|
|
e93a4c9a81 | ||
|
|
b46f15de03 | ||
|
|
c7eace426c | ||
|
|
33560d64d2 | ||
|
|
675640b0e6 | ||
|
|
5e5f268b3c | ||
|
|
7ef7667da3 | ||
|
|
d43a3d1436 | ||
|
|
b6e4162116 | ||
|
|
717d66b0e7 | ||
|
|
791b27219d | ||
|
|
a487e53015 | ||
|
|
aeb6e6f536 | ||
|
|
8af37b3770 | ||
|
|
8949ba39e6 | ||
|
|
ea968180a2 | ||
|
|
556025b34d | ||
|
|
feb2583ef6 | ||
|
|
7368f222db | ||
|
|
a007a907da | ||
|
|
876aef98e1 | ||
|
|
88cf57a16f | ||
|
|
7e81f5b1b7 | ||
|
|
8acf2d7273 | ||
|
|
71d933b0cd | ||
|
|
fbe07fd882 | ||
|
|
5432b06665 | ||
|
|
142a435bf6 | ||
|
|
f5c5bc88a7 | ||
|
|
53d8a28582 | ||
|
|
37f3770e3e | ||
|
|
7368d34d8d | ||
|
|
ff77bab398 | ||
|
|
d274a06176 | ||
|
|
a5dcc17396 | ||
|
|
3416db11dc | ||
|
|
830174c7b5 | ||
|
|
bb9f02719d | ||
|
|
f38f1ec37f | ||
|
|
fa7e76cc75 | ||
|
|
b0c343a795 | ||
|
|
d5266b7f44 | ||
|
|
82934b7271 | ||
|
|
4920b2a4be | ||
|
|
8ebaccc27d | ||
|
|
dcb595e161 | ||
|
|
b667f9e1da | ||
|
|
55c8a848d3 | ||
|
|
d80ea84d2e | ||
|
|
b1e679b0bb | ||
|
|
949fc0c35e | ||
|
|
5d56288a11 | ||
|
|
84b12cd02c | ||
|
|
93498fff02 | ||
|
|
1b67927169 | ||
|
|
17c21e1d27 | ||
|
|
5fb706e9a6 | ||
|
|
5335d84fe5 | ||
|
|
32ab500eb0 | ||
|
|
e124f2d660 | ||
|
|
6ff8982ffb | ||
|
|
315dc32baf | ||
|
|
3d0c06ef54 | ||
|
|
8dca2b546a | ||
|
|
e134eef5bb | ||
|
|
d29efd4d7c | ||
|
|
44adfc0b7d | ||
|
|
dfd0449306 | ||
|
|
07f2c2d9cc | ||
|
|
73b8c378f2 | ||
|
|
a0e0611c56 | ||
|
|
7f8e01d442 | ||
|
|
835a7bafb1 | ||
|
|
a7ac83385b | ||
|
|
0a0342ce28 | ||
|
|
7515c1b072 | ||
|
|
be07c16c0e | ||
|
|
e42e3fe406 | ||
|
|
96b0a8a474 | ||
|
|
091fd4d116 | ||
|
|
dfb8d3379f | ||
|
|
a7bd3570b0 | ||
|
|
b01498f011 | ||
|
|
cc60e819e8 | ||
|
|
5234f4621b | ||
|
|
86ac09a8db | ||
|
|
4b3031245d | ||
|
|
2b881d56e7 | ||
|
|
dd4357c238 | ||
|
|
7b15838696 | ||
|
|
0e31829401 | ||
|
|
d9f304e4b0 | ||
|
|
ebb439d959 | ||
|
|
79834df457 | ||
|
|
4b923bdf44 | ||
|
|
ba8236c1f7 | ||
|
|
d915530feb | ||
|
|
7bd8081cd2 | ||
|
|
bc0a5782cc | ||
|
|
8a8de12fb3 | ||
|
|
c57ace2fd3 | ||
|
|
344934b7d9 | ||
|
|
22ba5d5103 | ||
|
|
7217b90cd1 | ||
|
|
74a9f7d86b | ||
|
|
57d8dda350 | ||
|
|
425617abdf | ||
|
|
92b3c76446 | ||
|
|
967381a599 | ||
|
|
d107560049 | ||
|
|
c8ec505b9c | ||
|
|
2b1db0a556 | ||
|
|
d8c20d37e5 | ||
|
|
5a9b40779d | ||
|
|
d0c070a800 | ||
|
|
8a936a5072 | ||
|
|
c9514136b7 | ||
|
|
a4e5bab9b2 | ||
|
|
b76a0c4607 | ||
|
|
c9da8b0fd9 | ||
|
|
ca355b1fdb | ||
|
|
99accd03a2 | ||
|
|
225f4b3fb6 | ||
|
|
a8d46ef343 | ||
|
|
aafc72a44b | ||
|
|
0a959993bc | ||
|
|
595bd5c5e3 | ||
|
|
4236aafa0d | ||
|
|
23816f1403 | ||
|
|
42aad33ec8 | ||
|
|
c71284291e | ||
|
|
58439a6761 |
19
.circleci/config.yml
Normal file
19
.circleci/config.yml
Normal file
@@ -0,0 +1,19 @@
|
||||
version: 2.1
|
||||
|
||||
jobs:
|
||||
build:
|
||||
docker:
|
||||
- image: ubuntu:latest
|
||||
steps:
|
||||
- checkout
|
||||
- run: export DEBIAN_FRONTEND=noninteractive
|
||||
- run: apt-get update -qq
|
||||
- run: |
|
||||
env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get install -y -qq \
|
||||
coreutils wget git unzip make cmake binutils gcc g++ autoconf automake flex bison texinfo \
|
||||
libtool pkg-config gettext libjson-c-dev flex bison libpcap-dev zlib1g-dev
|
||||
- run: |
|
||||
cmake -S . -B build -DENABLE_SYSTEMD=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON
|
||||
- run: |
|
||||
cmake --build build --verbose
|
||||
38
.github/workflows/build-archlinux.yml
vendored
Normal file
38
.github/workflows/build-archlinux.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
name: ArchLinux PKGBUILD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- tmp
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
types: [opened, synchronize, reopened]
|
||||
release:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
CMAKE_C_FLAGS: -Werror
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: false
|
||||
fetch-depth: 1
|
||||
- name: Prepare for ArchLinux packaging
|
||||
run: |
|
||||
sudo chmod -R 0777 .
|
||||
mv -v packages/archlinux packages/ndpid-testing
|
||||
- uses: 2m/arch-pkgbuild-builder@v1.16
|
||||
with:
|
||||
debug: true
|
||||
target: 'pkgbuild'
|
||||
pkgname: 'packages/ndpid-testing'
|
||||
- name: Upload PKG
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: nDPId-archlinux-packages
|
||||
path: packages/ndpid-testing/*.pkg.tar.zst
|
||||
59
.github/workflows/build-centos.yml
vendored
Normal file
59
.github/workflows/build-centos.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: CentOs
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- tmp
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
types: [opened, synchronize, reopened]
|
||||
release:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
centos8:
|
||||
runs-on: ubuntu-latest
|
||||
container: 'centos:8'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: false
|
||||
fetch-depth: 1
|
||||
- name: Install CentOs Prerequisites
|
||||
run: |
|
||||
sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
|
||||
sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
|
||||
yum -y update
|
||||
yum -y install curl gpg
|
||||
curl 'https://packages.ntop.org/centos/ntop.repo' > /etc/yum.repos.d/ntop.repo
|
||||
curl 'https://packages.ntop.org/centos/RPM-GPG-KEY-deri' | gpg --import
|
||||
yum -y install yum-utils dnf-plugins-core epel-release
|
||||
dnf config-manager --set-enabled powertools
|
||||
yum -y update
|
||||
yum -y install rpm-build gcc gcc-c++ autoconf automake make cmake flex bison gettext pkg-config libtool ndpi-dev libpcap-devel zlib-devel python3.8 git wget unzip /usr/lib64/libasan.so.5.0.0 /usr/lib64/libubsan.so.1.0.0
|
||||
repoquery -l ndpi-dev
|
||||
- name: Configure nDPId
|
||||
run: |
|
||||
mkdir build && cd build
|
||||
cmake .. -DENABLE_SYSTEMD=ON -DBUILD_EXAMPLES=ON -DENABLE_SANITIZER=ON -DNDPI_NO_PKGCONFIG=ON -DSTATIC_LIBNDPI_INSTALLDIR=/usr
|
||||
- name: Build nDPId
|
||||
run: |
|
||||
make -C build all VERBOSE=1
|
||||
- name: CPack RPM
|
||||
run: |
|
||||
cd ./build && cpack -G RPM && cd ..
|
||||
- name: Upload RPM
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: nDPId-centos-packages
|
||||
path: build/*.rpm
|
||||
- name: Upload on Failure
|
||||
uses: actions/upload-artifact@v4
|
||||
if: failure()
|
||||
with:
|
||||
name: autoconf-config-log
|
||||
path: |
|
||||
build/CMakeCache.txt
|
||||
libnDPI/config.log
|
||||
25
.github/workflows/build-docker.yml
vendored
Normal file
25
.github/workflows/build-docker.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: Docker Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
push: true
|
||||
tags: utoni/ndpid:latest
|
||||
39
.github/workflows/build-freebsd.yml
vendored
Normal file
39
.github/workflows/build-freebsd.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
name: FreeBSD Build
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At the end of every day
|
||||
- cron: '0 0 * * *'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- tmp
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
types: [opened, synchronize, reopened]
|
||||
release:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
name: Build and Test
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Test in FreeBSD
|
||||
id: test
|
||||
uses: vmactions/freebsd-vm@main
|
||||
with:
|
||||
usesh: true
|
||||
prepare: |
|
||||
pkg install -y bash autoconf automake cmake gmake libtool gettext pkgconf gcc \
|
||||
git wget unzip flock \
|
||||
json-c flex bison libpcap curl openssl dbus
|
||||
run: |
|
||||
echo "Working Directory: $(pwd)"
|
||||
echo "User.............: $(whoami)"
|
||||
echo "FreeBSD Version..: $(freebsd-version)"
|
||||
# TODO: Make examples I/O event agnostic i.e. use nio
|
||||
cmake -S . -B build -DBUILD_NDPI=ON -DBUILD_EXAMPLES=OFF #-DENABLE_CURL=ON -DENABLE_DBUS=ON
|
||||
cmake --build build
|
||||
39
.github/workflows/build-openwrt.yml
vendored
39
.github/workflows/build-openwrt.yml
vendored
@@ -1,71 +1,54 @@
|
||||
name: OpenWrt Build
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At the end of every day
|
||||
- cron: '0 0 * * *'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- tmp
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
types: [opened, synchronize, reopened]
|
||||
release:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: ${{ matrix.arch }} build
|
||||
name: ${{ matrix.arch }} ${{ matrix.target }}
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- arch: arc_archs
|
||||
target: archs38-generic
|
||||
|
||||
- arch: arm_cortex-a9_vfpv3-d16
|
||||
target: mvebu-cortexa9
|
||||
|
||||
- arch: mips_24kc
|
||||
target: ath79-generic
|
||||
|
||||
- arch: mipsel_24kc
|
||||
target: mt7621
|
||||
|
||||
- arch: powerpc_464fp
|
||||
target: apm821xx-nand
|
||||
|
||||
- arch: powerpc_8540
|
||||
target: mpc85xx-p1010
|
||||
|
||||
- arch: aarch64_cortex-a53
|
||||
target: mvebu-cortexa53
|
||||
|
||||
- arch: arm_cortex-a15_neon-vfpv4
|
||||
target: armvirt-32
|
||||
|
||||
- arch: i386_pentium-mmx
|
||||
target: x86-geode
|
||||
|
||||
- arch: x86_64
|
||||
target: x86-64
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: false
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Build
|
||||
uses: openwrt/gh-action-sdk@master
|
||||
uses: openwrt/gh-action-sdk@main
|
||||
env:
|
||||
ARCH: ${{ matrix.arch }}
|
||||
ARCH: ${{ matrix.arch }}-snapshot
|
||||
FEED_DIR: ${{ github.workspace }}/packages/openwrt
|
||||
FEEDNAME: ndpid_openwrt_packages_ci
|
||||
PACKAGES: nDPId-testing
|
||||
V: s
|
||||
|
||||
- name: Store packages
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.arch}}-packages
|
||||
name: nDPId-${{ matrix.arch}}-${{ matrix.target }}
|
||||
path: bin/packages/${{ matrix.arch }}/ndpid_openwrt_packages_ci/*.ipk
|
||||
|
||||
45
.github/workflows/build-rpm.yml
vendored
Normal file
45
.github/workflows/build-rpm.yml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
name: RPM Build
|
||||
on:
|
||||
schedule:
|
||||
# At the end of every day
|
||||
- cron: '0 0 * * *'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- tmp
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
types: [opened, synchronize, reopened]
|
||||
release:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install Ubuntu Prerequisites
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install fakeroot alien autoconf automake cmake libtool pkg-config gettext libjson-c-dev flex bison libpcap-dev zlib1g-dev libcurl4-openssl-dev libdbus-1-dev
|
||||
|
||||
- name: Build RPM package
|
||||
run: |
|
||||
cmake -S . -B build-rpm -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DCMAKE_BUILD_TYPE=Release
|
||||
cmake --build build-rpm --parallel
|
||||
cd build-rpm
|
||||
cpack -G RPM
|
||||
cd ..
|
||||
|
||||
- name: Convert/Install RPM package
|
||||
run: |
|
||||
fakeroot alien --scripts --to-deb --verbose ./build-rpm/nDPId-*.rpm
|
||||
sudo dpkg -i ./ndpid_*.deb
|
||||
|
||||
- name: Upload RPM
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: nDPId-rpm-packages
|
||||
path: build-rpm/*.rpm
|
||||
347
.github/workflows/build.yml
vendored
347
.github/workflows/build.yml
vendored
@@ -1,6 +1,9 @@
|
||||
name: Build
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# At the end of every day
|
||||
- cron: '0 0 * * *'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
@@ -14,51 +17,210 @@ on:
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: ${{ matrix.os }} ${{ matrix.gcrypt }}
|
||||
name: ${{ matrix.os }} ${{ matrix.compiler }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
CMAKE_C_COMPILER: ${{ matrix.compiler }}
|
||||
CMAKE_C_FLAGS: -Werror
|
||||
CMAKE_C_FLAGS: -Werror ${{ matrix.cflags }}
|
||||
CMAKE_C_EXE_LINKER_FLAGS: ${{ matrix.ldflags }}
|
||||
CMAKE_MODULE_LINKER_FLAGS: ${{ matrix.ldflags }}
|
||||
DYLD_LIBRARY_PATH: /usr/local/lib
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
os: ["ubuntu-latest", "ubuntu-18.04"]
|
||||
ndpid_gcrypt: ["-DNDPI_WITH_GCRYPT=OFF", "-DNDPI_WITH_GCRYPT=ON"]
|
||||
ndpid_zlib: ["-DENABLE_ZLIB=OFF", "-DENABLE_ZLIB=ON"]
|
||||
ndpi_min_version: ["4.5"]
|
||||
include:
|
||||
- compiler: "default-cc"
|
||||
- compiler: "gcc"
|
||||
os: "ubuntu-latest"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: "-DBUILD_RUST_EXAMPLES=ON"
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: "-DENABLE_CRYPTO=ON"
|
||||
sanitizer: "-DENABLE_SANITIZER=OFF -DENABLE_SANITIZER_THREAD=OFF"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll: "-DFORCE_POLL=OFF"
|
||||
upload: true
|
||||
upload_suffix: ""
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "gcc"
|
||||
os: "ubuntu-latest"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=ON"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: "-DENABLE_CRYPTO=ON -DNDPI_WITH_MAXMINDDB=ON -DNDPI_WITH_PCRE=ON -DENABLE_MEMORY_PROFILING=ON"
|
||||
sanitizer: "-DENABLE_SANITIZER=OFF -DENABLE_SANITIZER_THREAD=OFF"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll: "-DFORCE_POLL=OFF"
|
||||
upload: true
|
||||
upload_suffix: "-host-gcrypt"
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "clang"
|
||||
os: "ubuntu-latest"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=OFF"
|
||||
ndpid_extras: ""
|
||||
sanitizer: "-DENABLE_SANITIZER=OFF -DENABLE_SANITIZER_THREAD=OFF"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll: "-DFORCE_POLL=OFF"
|
||||
upload: true
|
||||
upload_suffix: "-no-zlib"
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "gcc"
|
||||
os: "ubuntu-latest"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: ""
|
||||
sanitizer: "-DENABLE_SANITIZER=ON"
|
||||
coverage: "-DENABLE_COVERAGE=ON"
|
||||
poll: "-DFORCE_POLL=ON"
|
||||
upload: false
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "clang"
|
||||
os: "ubuntu-latest"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: "-DENABLE_CRYPTO=ON"
|
||||
sanitizer: "-DENABLE_SANITIZER=ON"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll: "-DFORCE_POLL=OFF"
|
||||
upload: false
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "clang-12"
|
||||
os: "ubuntu-latest"
|
||||
os: "ubuntu-22.04"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: ""
|
||||
sanitizer: "-DENABLE_SANITIZER_THREAD=ON"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll:
|
||||
upload: false
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "gcc-10"
|
||||
os: "ubuntu-latest"
|
||||
os: "ubuntu-22.04"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=OFF"
|
||||
ndpid_extras: ""
|
||||
sanitizer: "-DENABLE_SANITIZER=ON"
|
||||
- compiler: "gcc-7"
|
||||
os: "ubuntu-18.04"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll: "-DFORCE_POLL=ON"
|
||||
upload: false
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "gcc-9"
|
||||
os: "ubuntu-22.04"
|
||||
ndpi_build: "-DBUILD_NDPI=ON"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=ON"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: ""
|
||||
sanitizer: "-DENABLE_SANITIZER=ON"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll: "-DFORCE_POLL=OFF"
|
||||
upload: false
|
||||
ndpi_min_version: "5.0"
|
||||
- compiler: "cc"
|
||||
os: "macOS-13"
|
||||
ndpi_build: "-DBUILD_NDPI=OFF"
|
||||
ndpid_examples: "-DBUILD_EXAMPLES=OFF"
|
||||
ndpid_rust_examples: ""
|
||||
ndpid_gcrypt: "-DNDPI_WITH_GCRYPT=OFF"
|
||||
ndpid_zlib: "-DENABLE_ZLIB=ON"
|
||||
ndpid_extras: ""
|
||||
examples: "-DBUILD_EXAMPLES=OFF"
|
||||
sanitizer: "-DENABLE_SANITIZER=OFF"
|
||||
coverage: "-DENABLE_COVERAGE=OFF"
|
||||
poll:
|
||||
upload: false
|
||||
ndpi_min_version: "5.0"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Print Matrix
|
||||
run: |
|
||||
echo '----------------------------------------'
|
||||
echo '| OS.......: ${{ matrix.os }}'
|
||||
echo '| CC.......: ${{ matrix.compiler }}'
|
||||
echo "| CFLAGS...: $CMAKE_C_FLAGS"
|
||||
echo "| LDFLAGS..: $CMAKE_C_EXE_LINKER_FLAGS"
|
||||
echo '|---------------------------------------'
|
||||
echo '| nDPI min.: ${{ matrix.ndpi_min_version }}'
|
||||
echo '| GCRYPT...: ${{ matrix.ndpid_gcrypt }}'
|
||||
echo '| ZLIB.....: ${{ matrix.ndpid_zlib }}'
|
||||
echo '| Extras...: ${{ matrix.ndpid_extras }}'
|
||||
echo '| ForcePoll: ${{ matrix.poll }}'
|
||||
echo '|---------------------------------------'
|
||||
echo '| SANITIZER: ${{ matrix.sanitizer }}'
|
||||
echo '| COVERAGE.: ${{ matrix.coverage }}'
|
||||
echo '|---------------------------------------'
|
||||
echo '| UPLOAD...: ${{ matrix.upload }}'
|
||||
echo '----------------------------------------'
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: false
|
||||
fetch-depth: 1
|
||||
- name: Install MacOS Prerequisites
|
||||
if: startsWith(matrix.os, 'macOS')
|
||||
run: |
|
||||
brew install coreutils automake make unzip
|
||||
wget 'https://www.tcpdump.org/release/libpcap-1.10.4.tar.gz'
|
||||
tar -xzvf libpcap-1.10.4.tar.gz
|
||||
cd libpcap-1.10.4
|
||||
./configure && make install
|
||||
cd ..
|
||||
wget 'https://github.com/ntop/nDPI/archive/refs/heads/dev.zip' -O libndpi-dev.zip
|
||||
unzip libndpi-dev.zip
|
||||
cd nDPI-dev
|
||||
./autogen.sh
|
||||
./configure --prefix=/usr/local --with-only-libndpi && make install
|
||||
- name: Fix kernel mmap rnd bits on Ubuntu
|
||||
if: startsWith(matrix.os, 'ubuntu')
|
||||
run: |
|
||||
# Workaround for compatinility between latest kernel and sanitizer
|
||||
# See https://github.com/actions/runner-images/issues/9491
|
||||
sudo sysctl vm.mmap_rnd_bits=28
|
||||
- name: Install Ubuntu Prerequisites
|
||||
if: startsWith(matrix.os, 'ubuntu')
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install autoconf automake cmake libtool pkg-config gettext libjson-c-dev flex bison libpcap-dev zlib1g-dev
|
||||
sudo apt-get install autoconf automake cmake libtool pkg-config gettext libjson-c-dev flex bison libpcap-dev zlib1g-dev libcurl4-openssl-dev libdbus-1-dev
|
||||
sudo apt-get install ${{ matrix.compiler }} lcov iproute2
|
||||
- name: Install Ubuntu Prerequisites (Rust/Cargo)
|
||||
if: startsWith(matrix.os, 'ubuntu') && startsWith(matrix.ndpid_rust_examples, '-DBUILD_RUST_EXAMPLES=ON')
|
||||
run: |
|
||||
sudo apt-get install cargo
|
||||
- name: Install Ubuntu Prerequisites (libgcrypt)
|
||||
if: startsWith(matrix.os, 'ubuntu') && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=ON')
|
||||
run: |
|
||||
sudo apt-get install libgcrypt20-dev
|
||||
- name: Install Ubuntu Prerequisities (zlib)
|
||||
- name: Install Ubuntu Prerequisites (zlib)
|
||||
if: startsWith(matrix.os, 'ubuntu') && startsWith(matrix.ndpid_zlib, '-DENABLE_ZLIB=ON')
|
||||
run: |
|
||||
sudo apt-get install zlib1g-dev
|
||||
- name: Install Ubuntu Prerequisites (libmaxminddb, libpcre2)
|
||||
if: startsWith(matrix.ndpid_extras, '-D')
|
||||
run: |
|
||||
sudo apt-get install libmaxminddb-dev libpcre2-dev
|
||||
- name: Install Ubuntu Prerequisites (libnl-genl-3-dev)
|
||||
if: startsWith(matrix.ndpi_build, '-DBUILD_NDPI=ON') && startsWith(matrix.coverage, '-DENABLE_COVERAGE=OFF') && startsWith(matrix.sanitizer, '-DENABLE_SANITIZER=ON') && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=OFF') && startsWith(matrix.ndpid_zlib, '-DENABLE_ZLIB=ON')
|
||||
run: |
|
||||
sudo apt-get install libnl-genl-3-dev
|
||||
- name: Checking Network Buffer Size
|
||||
run: |
|
||||
C_VAL=$(cat config.h | sed -n 's/^#define\s\+NETWORK_BUFFER_MAX_SIZE\s\+\([0-9]\+\).*$/\1/gp')
|
||||
@@ -66,49 +228,182 @@ jobs:
|
||||
test ${C_VAL} = ${PY_VAL}
|
||||
- name: Configure nDPId
|
||||
run: |
|
||||
mkdir build && cd build
|
||||
cmake .. -DENABLE_SYSTEMD=ON -DENABLE_COVERAGE=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON ${{ matrix.sanitizer }} ${{ matrix.ndpid_zlib }} ${{ matrix.ndpid_gcrypt }}
|
||||
cmake -S . -B build -Werror=dev -Werror=deprecated -DCMAKE_C_COMPILER="$CMAKE_C_COMPILER" -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" -DCMAKE_MODULE_LINKER_FLAGS="$CMAKE_MODULE_LINKER_FLAGS" -DCMAKE_C_EXE_LINKER_FLAGS="$CMAKE_C_EXE_LINKER_FLAGS" \
|
||||
-DENABLE_DBUS=ON -DENABLE_CURL=ON -DENABLE_SYSTEMD=ON \
|
||||
${{ matrix.poll }} ${{ matrix.coverage }} ${{ matrix.sanitizer }} ${{ matrix.ndpi_build }} \
|
||||
${{ matrix.ndpid_examples }} ${{ matrix.ndpid_rust_examples }} ${{ matrix.ndpid_zlib }} ${{ matrix.ndpid_gcrypt }} ${{ matrix.ndpid_extras }}
|
||||
- name: Build nDPId
|
||||
run: |
|
||||
make -C build all VERBOSE=1
|
||||
cmake --build build --verbose
|
||||
- name: Build single nDPId/nDPIsrvd executables (invoke CC directly - dynamic nDPI lib)
|
||||
if: startsWith(matrix.ndpi_build, '-DBUILD_NDPI=OFF') && startsWith(matrix.coverage, '-DENABLE_COVERAGE=OFF') && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=OFF')
|
||||
run: |
|
||||
pkg-config --cflags --libs libndpi
|
||||
cc -Wall -Wextra -std=gnu99 \
|
||||
${{ matrix.poll }} -DENABLE_MEMORY_PROFILING=1 \
|
||||
nDPId.c nio.c utils.c \
|
||||
$(pkg-config --cflags libndpi) -I. -I./dependencies -I./dependencies/jsmn -I./dependencies/uthash/include \
|
||||
-o /tmp/a.out \
|
||||
-lpcap $(pkg-config --libs libndpi) -pthread -lm
|
||||
cc -Wall -Wextra -std=gnu99 \
|
||||
${{ matrix.poll }} -DENABLE_MEMORY_PROFILING=1 \
|
||||
nDPIsrvd.c nio.c utils.c \
|
||||
-I. -I./dependencies -I./dependencies/jsmn -I./dependencies/uthash/include \
|
||||
-o /tmp/a.out
|
||||
- name: Build single nDPId/nDPIsrvd executables (invoke CC directly - static nDPI lib)
|
||||
if: startsWith(matrix.ndpi_build, '-DBUILD_NDPI=ON') && startsWith(matrix.coverage, '-DENABLE_COVERAGE=OFF') && startsWith(matrix.sanitizer, '-DENABLE_SANITIZER=ON') && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=OFF') && startsWith(matrix.ndpid_zlib, '-DENABLE_ZLIB=ON')
|
||||
run: |
|
||||
cc -Wall -Wextra -std=gnu99 ${{ matrix.poll }} -DENABLE_ZLIB=1 -DENABLE_MEMORY_PROFILING=1 \
|
||||
-fsanitize=address -fsanitize=undefined -fno-sanitize=alignment -fsanitize=enum -fsanitize=leak \
|
||||
nDPId.c nio.c utils.c \
|
||||
-I./build/libnDPI/include/ndpi -I. -I./dependencies -I./dependencies/jsmn -I./dependencies/uthash/include \
|
||||
-o /tmp/a.out \
|
||||
-lpcap ./build/libnDPI/lib/libndpi.a -pthread -lm -lz
|
||||
- name: Test EXEC
|
||||
run: |
|
||||
./build/nDPId-test || test $? -eq 1
|
||||
./build/nDPId-test
|
||||
./build/nDPId -h || test $? -eq 1
|
||||
./build/nDPIsrvd -h || test $? -eq 1
|
||||
- name: Test DIFF
|
||||
if: startsWith(matrix.os, 'ubuntu') && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=OFF')
|
||||
if: startsWith(matrix.os, 'macOS') == false && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=OFF')
|
||||
run: |
|
||||
./test/run_tests.sh ./libnDPI ./build/nDPId-test
|
||||
./test/run_config_tests.sh ./libnDPI ./build/nDPId-test
|
||||
- name: Daemon
|
||||
if: startsWith(matrix.compiler, 'gcc') || endsWith(matrix.compiler, 'clang')
|
||||
run: |
|
||||
make -C ./build daemon VERBOSE=1
|
||||
make -C ./build daemon VERBOSE=1
|
||||
- name: Coverage
|
||||
if: startsWith(matrix.coverage, '-DENABLE_COVERAGE=ON')
|
||||
run: |
|
||||
make -C ./build coverage
|
||||
- name: Dist
|
||||
if: startsWith(matrix.os, 'macOS') == false && matrix.upload == false
|
||||
run: |
|
||||
make -C ./build dist
|
||||
RAND_ID=$(( ( RANDOM ) + 1 ))
|
||||
mkdir "nDPId-dist-${RAND_ID}"
|
||||
cd "nDPId-dist-${RAND_ID}"
|
||||
tar -xjf ../nDPId-*.tar.bz2
|
||||
cd ./nDPId-*
|
||||
cmake -S . -B ./build \
|
||||
-DENABLE_DBUS=ON -DENABLE_CURL=ON -DENABLE_SYSTEMD=ON \
|
||||
${{ matrix.poll }} ${{ matrix.coverage }} ${{ matrix.sanitizer }} ${{ matrix.ndpi_build }} \
|
||||
${{ matrix.ndpid_examples }} ${{ matrix.ndpid_rust_examples }} ${{ matrix.ndpid_zlib }} ${{ matrix.ndpid_gcrypt }} ${{ matrix.ndpid_extras }}
|
||||
cd ../..
|
||||
rm -rf "nDPId-dist-${RAND_ID}"
|
||||
- name: CPack DEB
|
||||
if: startsWith(matrix.os, 'macOS') == false
|
||||
run: |
|
||||
cd ./build && cpack -G DEB && sudo dpkg -i nDPId-*.deb && cd ..
|
||||
- name: systemd test
|
||||
if: startsWith(matrix.os, 'ubuntu-latest') && startsWith(matrix.compiler, 'default-cc')
|
||||
cd ./build && cpack -G DEB && \
|
||||
sudo dpkg -i nDPId-*.deb && \
|
||||
sudo apt purge ndpid && \
|
||||
sudo dpkg -i nDPId-*.deb && cd ..
|
||||
- name: Upload DEB
|
||||
if: startsWith(matrix.os, 'macOS') == false && matrix.upload
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: nDPId-debian-packages_${{ matrix.compiler }}${{ matrix.upload_suffix }}
|
||||
path: build/*.deb
|
||||
- name: Test systemd
|
||||
if: startsWith(matrix.os, 'ubuntu') && startsWith(matrix.compiler, 'gcc')
|
||||
run: |
|
||||
ip -c address
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable ndpid@lo
|
||||
sudo systemctl start ndpid@lo
|
||||
SYSTEMCTL_RET=3; while (( $SYSTEMCTL_RET == 3 )); do systemctl is-active ndpid@lo.service; SYSTEMCTL_RET=$?; sleep 1; done
|
||||
sudo systemctl status ndpisrvd.service ndpid@lo.service
|
||||
sudo systemctl show ndpisrvd.service ndpid@lo.service -p SubState,ActiveState
|
||||
sudo dpkg -i ./build/nDPId-*.deb
|
||||
sudo systemctl status ndpisrvd.service ndpid@lo.service
|
||||
sudo systemctl show ndpisrvd.service ndpid@lo.service -p SubState,ActiveState
|
||||
sudo systemctl stop ndpisrvd.service
|
||||
journalctl --no-tail --no-pager -u ndpisrvd.service -u ndpid@lo.service
|
||||
- name: Build PF_RING and nDPId (invoke CC directly - dynamic nDPI lib)
|
||||
if: startsWith(matrix.ndpi_build, '-DBUILD_NDPI=ON') && startsWith(matrix.coverage, '-DENABLE_COVERAGE=OFF') && startsWith(matrix.sanitizer, '-DENABLE_SANITIZER=ON') && startsWith(matrix.ndpid_gcrypt, '-DNDPI_WITH_GCRYPT=OFF') && startsWith(matrix.ndpid_zlib, '-DENABLE_ZLIB=ON')
|
||||
run: |
|
||||
git clone --depth=1 https://github.com/ntop/PF_RING.git
|
||||
cd PF_RING/userland && ./configure && make && sudo make install prefix=/usr
|
||||
cd ../..
|
||||
cc -Wall -Wextra -std=gnu99 ${{ matrix.poll }} -DENABLE_PFRING=1 -DENABLE_ZLIB=1 -DENABLE_MEMORY_PROFILING=1 \
|
||||
-fsanitize=address -fsanitize=undefined -fno-sanitize=alignment -fsanitize=enum -fsanitize=leak \
|
||||
nDPId.c npfring.c nio.c utils.c \
|
||||
-I. -I./dependencies -I./dependencies/jsmn -I./dependencies/uthash/include \
|
||||
-I./build/libnDPI/include/ndpi \
|
||||
-I./PF_RING/userland/lib -I./PF_RING/kernel \
|
||||
-o /tmp/a.out \
|
||||
-ldl /usr/lib/libpfring.a -lpcap ./build/libnDPI/lib/libndpi.a -pthread -lm -lz
|
||||
- name: Build against libnDPI-${{ matrix.ndpi_min_version }}
|
||||
if: startsWith(matrix.os, 'ubuntu')
|
||||
run: |
|
||||
mkdir build-local-ndpi && cd build-local-ndpi
|
||||
WGET_RET=0
|
||||
wget 'https://github.com/ntop/nDPI/archive/refs/tags/${{ matrix.ndpi_min_version }}.tar.gz' || { WGET_RET=$?; true; }
|
||||
echo "wget returned: ${WGET_RET}"
|
||||
test $WGET_RET -ne 8 || echo "::warning file=nDPId.c::New libnDPI release required to build against release tarball."
|
||||
test $WGET_RET -ne 0 || { tar -xzvf ${{ matrix.ndpi_min_version }}.tar.gz && cd nDPI-${{ matrix.ndpi_min_version }} && ./autogen.sh --prefix=/usr --with-only-libndpi CC=${{ matrix.compiler }} CXX=false CFLAGS='-Werror' && sudo make install && cd .. ; }
|
||||
test $WGET_RET -ne 0 || { echo "running cmake .."; cmake .. -DENABLE_COVERAGE=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=OFF -DENABLE_SANITIZER=ON ${{ matrix.ndpi_min_version }} ; }
|
||||
test $WGET_RET -ne 0 || { echo "running make .."; make all VERBOSE=1 ; }
|
||||
test $WGET_RET -ne 8 && { \
|
||||
tar -xzvf ${{ matrix.ndpi_min_version }}.tar.gz; }
|
||||
test $WGET_RET -ne 8 || { \
|
||||
echo "::warning file=nDPId.c::New libnDPI release required to build against release tarball, falling back to dev branch."; \
|
||||
wget 'http://github.com/ntop/nDPI/archive/refs/heads/dev.tar.gz'; \
|
||||
WGET_RET=$?; \
|
||||
tar -xzvf dev.tar.gz; \
|
||||
mv -v 'nDPI-dev' 'nDPI-${{ matrix.ndpi_min_version }}'; }
|
||||
test $WGET_RET -ne 0 || { cd nDPI-${{ matrix.ndpi_min_version }}; \
|
||||
NDPI_CONFIGURE_ARGS=''; \
|
||||
test 'x${{ matrix.ndpid_gcrypt }}' != 'x-DNDPI_WITH_GCRYPT=ON' || NDPI_CONFIGURE_ARGS="$NDPI_CONFIGURE_ARGS --with-local-libgcrypt"; \
|
||||
test 'x${{ matrix.sanitizer }}' != 'x-DENABLE_SANITIZER=ON' || NDPI_CONFIGURE_ARGS="$NDPI_CONFIGURE_ARGS --with-sanitizer"; \
|
||||
echo "Configure arguments: '$NDPI_CONFIGURE_ARGS'"; \
|
||||
./autogen.sh; \
|
||||
./configure --prefix=/usr --with-only-libndpi $NDPI_CONFIGURE_ARGS CC="${{ matrix.compiler }}" CXX=false \
|
||||
CFLAGS="$CMAKE_C_FLAGS" && make && sudo make install; cd ..; }
|
||||
ls -alhR /usr/include/ndpi
|
||||
cd ..
|
||||
test $WGET_RET -ne 0 || { echo "Running CMake.. (pkgconfig)"; \
|
||||
cmake -S . -B ./build-local-pkgconfig \
|
||||
-DCMAKE_C_COMPILER="$CMAKE_C_COMPILER" -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" \
|
||||
-DCMAKE_C_EXE_LINKER_FLAGS="$CMAKE_C_EXE_LINKER_FLAGS" \
|
||||
-DBUILD_NDPI=OFF -DBUILD_EXAMPLES=ON \
|
||||
-DENABLE_DBUS=ON -DENABLE_CURL=ON -DENABLE_SYSTEMD=ON \
|
||||
${{ matrix.poll }} ${{ matrix.coverage }} \
|
||||
${{ matrix.sanitizer }} ${{ matrix.ndpid_examples }} ${{ matrix.ndpid_rust_examples }}; }
|
||||
test $WGET_RET -ne 0 || { echo "Running Make.. (pkgconfig)"; \
|
||||
cmake --build ./build-local-pkgconfig --verbose; }
|
||||
test $WGET_RET -ne 0 || { echo "Testing Executable.. (pkgconfig)"; \
|
||||
./build-local-pkgconfig/nDPId-test; \
|
||||
./build-local-pkgconfig/nDPId -h || test $? -eq 1; \
|
||||
./build-local-pkgconfig/nDPIsrvd -h || test $? -eq 1; }
|
||||
test $WGET_RET -ne 0 || { echo "Running CMake.. (static)"; \
|
||||
cmake -S . -B ./build-local-static \
|
||||
-DCMAKE_C_COMPILER="$CMAKE_C_COMPILER" -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" \
|
||||
-DCMAKE_C_EXE_LINKER_FLAGS="$CMAKE_C_EXE_LINKER_FLAGS" \
|
||||
-DBUILD_NDPI=OFF -DBUILD_EXAMPLES=ON \
|
||||
-DENABLE_DBUS=ON -DENABLE_CURL=ON -DENABLE_SYSTEMD=ON \
|
||||
-DNDPI_NO_PKGCONFIG=ON -DSTATIC_LIBNDPI_INSTALLDIR=/usr \
|
||||
${{ matrix.poll }} ${{ matrix.coverage }} ${{ matrix.ndpid_gcrypt }} \
|
||||
${{ matrix.sanitizer }} ${{ matrix.ndpid_examples }} ${{ matrix.ndpid_rust_examples }}; }
|
||||
test $WGET_RET -ne 0 || { echo "Running Make.. (static)"; \
|
||||
cmake --build ./build-local-static --verbose; }
|
||||
test $WGET_RET -ne 0 || { echo "Testing Executable.. (static)"; \
|
||||
./build-local-static/nDPId-test; \
|
||||
./build-local-static/nDPId -h || test $? -eq 1; \
|
||||
./build-local-static/nDPIsrvd -h || test $? -eq 1; }
|
||||
test $WGET_RET -ne 0 || test ! -d ./PF_RING || { echo "Running CMake.. (PF_RING)"; \
|
||||
cmake -S . -B ./build-local-pfring \
|
||||
-DCMAKE_C_COMPILER="$CMAKE_C_COMPILER" -DCMAKE_C_FLAGS="$CMAKE_C_FLAGS" \
|
||||
-DCMAKE_C_EXE_LINKER_FLAGS="$CMAKE_C_EXE_LINKER_FLAGS" \
|
||||
-DBUILD_NDPI=OFF -DBUILD_EXAMPLES=ON -DENABLE_PFRING=ON \
|
||||
-DENABLE_DBUS=ON -DENABLE_CURL=ON -DENABLE_SYSTEMD=ON \
|
||||
-DNDPI_NO_PKGCONFIG=ON -DSTATIC_LIBNDPI_INSTALLDIR=/usr \
|
||||
-DPFRING_LINK_STATIC=OFF \
|
||||
-DPFRING_INSTALLDIR=/usr -DPFRING_KERNEL_INC="$(realpath ./PF_RING/kernel)" \
|
||||
${{ matrix.poll }} ${{ matrix.coverage }} ${{ matrix.ndpid_gcrypt }} \
|
||||
${{ matrix.sanitizer }} ${{ matrix.ndpid_examples }} ${{ matrix.ndpid_rust_examples }}; }
|
||||
test $WGET_RET -ne 0 || test ! -d ./PF_RING || { echo "Running Make.. (PF_RING)"; \
|
||||
cmake --build ./build-local-pfring --verbose; }
|
||||
test $WGET_RET -ne 0 || test ! -d ./PF_RING || { echo "Testing Executable.. (PF_RING)"; \
|
||||
./build-local-pfring/nDPId-test; \
|
||||
./build-local-pfring/nDPId -h || test $? -eq 1; \
|
||||
./build-local-pfring/nDPIsrvd -h || test $? -eq 1; }
|
||||
test $WGET_RET -eq 0 -o $WGET_RET -eq 8
|
||||
|
||||
66
.github/workflows/sonarcloud.yml
vendored
Normal file
66
.github/workflows/sonarcloud.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- tmp
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
|
||||
name: Sonarcloud Scan
|
||||
jobs:
|
||||
sonarcloud:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
BUILD_WRAPPER_OUT_DIR: build_wrapper_output_directory
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Python 3.8 for gcovr
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.8
|
||||
- name: install gcovr 5.0
|
||||
run: |
|
||||
pip install gcovr==5.0 # 5.1 is not supported
|
||||
- name: Install sonar-scanner and build-wrapper
|
||||
uses: SonarSource/sonarcloud-github-c-cpp@v3.2.0
|
||||
- name: Install Prerequisites
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install autoconf automake cmake lcov \
|
||||
libtool pkg-config gettext \
|
||||
libjson-c-dev flex bison \
|
||||
libcurl4-openssl-dev libpcap-dev zlib1g-dev
|
||||
- name: Run build-wrapper
|
||||
run: |
|
||||
build-wrapper-linux-x86-64 --out-dir ${{ env.BUILD_WRAPPER_OUT_DIR }} ./scripts/build-sonarcloud.sh
|
||||
- name: Run tests
|
||||
run: |
|
||||
for file in $(ls libnDPI/tests/cfgs/*/pcap/*.pcap libnDPI/tests/cfgs/*/pcap/*.pcapng libnDPI/tests/cfgs/*/pcap/*.cap); do \
|
||||
echo -n "${file} "; \
|
||||
cd ./build-sonarcloud; \
|
||||
./nDPId-test "../${file}" >/dev/null 2>/dev/null; \
|
||||
cd ..; \
|
||||
echo "[ok]"; \
|
||||
done
|
||||
mkdir -p gcov_report
|
||||
cd gcov_report
|
||||
gcov ../build-sonarcloud/CMakeFiles/nDPId-test.dir/nDPId-test.c.o
|
||||
cd ..
|
||||
- name: Run sonar-scanner
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||
run: |
|
||||
sonar-scanner \
|
||||
--define sonar.projectName=nDPId \
|
||||
--define sonar.projectVersion=1.7 \
|
||||
--define sonar.sourceEncoding=UTF-8 \
|
||||
--define sonar.branch.name=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}} \
|
||||
--define sonar.organization=lnslbrty \
|
||||
--define sonar.projectKey=lnslbrty_nDPId \
|
||||
--define sonar.python.version=3.8 \
|
||||
--define sonar.cfamily.compile-commands=${{ env.BUILD_WRAPPER_OUT_DIR }}/compile_commands.json \
|
||||
--define sonar.cfamily.gcov.reportsPath=gcov_report \
|
||||
--define sonar.exclusions=build-sonarcloud/**,libnDPI/**,test/results/**,dependencies/jsmn/**,dependencies/uthash/**,examples/js-rt-analyzer-frontend/**,examples/js-rt-analyzer/**,examples/c-collectd/www/**,examples/py-flow-dashboard/assets/**
|
||||
@@ -3,6 +3,9 @@ image: debian:stable
|
||||
stages:
|
||||
- build_and_test
|
||||
|
||||
variables:
|
||||
GIT_CLONE_PATH: '$CI_BUILDS_DIR/$CI_JOB_ID/$CI_PROJECT_NAME'
|
||||
|
||||
before_script:
|
||||
- export DEBIAN_FRONTEND=noninteractive
|
||||
- apt-get update -qq
|
||||
@@ -10,7 +13,7 @@ before_script:
|
||||
apt-get install -y -qq \
|
||||
coreutils sudo \
|
||||
build-essential make cmake binutils gcc clang autoconf automake \
|
||||
libtool pkg-config git \
|
||||
libtool pkg-config git wget unzip \
|
||||
libpcap-dev libgpg-error-dev libjson-c-dev zlib1g-dev \
|
||||
netcat-openbsd python3 python3-jsonschema tree lcov iproute2
|
||||
|
||||
@@ -31,12 +34,13 @@ build_and_test_static_libndpi_tsan:
|
||||
# static linked build
|
||||
- mkdir build-clang-tsan
|
||||
- cd build-clang-tsan
|
||||
- env CMAKE_C_FLAGS='-Werror' CMAKE_C_COMPILER='clang' cmake .. -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DENABLE_SANITIZER_THREAD=ON -DENABLE_ZLIB=ON
|
||||
- make distclean-libnDPI
|
||||
- env CMAKE_C_FLAGS='-Werror' CMAKE_C_COMPILER='clang' cmake .. -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DBUILD_NDPI_FORCE_GIT_UPDATE=ON -DENABLE_SANITIZER_THREAD=ON -DENABLE_ZLIB=ON
|
||||
- make clean-libnDPI
|
||||
- make libnDPI
|
||||
- tree libnDPI
|
||||
- make install VERBOSE=1 DESTDIR="$(realpath ../_install)"
|
||||
- cd ..
|
||||
- ./_install/usr/local/bin/nDPId-test
|
||||
- ./test/run_tests.sh ./libnDPI ./_install/usr/local/bin/nDPId-test
|
||||
artifacts:
|
||||
expire_in: 1 week
|
||||
@@ -48,8 +52,8 @@ build_and_test_static_libndpi:
|
||||
script:
|
||||
- mkdir build-cmake-submodule
|
||||
- cd build-cmake-submodule
|
||||
- env CMAKE_C_FLAGS='-Werror' cmake .. -DENABLE_SYSTEMD=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DENABLE_ZLIB=ON
|
||||
- make distclean-libnDPI
|
||||
- env CMAKE_C_FLAGS='-Werror' cmake .. -DENABLE_SYSTEMD=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DBUILD_NDPI_FORCE_GIT_UPDATE=ON -DENABLE_ZLIB=ON
|
||||
- make clean-libnDPI
|
||||
- make libnDPI
|
||||
- tree libnDPI
|
||||
- make install VERBOSE=1 DESTDIR="$(realpath ../_install)"
|
||||
@@ -61,10 +65,12 @@ build_and_test_static_libndpi:
|
||||
- test -x /bin/systemctl && sudo systemctl start ndpid@lo
|
||||
- test -x /bin/systemctl && sudo systemctl status ndpisrvd.service ndpid@lo.service
|
||||
- test -x /bin/systemctl && sudo systemctl stop ndpid@lo
|
||||
- ./build-cmake-submodule/nDPId-test
|
||||
- ./test/run_tests.sh ./libnDPI ./build-cmake-submodule/nDPId-test
|
||||
- >
|
||||
if ldd ./build-cmake-submodule/nDPId | grep -qoEi libndpi; then \
|
||||
echo 'nDPId linked against a static libnDPI should not contain a shared linked libnDPI.' >&2; false; fi
|
||||
- cc -Wall -Wextra -std=gnu99 nDPId.c nio.c utils.c -I./build-cmake-submodule/libnDPI/include/ndpi -I. -I./dependencies -I./dependencies/jsmn -I./dependencies/uthash/include -o /tmp/a.out -lpcap ./build-cmake-submodule/libnDPI/lib/libndpi.a -pthread -lm -lz
|
||||
artifacts:
|
||||
expire_in: 1 week
|
||||
paths:
|
||||
@@ -76,15 +82,16 @@ build_and_test_static_libndpi_coverage:
|
||||
script:
|
||||
- mkdir build-cmake-submodule
|
||||
- cd build-cmake-submodule
|
||||
- env CMAKE_C_FLAGS='-Werror' cmake .. -DENABLE_SYSTEMD=ON -DENABLE_COVERAGE=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DENABLE_SANITIZER=ON -DENABLE_ZLIB=ON
|
||||
- make distclean-libnDPI
|
||||
- env CMAKE_C_FLAGS='-Werror' cmake .. -DENABLE_SYSTEMD=ON -DENABLE_COVERAGE=ON -DBUILD_EXAMPLES=ON -DBUILD_NDPI=ON -DBUILD_NDPI_FORCE_GIT_UPDATE=ON -DENABLE_SANITIZER=ON -DENABLE_ZLIB=ON
|
||||
- make clean-libnDPI
|
||||
- make libnDPI
|
||||
- tree libnDPI
|
||||
- make install VERBOSE=1 DESTDIR="$(realpath ../_install)"
|
||||
- cd ..
|
||||
- ./build-cmake-submodule/nDPId-test
|
||||
- ./test/run_tests.sh ./libnDPI ./build-cmake-submodule/nDPId-test
|
||||
# generate coverage report
|
||||
- make -C ./build-cmake-submodule coverage
|
||||
- make -C ./build-cmake-submodule coverage || true
|
||||
- >
|
||||
if ldd build/nDPId | grep -qoEi libndpi; then \
|
||||
echo 'nDPId linked against a static libnDPI should not contain a shared linked libnDPI.' >&2; false; fi
|
||||
@@ -100,7 +107,8 @@ build_dynamic_libndpi:
|
||||
# pkg-config dynamic linked build
|
||||
- git clone https://github.com/ntop/nDPI.git
|
||||
- cd nDPI
|
||||
- ./autogen.sh --prefix="$(realpath ../_install)" --enable-option-checking=fatal
|
||||
- ./autogen.sh
|
||||
- ./configure --prefix="$(realpath ../_install)" --enable-option-checking=fatal
|
||||
- make install V=s
|
||||
- cd ..
|
||||
- tree ./_install
|
||||
@@ -112,7 +120,7 @@ build_dynamic_libndpi:
|
||||
- make install VERBOSE=1 DESTDIR="$(realpath ../_install)"
|
||||
- cd ..
|
||||
- tree ./_install
|
||||
- ./build/nDPId-test || test $? -eq 1
|
||||
- ./build/nDPId-test
|
||||
- ./build/nDPId -h || test $? -eq 1
|
||||
- ./build/nDPIsrvd -h || test $? -eq 1
|
||||
# dameon start/stop test
|
||||
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -9,3 +9,6 @@
|
||||
[submodule "examples/js-rt-analyzer-frontend"]
|
||||
path = examples/js-rt-analyzer-frontend
|
||||
url = https://gitlab.com/verzulli/ndpid-rt-analyzer-frontend.git
|
||||
[submodule "examples/cxx-graph"]
|
||||
path = examples/cxx-graph
|
||||
url = https://github.com/utoni/nDPId-Graph.git
|
||||
|
||||
83
CHANGELOG.md
Normal file
83
CHANGELOG.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# CHANGELOG
|
||||
|
||||
#### nDPId 1.7 (Oct 2024)
|
||||
|
||||
- Read and parse configuration files for nDPId (+ libnDPI) and nDPIsrvd
|
||||
- Added loading risk domains from a file (`-R`, thanks to @UnveilTech)
|
||||
- Added Filebeat configuration file
|
||||
- Improved hostname handling; will now always be part of `analyse`/`end`/`idle` events (if dissected)
|
||||
- Improved Documentation (INSTALL / Schema)
|
||||
- Added PF\_RING support
|
||||
- Improved nDPIsrvd-analyse to write global stats to a CSV
|
||||
- Added global (heap) memory stats for daemon status events (if enabled)
|
||||
- Fixed IPv6 address/netmask retrieval on some systems
|
||||
- Improved nDPIsrvd-collect; gauges and counters are now handled the right way
|
||||
- Added nDPId Grafana dashboard
|
||||
- Fixed `detection-update` event bug; was thrown even if nothing changed
|
||||
- Fixed `not-detected` event spam if detection not completed (in some rare cases)
|
||||
- Improved InfluxDB push daemon (severity parsing / gauge handling)
|
||||
- Improved zLib compression
|
||||
- Fixed nDPIsrvd-collectd missing escape character
|
||||
|
||||
#### nDPId 1.6 (Nov 2023)
|
||||
|
||||
- Added Event I/O abstraction layer (supporting only poll/epoll by now)
|
||||
- Support for OSX and *BSD systems
|
||||
- Added proper DLT_RAW dissection for IPv4 and IPv6
|
||||
- Improved TCP timeout handling if FIN/RST seen which caused Midstream TCP flows when there shouldn't be any
|
||||
- Fixed a crash if `nDPId -o value=''` was used
|
||||
- Added OpenWrt packaging
|
||||
- Added new flow event "analyse" used to give some statistical information about active flows
|
||||
- Added new analyse event daemon which generates CSV files from such events
|
||||
- Fixed a crash in nDPIsrvd if a collector closes a connection
|
||||
- Support `nDPId` to send it's data to a UDP endpoint instead of a nDPIsrvd collector
|
||||
- Added events and flow states documentation
|
||||
- Added basic systemd support
|
||||
- Fixed a bug in base64 encoding which could lead to invalid base64 strings
|
||||
- Added some machine learning examples
|
||||
- Fixed various smaller bugs
|
||||
- Fixed nDPIsrvd bug which causes invalid JSON messages sent to Distributors
|
||||
|
||||
#### nDPId 1.5 (Apr 2022)
|
||||
|
||||
- Improved nDPId cross compilation
|
||||
- zLib flow memory compression (Experimental!)
|
||||
- Memory profiling for nDPId-test
|
||||
- JSMN with parent link support for subtoken iteration
|
||||
- Refactored nDPIsrvd buffer and buffer bloat handling
|
||||
- Upgraded JSMN/uthash
|
||||
- Improved nDPIsrvd.(h|py) debugging capability for client apps
|
||||
- Advanced flow usage logging usable for memory profiling
|
||||
- Support for dissection additional layer2/layer3 protocols
|
||||
- Serialize more JSON information
|
||||
- Add TCP/IP support for nDPIsrvd
|
||||
- Improved nDPIsrvd connection lost behaviour
|
||||
- Reworked Python/C distributor API
|
||||
- Support read()/recv() timeouts and nonblocking I/O
|
||||
|
||||
|
||||
#### nDPId 1.4 (Jun 2021)
|
||||
|
||||
- Use layer4 specific flow timeouts for nDPId
|
||||
- Reworked layer4 flow length names and calculations (use only layer4 payload w/o any previous headers) for nDPId
|
||||
- Build system cleanup and cosmetics
|
||||
|
||||
|
||||
#### nDPId 1.3 (May 2021)
|
||||
|
||||
- Added missing datalink layer types
|
||||
|
||||
|
||||
#### nDPId 1.2 (May 2021)
|
||||
|
||||
- OpenWrt compatible build system
|
||||
|
||||
|
||||
#### nDPId 1.1 (May 2021)
|
||||
|
||||
- Added License information
|
||||
|
||||
|
||||
#### nDPId 1.0 (May 2021)
|
||||
|
||||
- First public release
|
||||
453
CMakeLists.txt
453
CMakeLists.txt
@@ -1,37 +1,85 @@
|
||||
cmake_minimum_required(VERSION 3.12.4)
|
||||
project(nDPId C)
|
||||
if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
execute_process(COMMAND ${CMAKE_C_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
|
||||
if (GCC_VERSION VERSION_GREATER 4.7 OR GCC_VERSION VERSION_EQUAL 4.7)
|
||||
message(STATUS "${CMAKE_C_COMPILER} supports C11 standard.")
|
||||
else ()
|
||||
message(FATAL_ERROR "C Compiler with C11 standard needed. Therefore a gcc compiler with a version equal or higher than 4.7 is needed.")
|
||||
endif()
|
||||
endif(CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(CMAKE_C_STANDARD 11)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_DEFAULT_SOURCE=1 -D_GNU_SOURCE=1")
|
||||
if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
|
||||
message(FATAL_ERROR "In-source builds are not allowed.\n"
|
||||
"Please remove ${PROJECT_SOURCE_DIR}/CMakeCache.txt\n"
|
||||
"and\n"
|
||||
"${PROJECT_SOURCE_DIR}/CMakeFiles\n"
|
||||
"Create a build directory somewhere and run CMake again.")
|
||||
"Create a build directory somewhere and run CMake again.\n"
|
||||
"Or run: 'cmake -S ${PROJECT_SOURCE_DIR} -B ./your-custom-build-dir [CMAKE-OPTIONS]'")
|
||||
endif()
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake)
|
||||
find_package(PkgConfig REQUIRED)
|
||||
|
||||
set(CPACK_PACKAGE_CONTACT "toni@impl.cc")
|
||||
set(CPACK_DEBIAN_PACKAGE_NAME "nDPId")
|
||||
set(CPACK_DEBIAN_PACKAGE_SECTION "network")
|
||||
set(CPACK_DEBIAN_PACKAGE_DESCRIPTION "nDPId is a set of daemons and tools to capture, process and classify network traffic.")
|
||||
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Toni Uhlig <toni@impl.cc>")
|
||||
set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_SOURCE_DIR}/packages/debian/preinst;${CMAKE_SOURCE_DIR}/packages/debian/prerm;${CMAKE_SOURCE_DIR}/packages/debian/postrm")
|
||||
set(CPACK_DEBIAN_PACKAGE_CONTROL_STRICT_PERMISSION TRUE)
|
||||
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
|
||||
set(CPACK_DEBIAN_DEBUGINFO_PACKAGE ON)
|
||||
set(CPACK_RPM_PACKAGE_LICENSE "GPL-3")
|
||||
set(CPACK_RPM_PACKAGE_VENDOR "Toni Uhlig")
|
||||
set(CPACK_RPM_PACKAGE_URL "https://www.github.com/utoni/nDPId.git")
|
||||
set(CPACK_RPM_PACKAGE_DESCRIPTION "nDPId is a set of daemons and tools to capture, process and classify network traffic.")
|
||||
set(CPACK_RPM_PRE_INSTALL_SCRIPT_FILE "${CMAKE_SOURCE_DIR}/packages/redhat/pre_install")
|
||||
set(CPACK_RPM_PRE_UNINSTALL_SCRIPT_FILE "${CMAKE_SOURCE_DIR}/packages/redhat/pre_uninstall")
|
||||
set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${CMAKE_SOURCE_DIR}/packages/redhat/post_uninstall")
|
||||
set(CPACK_STRIP_FILES ON)
|
||||
set(CPACK_PACKAGE_VERSION_MAJOR 1)
|
||||
set(CPACK_PACKAGE_VERSION_MINOR 5)
|
||||
set(CPACK_PACKAGE_VERSION_MINOR 7)
|
||||
set(CPACK_PACKAGE_VERSION_PATCH 0)
|
||||
# Note: CPACK_PACKAGING_INSTALL_PREFIX and CMAKE_INSTALL_PREFIX are *not* the same.
|
||||
# It is used only to ease environment file loading via systemd.
|
||||
set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
|
||||
set(CMAKE_MACOSX_RPATH 1)
|
||||
|
||||
include(CPack)
|
||||
include(CheckFunctionExists)
|
||||
include(CheckLibraryExists)
|
||||
include(CheckEpoll)
|
||||
|
||||
check_epoll(HAS_EPOLL)
|
||||
if(HAS_EPOLL)
|
||||
option(FORCE_POLL "Force the use of poll() instead of epoll()." OFF)
|
||||
if(NOT FORCE_POLL)
|
||||
set(EPOLL_DEFS "-DENABLE_EPOLL=1")
|
||||
endif()
|
||||
else()
|
||||
if(BUILD_EXAMPLES)
|
||||
message(FATAL_ERROR "Examples are using epoll event I/O. Without epoll available, you can not build/run those.")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT MATH_FUNCTION_EXISTS AND NOT NEED_LINKING_AGAINST_LIBM)
|
||||
CHECK_FUNCTION_EXISTS(log2f MATH_FUNCTION_EXISTS)
|
||||
if(NOT MATH_FUNCTION_EXISTS)
|
||||
unset(MATH_FUNCTION_EXISTS CACHE)
|
||||
list(APPEND CMAKE_REQUIRED_LIBRARIES m)
|
||||
CHECK_FUNCTION_EXISTS(log2f MATH_FUNCTION_EXISTS)
|
||||
if(MATH_FUNCTION_EXISTS)
|
||||
set(NEED_LINKING_AGAINST_LIBM TRUE CACHE BOOL "" FORCE)
|
||||
else()
|
||||
message(FATAL_ERROR "Failed making the log2f() function available")
|
||||
endif()
|
||||
endif()
|
||||
list(APPEND CMAKE_REQUIRED_LIBRARIES m)
|
||||
CHECK_FUNCTION_EXISTS(log2f MATH_FUNCTION_EXISTS)
|
||||
if(MATH_FUNCTION_EXISTS)
|
||||
set(NEED_LINKING_AGAINST_LIBM TRUE CACHE BOOL "" FORCE)
|
||||
else()
|
||||
check_library_exists(m sqrt "" NEED_LINKING_AGAINST_LIBM)
|
||||
if(NOT NEED_LINKING_AGAINST_LIBM)
|
||||
# Was not able to figure out if explicit linkage against libm is required.
|
||||
# Forcing libm linkage. Good idea?
|
||||
set(NEED_LINKING_AGAINST_LIBM TRUE CACHE BOOL "" FORCE)
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NEED_LINKING_AGAINST_LIBM)
|
||||
@@ -46,9 +94,84 @@ option(ENABLE_SANITIZER_THREAD "Enable TSAN (does not work together with ASAN)."
|
||||
option(ENABLE_MEMORY_PROFILING "Enable dynamic memory tracking." OFF)
|
||||
option(ENABLE_ZLIB "Enable zlib support for nDPId (experimental)." OFF)
|
||||
option(ENABLE_SYSTEMD "Install systemd components." OFF)
|
||||
option(ENABLE_CRYPTO "Enable OpenSSL cryptographic support in nDPId/nDPIsrvd." OFF)
|
||||
option(BUILD_EXAMPLES "Build C examples." ON)
|
||||
option(BUILD_RUST_EXAMPLES "Build Rust examples." OFF)
|
||||
if(BUILD_EXAMPLES)
|
||||
option(ENABLE_DBUS "Build DBus notification example." OFF)
|
||||
option(ENABLE_CURL "Build influxdb data write example." OFF)
|
||||
endif()
|
||||
option(ENABLE_PFRING "Enable PF_RING support for nDPId (experimental)" OFF)
|
||||
option(BUILD_NDPI "Clone and build nDPI from github." OFF)
|
||||
|
||||
if(ENABLE_PFRING)
|
||||
option(PFRING_LINK_STATIC "Link against a static version of pfring." ON)
|
||||
set(PFRING_KERNEL_INC "" CACHE STRING "Path to PFRING kernel module include directory.")
|
||||
set(PFRING_DEFS "-DENABLE_PFRING=1")
|
||||
|
||||
if(PFRING_KERNEL_INC STREQUAL "")
|
||||
message(FATAL_ERROR "PFRING_KERNEL_INC needs to be set to the PFRING kernel module include directory.")
|
||||
endif()
|
||||
if(NOT EXISTS "${PFRING_KERNEL_INC}/linux/pf_ring.h")
|
||||
message(FATAL_ERROR "Expected to find <linux/pf_ring.h> below ${PFRING_KERNEL_INC}, but none found.")
|
||||
endif()
|
||||
|
||||
set(PFRING_INSTALLDIR "/opt/PF_RING/usr" CACHE STRING "")
|
||||
set(PFRING_INC "${PFRING_INSTALLDIR}/include")
|
||||
|
||||
if(NOT EXISTS "${PFRING_INC}")
|
||||
message(FATAL_ERROR "Include directory \"${PFRING_INC}\" does not exist!")
|
||||
endif()
|
||||
if(PFRING_LINK_STATIC)
|
||||
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
if(EXISTS "${PFRING_INSTALLDIR}/lib64")
|
||||
set(STATIC_PFRING_LIB "${PFRING_INSTALLDIR}/lib64/libpfring.a")
|
||||
else()
|
||||
set(STATIC_PFRING_LIB "${PFRING_INSTALLDIR}/lib/libpfring.a")
|
||||
endif()
|
||||
else()
|
||||
if(EXISTS "${PFRING_INSTALLDIR}/lib32")
|
||||
set(STATIC_PFRING_LIB "${PFRING_INSTALLDIR}/lib32/libpfring.a")
|
||||
else()
|
||||
set(STATIC_PFRING_LIB "${PFRING_INSTALLDIR}/lib/libpfring.a")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT EXISTS "${STATIC_PFRING_LIB}")
|
||||
message(FATAL_ERROR "Static library \"${STATIC_PFRING_LIB}\" does not exist!")
|
||||
endif()
|
||||
else()
|
||||
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
if(EXISTS "${PFRING_INSTALLDIR}/lib64")
|
||||
find_library(PF_RING_LIB pfring PATHS "${PFRING_INSTALLDIR}/lib64")
|
||||
else()
|
||||
find_library(PF_RING_LIB pfring PATHS "${PFRING_INSTALLDIR}/lib")
|
||||
endif()
|
||||
else()
|
||||
if(EXISTS "${PFRING_INSTALLDIR}/lib32")
|
||||
find_library(PF_RING_LIB pfring PATHS "${PFRING_INSTALLDIR}/lib32")
|
||||
else()
|
||||
find_library(PF_RING_LIB pfring PATHS "${PFRING_INSTALLDIR}/lib")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT PF_RING_LIB)
|
||||
message(FATAL_ERROR "libpfring.so not found below ${PFRING_INSTALLDIR}/{lib,lib32,lib64}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT EXISTS "${PFRING_INSTALLDIR}/include/pfring.h")
|
||||
message(FATAL_ERROR "Expected to find <include/pfring.h> inside ${PFRING_INSTALLDIR}, but none found.")
|
||||
endif()
|
||||
else()
|
||||
unset(PFRING_INSTALLDIR CACHE)
|
||||
unset(PFRING_INC CACHE)
|
||||
unset(STATIC_PFRING_LIB CACHE)
|
||||
unset(PFRING_LINK_STATIC CACHE)
|
||||
endif()
|
||||
|
||||
if(BUILD_NDPI)
|
||||
option(BUILD_NDPI_FORCE_GIT_UPDATE "Forcefully instruments nDPI build script to update the git submodule." OFF)
|
||||
unset(NDPI_NO_PKGCONFIG CACHE)
|
||||
unset(STATIC_LIBNDPI_INSTALLDIR CACHE)
|
||||
else()
|
||||
@@ -73,22 +196,38 @@ else()
|
||||
unset(NDPI_WITH_MAXMINDDB CACHE)
|
||||
endif()
|
||||
|
||||
add_executable(nDPId nDPId.c utils.c)
|
||||
add_executable(nDPIsrvd nDPIsrvd.c utils.c)
|
||||
add_executable(nDPId-test nDPId-test.c)
|
||||
if(ENABLE_PFRING)
|
||||
set(NDPID_PFRING_SRCS npfring.c)
|
||||
endif()
|
||||
if(ENABLE_CRYPTO)
|
||||
set(CRYPTO_SRCS ncrypt.c)
|
||||
endif()
|
||||
add_executable(nDPId nDPId.c ${NDPID_PFRING_SRCS} ${CRYPTO_SRCS} nio.c utils.c)
|
||||
add_executable(nDPIsrvd nDPIsrvd.c nio.c utils.c)
|
||||
add_executable(nDPId-test nDPId-test.c ${NDPID_PFRING_SRCS} ${CRYPTO_SRCS})
|
||||
|
||||
add_custom_target(umask_check)
|
||||
add_custom_command(
|
||||
TARGET umask_check
|
||||
PRE_BUILD
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/scripts/umask-check.sh
|
||||
)
|
||||
add_dependencies(nDPId umask_check)
|
||||
|
||||
add_custom_target(dist)
|
||||
add_custom_command(
|
||||
TARGET dist
|
||||
PRE_BUILD
|
||||
COMMAND "${CMAKE_SOURCE_DIR}/scripts/make-dist.sh"
|
||||
)
|
||||
|
||||
add_custom_target(daemon)
|
||||
add_custom_command(
|
||||
TARGET daemon
|
||||
TARGET daemon
|
||||
POST_BUILD
|
||||
COMMAND env nDPIsrvd_ARGS='-C 1024' "${CMAKE_SOURCE_DIR}/scripts/daemon.sh" "$<TARGET_FILE:nDPId>" "$<TARGET_FILE:nDPIsrvd>"
|
||||
DEPENDS nDPId nDPIsrvd
|
||||
)
|
||||
add_dependencies(daemon nDPId nDPIsrvd)
|
||||
|
||||
if(CMAKE_CROSSCOMPILING)
|
||||
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
|
||||
@@ -120,14 +259,22 @@ if(ENABLE_COVERAGE)
|
||||
COMMAND genhtml -o "${CMAKE_BINARY_DIR}/coverage_report" "${CMAKE_BINARY_DIR}/lcov.info"
|
||||
DEPENDS nDPId nDPId-test nDPIsrvd
|
||||
)
|
||||
add_custom_target(coverage-clean)
|
||||
add_custom_command(
|
||||
TARGET coverage-clean
|
||||
COMMAND find "${CMAKE_BINARY_DIR}" "${CMAKE_SOURCE_DIR}/libnDPI" -name "*.gcda" -delete
|
||||
POST_BUILD
|
||||
)
|
||||
add_custom_target(coverage-view)
|
||||
add_custom_command(
|
||||
TARGET coverage-view
|
||||
COMMAND cd "${CMAKE_BINARY_DIR}/coverage_report" && python3 -m http.server
|
||||
DEPENDS "${CMAKE_BINARY_DIR}/coverage_report/nDPId/index.html"
|
||||
POST_BUILD
|
||||
)
|
||||
add_dependencies(coverage-view coverage)
|
||||
endif()
|
||||
if(ENABLE_SANITIZER)
|
||||
# TODO: Check for `-fsanitize-memory-track-origins` and add if available?
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=address -fsanitize=undefined -fno-sanitize=alignment -fsanitize=enum -fsanitize=leak")
|
||||
endif()
|
||||
if(ENABLE_SANITIZER_THREAD)
|
||||
@@ -137,21 +284,35 @@ if(ENABLE_ZLIB)
|
||||
set(ZLIB_DEFS "-DENABLE_ZLIB=1")
|
||||
pkg_check_modules(ZLIB REQUIRED zlib)
|
||||
endif()
|
||||
if(NDPI_WITH_GCRYPT)
|
||||
message(STATUS "nDPI: Enable GCRYPT")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --with-local-libgcrypt")
|
||||
if(BUILD_EXAMPLES)
|
||||
if(ENABLE_DBUS)
|
||||
pkg_check_modules(DBUS REQUIRED dbus-1)
|
||||
endif()
|
||||
if(ENABLE_CURL)
|
||||
pkg_check_modules(CURL REQUIRED libcurl)
|
||||
endif()
|
||||
endif()
|
||||
if(NDPI_WITH_PCRE)
|
||||
message(STATUS "nDPI: Enable PCRE")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --with-pcre")
|
||||
endif()
|
||||
if(NDPI_WITH_MAXMINDDB)
|
||||
message(STATUS "nDPI: Enable MAXMINDDB")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --with-maxminddb")
|
||||
endif()
|
||||
if(ENABLE_COVERAGE)
|
||||
message(STATUS "nDPI: Enable Coverage")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --enable-code-coverage")
|
||||
if(BUILD_NDPI)
|
||||
if(NDPI_WITH_GCRYPT)
|
||||
message(STATUS "nDPI: Enable GCRYPT")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --with-local-libgcrypt")
|
||||
endif()
|
||||
if(NDPI_WITH_PCRE)
|
||||
message(STATUS "nDPI: Enable PCRE")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --with-pcre2")
|
||||
endif()
|
||||
if(NDPI_WITH_MAXMINDDB)
|
||||
message(STATUS "nDPI: Enable MAXMINDDB")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --with-maxminddb")
|
||||
endif()
|
||||
if(ENABLE_COVERAGE)
|
||||
message(STATUS "nDPI: Enable Coverage")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --enable-code-coverage")
|
||||
endif()
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "")
|
||||
message(STATUS "nDPI: Enable Debug Build")
|
||||
set(NDPI_ADDITIONAL_ARGS "${NDPI_ADDITIONAL_ARGS} --enable-debug-build --enable-debug-messages")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
execute_process(
|
||||
@@ -166,6 +327,7 @@ if(GIT_VERSION STREQUAL "" OR NOT IS_DIRECTORY "${CMAKE_SOURCE_DIR}/.git")
|
||||
set(GIT_VERSION "${CPACK_PACKAGE_VERSION}-release")
|
||||
endif()
|
||||
endif()
|
||||
set(PKG_VERSION "${CPACK_PACKAGE_VERSION}")
|
||||
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra")
|
||||
set(NDPID_DEFS -DJSMN_STATIC=1 -DJSMN_STRICT=1 -DJSMN_PARENT_LINKS=1)
|
||||
@@ -173,8 +335,13 @@ set(NDPID_DEPS_INC "${CMAKE_SOURCE_DIR}"
|
||||
"${CMAKE_SOURCE_DIR}/dependencies"
|
||||
"${CMAKE_SOURCE_DIR}/dependencies/jsmn"
|
||||
"${CMAKE_SOURCE_DIR}/dependencies/uthash/src")
|
||||
if(CMAKE_CROSSCOMPILING)
|
||||
add_definitions("-DCROSS_COMPILATION=1")
|
||||
endif()
|
||||
if(ENABLE_MEMORY_PROFILING)
|
||||
message(WARNING "ENABLE_MEMORY_PROFILING should not be used in production environments.")
|
||||
if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT CMAKE_BUILD_TYPE STREQUAL "")
|
||||
message(WARNING "ENABLE_MEMORY_PROFILING should not be used in production environments.")
|
||||
endif()
|
||||
add_definitions("-DENABLE_MEMORY_PROFILING=1"
|
||||
"-Duthash_malloc=nDPIsrvd_uthash_malloc"
|
||||
"-Duthash_free=nDPIsrvd_uthash_free")
|
||||
@@ -207,6 +374,7 @@ if(BUILD_NDPI)
|
||||
ADDITIONAL_ARGS=${NDPI_ADDITIONAL_ARGS}
|
||||
MAKE_PROGRAM=${CMAKE_MAKE_PROGRAM}
|
||||
DEST_INSTALL=${CMAKE_BINARY_DIR}/libnDPI
|
||||
FORCE_GIT_UPDATE=${BUILD_NDPI_FORCE_GIT_UPDATE}
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/scripts/get-and-build-libndpi.sh
|
||||
BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/libnDPI/lib/libndpi.a
|
||||
BUILD_COMMAND ""
|
||||
@@ -214,25 +382,29 @@ if(BUILD_NDPI)
|
||||
BUILD_IN_SOURCE 1)
|
||||
|
||||
add_custom_target(clean-libnDPI
|
||||
COMMAND rm -rf ${CMAKE_BINARY_DIR}/libnDPI ${CMAKE_BINARY_DIR}/libnDPI-prefix
|
||||
COMMAND ${CMAKE_BUILD_TOOL} clean
|
||||
COMMAND rm -rf ${CMAKE_BINARY_DIR}/libnDPI
|
||||
COMMAND rm -f ${CMAKE_BINARY_DIR}/libnDPI-prefix/src/libnDPI-stamp/libnDPI-configure
|
||||
)
|
||||
add_custom_target(distclean-libnDPI
|
||||
COMMAND cd ${CMAKE_SOURCE_DIR}/libnDPI && git clean -df . && git clean -dfX .
|
||||
)
|
||||
add_dependencies(distclean-libnDPI clean-libnDPI)
|
||||
|
||||
set(STATIC_LIBNDPI_INSTALLDIR "${CMAKE_BINARY_DIR}/libnDPI")
|
||||
add_dependencies(nDPId libnDPI)
|
||||
add_dependencies(nDPId-test libnDPI)
|
||||
endif()
|
||||
|
||||
if(ENABLE_CRYPTO)
|
||||
find_package(OpenSSL REQUIRED)
|
||||
set(OSSL_DEFS "-DENABLE_CRYPTO=1")
|
||||
set(OSSL_LIBRARY "${OPENSSL_SSL_LIBRARY}" "${OPENSSL_CRYPTO_LIBRARY}")
|
||||
endif()
|
||||
|
||||
if(STATIC_LIBNDPI_INSTALLDIR OR BUILD_NDPI OR NDPI_NO_PKGCONFIG)
|
||||
if(NDPI_WITH_GCRYPT)
|
||||
find_package(GCRYPT "1.4.2" REQUIRED)
|
||||
endif()
|
||||
|
||||
if(NDPI_WITH_PCRE)
|
||||
pkg_check_modules(PCRE REQUIRED libpcre>=8.39)
|
||||
pkg_check_modules(PCRE REQUIRED libpcre2-8)
|
||||
endif()
|
||||
|
||||
if(NDPI_WITH_MAXMINDDB)
|
||||
@@ -243,7 +415,19 @@ endif()
|
||||
if(STATIC_LIBNDPI_INSTALLDIR OR BUILD_NDPI)
|
||||
add_definitions("-DLIBNDPI_STATIC=1")
|
||||
set(STATIC_LIBNDPI_INC "${STATIC_LIBNDPI_INSTALLDIR}/include/ndpi")
|
||||
set(STATIC_LIBNDPI_LIB "${STATIC_LIBNDPI_INSTALLDIR}/lib/libndpi.a")
|
||||
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
if(EXISTS "${STATIC_LIBNDPI_INSTALLDIR}/lib64/libndpi.a")
|
||||
set(STATIC_LIBNDPI_LIB "${STATIC_LIBNDPI_INSTALLDIR}/lib64/libndpi.a")
|
||||
else()
|
||||
set(STATIC_LIBNDPI_LIB "${STATIC_LIBNDPI_INSTALLDIR}/lib/libndpi.a")
|
||||
endif()
|
||||
else()
|
||||
if(EXISTS "${STATIC_LIBNDPI_INSTALLDIR}/lib32/libndpi.a")
|
||||
set(STATIC_LIBNDPI_LIB "${STATIC_LIBNDPI_INSTALLDIR}/lib32/libndpi.a")
|
||||
else()
|
||||
set(STATIC_LIBNDPI_LIB "${STATIC_LIBNDPI_INSTALLDIR}/lib/libndpi.a")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(STATIC_LIBNDPI_INSTALLDIR AND NOT BUILD_NDPI)
|
||||
if(NOT EXISTS "${STATIC_LIBNDPI_INC}" OR NOT EXISTS "${STATIC_LIBNDPI_LIB}")
|
||||
@@ -253,9 +437,13 @@ if(STATIC_LIBNDPI_INSTALLDIR OR BUILD_NDPI)
|
||||
endif()
|
||||
|
||||
unset(DEFAULT_NDPI_INCLUDE CACHE)
|
||||
unset(pkgcfg_lib_NDPI_ndpi CACHE)
|
||||
else()
|
||||
if(NOT NDPI_NO_PKGCONFIG)
|
||||
pkg_check_modules(NDPI REQUIRED libndpi>=4.5.0)
|
||||
pkg_check_modules(NDPI REQUIRED libndpi>=5.0.0)
|
||||
if(NOT pkgcfg_lib_NDPI_ndpi)
|
||||
find_package(NDPI "5.0.0" REQUIRED)
|
||||
endif()
|
||||
|
||||
unset(STATIC_LIBNDPI_INC CACHE)
|
||||
unset(STATIC_LIBNDPI_LIB CACHE)
|
||||
@@ -264,32 +452,56 @@ else()
|
||||
set(DEFAULT_NDPI_INCLUDE ${NDPI_INCLUDE_DIRS})
|
||||
endif()
|
||||
|
||||
find_package(PCAP "1.8.1" REQUIRED)
|
||||
pkg_check_modules(PCAP libpcap>=1.9.0) # no *.pc file before 1.9.0
|
||||
if(NOT pkgcfg_lib_PCAP_pcap)
|
||||
pkg_check_modules(PCAP libpcap>=1.8.1) # seems like some distributions provide their own *.pc file for 1.8.1 (e.g. Ubuntu-18.04)
|
||||
endif()
|
||||
if(NOT pkgcfg_lib_PCAP_pcap)
|
||||
find_package(PCAP "1.9.0" REQUIRED)
|
||||
endif()
|
||||
|
||||
target_compile_options(nDPId PRIVATE "-pthread")
|
||||
target_compile_definitions(nDPId PRIVATE -D_GNU_SOURCE=1 -DGIT_VERSION=\"${GIT_VERSION}\" ${NDPID_DEFS} ${ZLIB_DEFS})
|
||||
target_include_directories(nDPId PRIVATE "${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" ${NDPID_DEPS_INC})
|
||||
target_link_libraries(nDPId "${STATIC_LIBNDPI_LIB}" "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre}" "${pkgcfg_lib_MAXMINDDB_maxminddb}" "${pkgcfg_lib_ZLIB_z}"
|
||||
"${GCRYPT_LIBRARY}" "${GCRYPT_ERROR_LIBRARY}" "${PCAP_LIBRARY}" "${LIBM_LIB}"
|
||||
"-pthread")
|
||||
target_compile_definitions(nDPId PRIVATE -D_GNU_SOURCE=1 -DPKG_VERSION=\"${PKG_VERSION}\" -DGIT_VERSION=\"${GIT_VERSION}\"
|
||||
${NDPID_DEFS} ${EPOLL_DEFS} ${ZLIB_DEFS} ${PFRING_DEFS} ${OSSL_DEFS})
|
||||
target_include_directories(nDPId PRIVATE "${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" ${NDPID_DEPS_INC} ${PFRING_KERNEL_INC} ${PFRING_INC})
|
||||
target_link_libraries(nDPId "${STATIC_LIBNDPI_LIB}" "${STATIC_PFRING_LIB}" "${pkgcfg_lib_PCAP_pcap}" "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre2-8}" "${pkgcfg_lib_MAXMINDDB_maxminddb}" "${pkgcfg_lib_ZLIB_z}"
|
||||
"${GCRYPT_LIBRARY}" "${GCRYPT_ERROR_LIBRARY}" "${PCAP_LIBRARY}" "${LIBM_LIB}" "${PF_RING_LIB}"
|
||||
"${OSSL_LIBRARY}" "-pthread")
|
||||
|
||||
target_compile_definitions(nDPIsrvd PRIVATE -D_GNU_SOURCE=1 -DGIT_VERSION=\"${GIT_VERSION}\" ${NDPID_DEFS})
|
||||
target_compile_definitions(nDPIsrvd PRIVATE -D_GNU_SOURCE=1 -DPKG_VERSION=\"${PKG_VERSION}\" -DGIT_VERSION=\"${GIT_VERSION}\" ${NDPID_DEFS} ${EPOLL_DEFS})
|
||||
target_include_directories(nDPIsrvd PRIVATE ${NDPID_DEPS_INC})
|
||||
|
||||
target_include_directories(nDPId-test PRIVATE ${NDPID_DEPS_INC})
|
||||
target_compile_options(nDPId-test PRIVATE "-Wno-unused-function" "-pthread")
|
||||
target_compile_definitions(nDPId-test PRIVATE -D_GNU_SOURCE=1 -DNO_MAIN=1 -DGIT_VERSION=\"${GIT_VERSION}\"
|
||||
${NDPID_DEFS} ${ZLIB_DEFS} ${NDPID_TEST_MPROF_DEFS})
|
||||
target_compile_definitions(nDPId-test PRIVATE -D_GNU_SOURCE=1 -DNO_MAIN=1 -DPKG_VERSION=\"${PKG_VERSION}\" -DGIT_VERSION=\"${GIT_VERSION}\"
|
||||
${NDPID_DEFS} ${EPOLL_DEFS} ${ZLIB_DEFS} ${PFRING_DEFS} ${OSSL_DEFS} ${NDPID_TEST_MPROF_DEFS})
|
||||
target_include_directories(nDPId-test PRIVATE
|
||||
"${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" ${NDPID_DEPS_INC})
|
||||
target_link_libraries(nDPId-test "${STATIC_LIBNDPI_LIB}" "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre}" "${pkgcfg_lib_MAXMINDDB_maxminddb}" "${pkgcfg_lib_ZLIB_z}"
|
||||
"${GCRYPT_LIBRARY}" "${GCRYPT_ERROR_LIBRARY}" "${PCAP_LIBRARY}" "${LIBM_LIB}"
|
||||
"-pthread")
|
||||
"${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" ${NDPID_DEPS_INC} ${PFRING_KERNEL_INC} ${PFRING_INC})
|
||||
target_link_libraries(nDPId-test "${STATIC_LIBNDPI_LIB}" "${STATIC_PFRING_LIB}" "${pkgcfg_lib_PCAP_pcap}" "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre2-8}" "${pkgcfg_lib_MAXMINDDB_maxminddb}" "${pkgcfg_lib_ZLIB_z}"
|
||||
"${GCRYPT_LIBRARY}" "${GCRYPT_ERROR_LIBRARY}" "${PCAP_LIBRARY}" "${LIBM_LIB}" "${PF_RING_LIB}"
|
||||
"${OSSL_LIBRARY}" "-pthread")
|
||||
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
||||
add_executable(fuzz_ndpi_process_packet test/fuzz_ndpi_process_packet.c)
|
||||
if(BUILD_NDPI)
|
||||
add_dependencies(fuzz_ndpi_process_packet libnDPI)
|
||||
endif()
|
||||
target_compile_options(fuzz_ndpi_process_packet PRIVATE "-Wno-unused-function" "-fsanitize=fuzzer" "-pthread")
|
||||
target_compile_definitions(fuzz_ndpi_process_packet PRIVATE -D_GNU_SOURCE=1
|
||||
-DPKG_VERSION=\"${PKG_VERSION}\" -DGIT_VERSION=\"${GIT_VERSION}\"
|
||||
${NDPID_DEFS} ${EPOLL_DEFS} ${ZLIB_DEFS} ${PFRING_DEFS})
|
||||
target_include_directories(fuzz_ndpi_process_packet PRIVATE "${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}"
|
||||
${NDPID_DEPS_INC} ${PFRING_KERNEL_INC} ${PFRING_INC})
|
||||
target_link_libraries(fuzz_ndpi_process_packet "${STATIC_LIBNDPI_LIB}" "${STATIC_PFRING_LIB}" "${pkgcfg_lib_PCAP_pcap}" "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre2-8}" "${pkgcfg_lib_MAXMINDDB_maxminddb}" "${pkgcfg_lib_ZLIB_z}"
|
||||
"${GCRYPT_LIBRARY}" "${GCRYPT_ERROR_LIBRARY}" "${PCAP_LIBRARY}" "${LIBM_LIB}" "${PF_RING_LIB}"
|
||||
"-pthread")
|
||||
target_link_options(fuzz_ndpi_process_packet PRIVATE "-fsanitize=fuzzer")
|
||||
endif()
|
||||
|
||||
if(BUILD_EXAMPLES)
|
||||
add_executable(nDPIsrvd-collectd examples/c-collectd/c-collectd.c)
|
||||
add_executable(nDPIsrvd-collectd examples/c-collectd/c-collectd.c utils.c)
|
||||
if(BUILD_NDPI)
|
||||
add_dependencies(nDPIsrvd-collectd libnDPI)
|
||||
endif()
|
||||
@@ -304,47 +516,101 @@ if(BUILD_EXAMPLES)
|
||||
target_compile_definitions(nDPIsrvd-captured PRIVATE ${NDPID_DEFS})
|
||||
target_include_directories(nDPIsrvd-captured PRIVATE
|
||||
"${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" "${CMAKE_SOURCE_DIR}" ${NDPID_DEPS_INC})
|
||||
target_link_libraries(nDPIsrvd-captured "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre}" "${pkgcfg_lib_MAXMINDDB_maxminddb}"
|
||||
target_link_libraries(nDPIsrvd-captured "${pkgcfg_lib_PCAP_pcap}" "${pkgcfg_lib_NDPI_ndpi}"
|
||||
"${pkgcfg_lib_PCRE_pcre2-8}" "${pkgcfg_lib_MAXMINDDB_maxminddb}"
|
||||
"${GCRYPT_LIBRARY}" "${GCRYPT_ERROR_LIBRARY}" "${PCAP_LIBRARY}")
|
||||
|
||||
add_executable(nDPIsrvd-json-dump examples/c-json-stdout/c-json-stdout.c)
|
||||
target_compile_definitions(nDPIsrvd-json-dump PRIVATE ${NDPID_DEFS})
|
||||
target_include_directories(nDPIsrvd-json-dump PRIVATE ${NDPID_DEPS_INC})
|
||||
|
||||
add_executable(nDPIsrvd-analysed examples/c-analysed/c-analysed.c utils.c)
|
||||
if(BUILD_NDPI)
|
||||
add_dependencies(nDPIsrvd-analysed libnDPI)
|
||||
endif()
|
||||
target_compile_definitions(nDPIsrvd-analysed PRIVATE ${NDPID_DEFS})
|
||||
target_include_directories(nDPIsrvd-analysed PRIVATE ${NDPID_DEPS_INC})
|
||||
target_include_directories(nDPIsrvd-analysed PRIVATE
|
||||
"${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" "${CMAKE_SOURCE_DIR}" ${NDPID_DEPS_INC})
|
||||
|
||||
add_executable(nDPIsrvd-simple examples/c-simple/c-simple.c)
|
||||
target_compile_definitions(nDPIsrvd-simple PRIVATE ${NDPID_DEFS})
|
||||
target_include_directories(nDPIsrvd-simple PRIVATE ${NDPID_DEPS_INC})
|
||||
|
||||
if(ENABLE_COVERAGE)
|
||||
add_dependencies(coverage nDPIsrvd-analysed nDPIsrvd-collectd nDPIsrvd-captured nDPIsrvd-json-dump nDPIsrvd-simple)
|
||||
add_dependencies(coverage nDPIsrvd-analysed nDPIsrvd-collectd nDPIsrvd-captured nDPIsrvd-simple)
|
||||
if(BUILD_NDPI)
|
||||
add_dependencies(coverage libnDPI)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
install(TARGETS nDPIsrvd-analysed nDPIsrvd-collectd nDPIsrvd-captured nDPIsrvd-json-dump nDPIsrvd-simple DESTINATION bin)
|
||||
if(ENABLE_DBUS)
|
||||
add_executable(nDPIsrvd-notifyd examples/c-notifyd/c-notifyd.c utils.c)
|
||||
if(BUILD_NDPI)
|
||||
add_dependencies(nDPIsrvd-notifyd libnDPI)
|
||||
endif()
|
||||
target_compile_definitions(nDPIsrvd-notifyd PRIVATE ${NDPID_DEFS})
|
||||
target_include_directories(nDPIsrvd-notifyd PRIVATE
|
||||
"${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" "${CMAKE_SOURCE_DIR}" "${NDPID_DEPS_INC}"
|
||||
"${DBUS_INCLUDE_DIRS}")
|
||||
target_link_libraries(nDPIsrvd-notifyd "${DBUS_LIBRARIES}")
|
||||
install(TARGETS nDPIsrvd-notifyd DESTINATION bin)
|
||||
endif()
|
||||
|
||||
if(ENABLE_CURL)
|
||||
add_executable(nDPIsrvd-influxd examples/c-influxd/c-influxd.c utils.c)
|
||||
if(BUILD_NDPI)
|
||||
add_dependencies(nDPIsrvd-influxd libnDPI)
|
||||
endif()
|
||||
target_compile_definitions(nDPIsrvd-influxd PRIVATE ${NDPID_DEFS})
|
||||
target_include_directories(nDPIsrvd-influxd PRIVATE
|
||||
"${STATIC_LIBNDPI_INC}" "${DEFAULT_NDPI_INCLUDE}" "${CMAKE_SOURCE_DIR}" "${NDPID_DEPS_INC}"
|
||||
"${CURL_INCLUDE_DIRS}")
|
||||
target_link_libraries(nDPIsrvd-influxd "${CURL_LIBRARIES}")
|
||||
install(TARGETS nDPIsrvd-influxd DESTINATION bin)
|
||||
endif()
|
||||
|
||||
install(TARGETS nDPIsrvd-analysed nDPIsrvd-collectd nDPIsrvd-captured nDPIsrvd-simple DESTINATION bin)
|
||||
install(FILES examples/c-collectd/plugin_nDPIsrvd.conf examples/c-collectd/rrdgraph.sh DESTINATION share/nDPId/nDPIsrvd-collectd)
|
||||
install(DIRECTORY examples/c-collectd/www DESTINATION share/nDPId/nDPIsrvd-collectd)
|
||||
endif()
|
||||
|
||||
if(ENABLE_SYSTEMD)
|
||||
install(FILES packages/systemd/ndpisrvd.service DESTINATION lib/systemd/system)
|
||||
install(FILES packages/systemd/ndpid@.service DESTINATION lib/systemd/system)
|
||||
if(BUILD_RUST_EXAMPLES)
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/target/release/rs-simple
|
||||
COMMAND cargo build --release
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/examples/rs-simple
|
||||
COMMENT "Build Rust executable with cargo: rs-simple"
|
||||
)
|
||||
add_custom_target(rs-simple ALL
|
||||
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/target/release/rs-simple
|
||||
)
|
||||
endif()
|
||||
|
||||
if(ENABLE_SYSTEMD)
|
||||
configure_file(packages/systemd/ndpisrvd.service.in ndpisrvd.service @ONLY)
|
||||
configure_file(packages/systemd/ndpid@.service.in ndpid@.service @ONLY)
|
||||
|
||||
install(DIRECTORY DESTINATION etc/nDPId)
|
||||
install(FILES "ndpid.conf.example" DESTINATION share/nDPId)
|
||||
install(FILES "ndpisrvd.conf.example" DESTINATION share/nDPId)
|
||||
install(FILES "${CMAKE_BINARY_DIR}/ndpisrvd.service" DESTINATION lib/systemd/system)
|
||||
install(FILES "${CMAKE_BINARY_DIR}/ndpid@.service" DESTINATION lib/systemd/system)
|
||||
endif()
|
||||
|
||||
install(FILES config.h
|
||||
dependencies/nDPIsrvd.h
|
||||
dependencies/jsmn/jsmn.h
|
||||
dependencies/uthash/src/utarray.h
|
||||
dependencies/uthash/src/uthash.h
|
||||
dependencies/uthash/src/utlist.h
|
||||
dependencies/uthash/src/utringbuffer.h
|
||||
dependencies/uthash/src/utstack.h
|
||||
dependencies/uthash/src/utstring.h
|
||||
DESTINATION include/nDPId)
|
||||
install(TARGETS nDPId DESTINATION sbin)
|
||||
install(TARGETS nDPIsrvd nDPId-test DESTINATION bin)
|
||||
if(BUILD_EXAMPLES)
|
||||
install(FILES dependencies/nDPIsrvd.py examples/py-flow-dashboard/plotly_dash.py
|
||||
install(FILES dependencies/nDPIsrvd.py
|
||||
DESTINATION share/nDPId)
|
||||
install(FILES examples/py-flow-info/flow-info.py
|
||||
DESTINATION bin RENAME nDPIsrvd-flow-info.py
|
||||
PERMISSIONS OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
|
||||
install(FILES examples/py-flow-dashboard/flow-dash.py
|
||||
DESTINATION bin RENAME nDPIsrvd-flow-dash.py
|
||||
PERMISSIONS OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
|
||||
install(FILES examples/py-json-stdout/json-stdout.py
|
||||
DESTINATION bin RENAME nDPIsrvd-json-stdout.py
|
||||
PERMISSIONS OWNER_READ OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE)
|
||||
@@ -367,31 +633,54 @@ message(STATUS "Cross Compilation........: ${CMAKE_CROSSCOMPILING}")
|
||||
message(STATUS "CMAKE_BUILD_TYPE.........: ${CMAKE_BUILD_TYPE}")
|
||||
message(STATUS "CMAKE_C_FLAGS............: ${CMAKE_C_FLAGS}")
|
||||
message(STATUS "NDPID_DEFS...............: ${NDPID_DEFS}")
|
||||
message(STATUS "FORCE_POLL...............: ${FORCE_POLL}")
|
||||
message(STATUS "ENABLE_PFRING............: ${ENABLE_PFRING}")
|
||||
if(ENABLE_PFRING)
|
||||
message(STATUS "PFRING_LINK_STATIC.......: ${PFRING_LINK_STATIC}")
|
||||
endif()
|
||||
message(STATUS "ENABLE_CRYPTO............: ${ENABLE_CRYPTO}")
|
||||
message(STATUS "ENABLE_COVERAGE..........: ${ENABLE_COVERAGE}")
|
||||
message(STATUS "ENABLE_SANITIZER.........: ${ENABLE_SANITIZER}")
|
||||
message(STATUS "ENABLE_SANITIZER_THREAD..: ${ENABLE_SANITIZER_THREAD}")
|
||||
message(STATUS "ENABLE_MEMORY_PROFILING..: ${ENABLE_MEMORY_PROFILING}")
|
||||
message(STATUS "ENABLE_ZLIB..............: ${ENABLE_ZLIB}")
|
||||
if(STATIC_LIBNDPI_INSTALLDIR)
|
||||
message(STATUS "STATIC_LIBNDPI_INSTALLDIR: ${STATIC_LIBNDPI_INSTALLDIR}")
|
||||
endif()
|
||||
message(STATUS "BUILD_NDPI...............: ${BUILD_NDPI}")
|
||||
message(STATUS "BUILD_EXAMPLES...........: ${BUILD_EXAMPLES}")
|
||||
message(STATUS "BUILD_RUST_EXAMPLES......: ${BUILD_RUST_EXAMPLES}")
|
||||
if(BUILD_EXAMPLES)
|
||||
message(STATUS "ENABLE_DBUS..............: ${ENABLE_DBUS}")
|
||||
message(STATUS "ENABLE_CURL..............: ${ENABLE_CURL}")
|
||||
endif()
|
||||
if(BUILD_NDPI)
|
||||
message(STATUS "NDPI_ADDITIONAL_ARGS.....: ${NDPI_ADDITIONAL_ARGS}")
|
||||
endif()
|
||||
message(STATUS "NDPI_NO_PKGCONFIG........: ${NDPI_NO_PKGCONFIG}")
|
||||
message(STATUS "--------------------------")
|
||||
if(PFRING_INSTALLDIR)
|
||||
message(STATUS "PFRING_INSTALLDIR........: ${PFRING_INSTALLDIR}")
|
||||
message(STATUS "- PFRING_INC.............: ${PFRING_INC}")
|
||||
message(STATUS "- PFRING_KERNEL_INC......: ${PFRING_KERNEL_INC}")
|
||||
message(STATUS "- STATIC_PFRING_LIB......: ${STATIC_PFRING_LIB}")
|
||||
message(STATUS "- SHARED_PFRING_LIB......: ${PF_RING_LIB}")
|
||||
message(STATUS "--------------------------")
|
||||
endif()
|
||||
if(STATIC_LIBNDPI_INSTALLDIR)
|
||||
message(STATUS "STATIC_LIBNDPI_INSTALLDIR: ${STATIC_LIBNDPI_INSTALLDIR}")
|
||||
endif()
|
||||
if(STATIC_LIBNDPI_INSTALLDIR OR BUILD_NDPI OR NDPI_NO_PKGCONFIG)
|
||||
message(STATUS "- STATIC_LIBNDPI_INC....: ${STATIC_LIBNDPI_INC}")
|
||||
message(STATUS "- STATIC_LIBNDPI_LIB....: ${STATIC_LIBNDPI_LIB}")
|
||||
message(STATUS "- NDPI_WITH_GCRYPT......: ${NDPI_WITH_GCRYPT}")
|
||||
message(STATUS "- NDPI_WITH_PCRE........: ${NDPI_WITH_PCRE}")
|
||||
message(STATUS "- NDPI_WITH_MAXMINDDB...: ${NDPI_WITH_MAXMINDDB}")
|
||||
message(STATUS "- STATIC_LIBNDPI_INC.....: ${STATIC_LIBNDPI_INC}")
|
||||
message(STATUS "- STATIC_LIBNDPI_LIB.....: ${STATIC_LIBNDPI_LIB}")
|
||||
message(STATUS "- NDPI_WITH_GCRYPT.......: ${NDPI_WITH_GCRYPT}")
|
||||
message(STATUS "- NDPI_WITH_PCRE.........: ${NDPI_WITH_PCRE}")
|
||||
message(STATUS "- NDPI_WITH_MAXMINDDB....: ${NDPI_WITH_MAXMINDDB}")
|
||||
endif()
|
||||
if(NOT STATIC_LIBNDPI_INSTALLDIR AND NOT BUILD_NDPI)
|
||||
message(STATUS "- DEFAULT_NDPI_INCLUDE..: ${DEFAULT_NDPI_INCLUDE}")
|
||||
message(STATUS "- DEFAULT_NDPI_INCLUDE...: ${DEFAULT_NDPI_INCLUDE}")
|
||||
endif()
|
||||
if(NOT NDPI_NO_PKGCONFIG)
|
||||
message(STATUS "- pkgcfg_lib_NDPI_ndpi..: ${pkgcfg_lib_NDPI_ndpi}")
|
||||
message(STATUS "- pkgcfg_lib_NDPI_ndpi...: ${pkgcfg_lib_NDPI_ndpi}")
|
||||
endif()
|
||||
message(STATUS "--------------------------")
|
||||
if(CMAKE_C_COMPILER_ID STREQUAL "Clang")
|
||||
message(STATUS "Fuzzing enabled")
|
||||
endif()
|
||||
|
||||
55
Dockerfile
55
Dockerfile
@@ -1,25 +1,46 @@
|
||||
FROM ubuntu:22.10 as builder
|
||||
FROM ubuntu:22.04 AS builder-ubuntu-2204
|
||||
|
||||
WORKDIR /root
|
||||
RUN apt-get -y update && apt-get install -y git cmake pkg-config libpcap-dev autoconf libtool
|
||||
RUN apt-get -y update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
autoconf automake build-essential ca-certificates cmake git \
|
||||
libpcap-dev libcurl4-openssl-dev libdbus-1-dev libtool make pkg-config unzip wget \
|
||||
&& apt-get clean \
|
||||
&& git clone https://github.com/utoni/nDPId.git
|
||||
|
||||
RUN git clone https://github.com/utoni/nDPId.git
|
||||
#for dev, uncomment below
|
||||
#RUN mkdir /root/nDPId
|
||||
#COPY . /root/nDPId/
|
||||
WORKDIR /root/nDPId
|
||||
RUN cmake -S . -B build -DBUILD_NDPI=ON -DBUILD_EXAMPLES=ON \
|
||||
-DENABLE_DBUS=ON -DENABLE_CURL=ON \
|
||||
&& cmake --build build --verbose
|
||||
|
||||
RUN cd nDPId && mkdir build && cd build && cmake .. -DBUILD_NDPI=ON && make
|
||||
FROM ubuntu:22.04
|
||||
USER root
|
||||
WORKDIR /
|
||||
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPId /usr/sbin/nDPId
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd /usr/bin/nDPIsrvd
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPId-test /usr/bin/nDPId-test
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-collectd /usr/bin/nDPIsrvd-collectd
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-captured /usr/bin/nDPIsrvd-captured
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-analysed /usr/bin/nDPIsrvd-anaylsed
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-analysed /usr/bin/nDPIsrvd-anaylsed
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-notifyd /usr/bin/nDPIsrvd-notifyd
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-influxd /usr/bin/nDPIsrvd-influxd
|
||||
COPY --from=builder-ubuntu-2204 /root/nDPId/build/nDPIsrvd-simple /usr/bin/nDPIsrvd-simple
|
||||
|
||||
RUN apt-get -y update \
|
||||
&& apt-get install -y --no-install-recommends libpcap-dev \
|
||||
&& apt-get clean
|
||||
|
||||
USER nobody
|
||||
RUN /usr/bin/nDPIsrvd -h || { RC=$?; test ${RC} -eq 1; }; \
|
||||
/usr/sbin/nDPId -h || { RC=$?; test ${RC} -eq 1; }
|
||||
|
||||
FROM archlinux:base-devel AS builder-archlinux
|
||||
|
||||
FROM ubuntu:22.10
|
||||
WORKDIR /root
|
||||
RUN apt-get -y update && apt-get -y install libpcap-dev
|
||||
RUN pacman --noconfirm -Sy cmake git unzip wget && mkdir /build && chown nobody /build && cd /build \
|
||||
&& runuser -u nobody git clone https://github.com/utoni/nDPId.git
|
||||
|
||||
COPY --from=builder /root/nDPId/libnDPI/ /root/
|
||||
COPY --from=builder /root/nDPId/build/nDPIsrvd /root/nDPId/build/nDPId /root/
|
||||
|
||||
#RUN echo "#!/bin/bash\n" \
|
||||
# "/root/nDPIsrvd -d\n"\
|
||||
# "/root/nDPId \n" > run.sh && cat run.sh && chmod +x run.sh
|
||||
|
||||
#ENTRYPOINT ["/root/run.sh"]
|
||||
WORKDIR /build/nDPId/packages/archlinux
|
||||
RUN runuser -u nobody makepkg
|
||||
|
||||
162
README.md
162
README.md
@@ -1,32 +1,41 @@
|
||||
[](https://github.com/utoni/nDPId/actions/workflows/build.yml)
|
||||
[](https://gitlab.com/utoni/nDPId/-/pipelines)
|
||||
[](https://app.circleci.com/pipelines/github/utoni/nDPId)
|
||||
[](https://sonarcloud.io/summary/new_code?id=lnslbrty_nDPId)
|
||||
[](https://sonarcloud.io/summary/new_code?id=lnslbrty_nDPId)
|
||||
[](https://sonarcloud.io/summary/new_code?id=lnslbrty_nDPId)
|
||||
[](https://sonarcloud.io/summary/new_code?id=lnslbrty_nDPId)
|
||||
[](https://sonarcloud.io/summary/new_code?id=lnslbrty_nDPId)
|
||||
[](https://sonarcloud.io/summary/new_code?id=lnslbrty_nDPId)
|
||||

|
||||
|
||||
# References
|
||||
|
||||
[ntop Webinar 2022](https://www.ntop.org/webinar/ntop-webinar-on-dec-14th-community-meeting-and-future-plans/)
|
||||
[ntopconf 2023](https://www.ntop.org/ntopconf2023/)
|
||||
|
||||
# Disclaimer
|
||||
|
||||
Please respect&protect the privacy of others.
|
||||
Please respect & protect the privacy of others.
|
||||
|
||||
The purpose of this software is not to spy on others, but to detect network anomalies and malicious traffic.
|
||||
|
||||
# Abstract
|
||||
|
||||
nDPId is a set of daemons and tools to capture, process and classify network traffic.
|
||||
It's minimal dependencies (besides a half-way modern c library and POSIX threads) are libnDPI (> 4.4.0 or current github dev branch) and libpcap.
|
||||
Its minimal dependencies (besides a to some extent modern C library and POSIX threads) are libnDPI (>=5.0.0 or current github dev branch) and libpcap.
|
||||
|
||||
The daemon `nDPId` is capable of multithreading for packet processing, but w/o mutexes for performance reasons.
|
||||
Instead synchronization is achieved by a packet distribution mechanism.
|
||||
To balance all workload to all threads (more or less) equally a unique identifier represented as hash value is calculated using a 3-tuple consisting of IPv4/IPv6 src/dst address, IP header value of the layer4 protocol and (for TCP/UDP) src/dst port. Other protocols e.g. ICMP/ICMPv6 are lacking relevance for DPI, thus nDPId does not distinguish between different ICMP/ICMPv6 flows coming from the same host. Saves memory and performance, but might change in the future.
|
||||
Instead, synchronization is achieved by a packet distribution mechanism.
|
||||
To balance the workload to all threads (more or less) equally, a unique identifier represented as hash value is calculated using a 3-tuple consisting of: IPv4/IPv6 src/dst address; IP header value of the layer4 protocol; and (for TCP/UDP) src/dst port. Other protocols e.g. ICMP/ICMPv6 lack relevance for DPI, thus nDPId does not distinguish between different ICMP/ICMPv6 flows coming from the same host. This saves memory and performance, but might change in the future.
|
||||
|
||||
`nDPId` uses libnDPI's JSON serialization interface to generate a JSON strings for each event it receive from the library and which it then sends out to a UNIX-socket (default: /tmp/ndpid-collector.sock ). From such a socket, `nDPIsrvd` (or other custom applications) can retrieve incoming JSON-messages and further proceed working/distributing messages to higher-level applications.
|
||||
`nDPId` uses libnDPI's JSON serialization interface to generate a JSON messages for each event it receives from the library and which it then sends out to a UNIX-socket (default: `/tmp/ndpid-collector.sock` ). From such a socket, `nDPIsrvd` (or other custom applications) can retrieve incoming JSON-messages and further proceed working/distributing messages to higher-level applications.
|
||||
|
||||
Unfortunately `nDPIsrvd` does currently not support any encryption/authentication for TCP connections (TODO!).
|
||||
Unfortunately, `nDPIsrvd` does not yet support any encryption/authentication for TCP connections (TODO!).
|
||||
|
||||
# Architecture
|
||||
|
||||
This project uses some kind of microservice architecture.
|
||||
This project uses a kind of microservice architecture.
|
||||
|
||||
```text
|
||||
connect to UNIX socket [1] connect to UNIX/TCP socket [2]
|
||||
@@ -62,30 +71,30 @@ where:
|
||||
|
||||
JSON messages streamed by both `nDPId` and `nDPIsrvd` are presented with:
|
||||
|
||||
* a 5-digit-number describing (as decimal number) of the **entire** JSON string including the newline `\n` at the end;
|
||||
* a 5-digit-number describing (as decimal number) the **entire** JSON message including the newline `\n` at the end;
|
||||
* the JSON messages
|
||||
|
||||
```text
|
||||
[5-digit-number][JSON string]
|
||||
[5-digit-number][JSON message]
|
||||
```
|
||||
|
||||
as with the following example:
|
||||
|
||||
```text
|
||||
01223{"flow_event_id":7,"flow_event_name":"detection-update","thread_id":12,"packet_id":307,"source":"wlan0",[...]}
|
||||
00458{"packet_event_id":2,"packet_event_name":"packet-flow","thread_id":11,"packet_id":324,"source":"wlan0",[...]]}
|
||||
00572{"flow_event_id":1,"flow_event_name":"new","thread_id":11,"packet_id":324,"source":"wlan0",[...]}
|
||||
01223{"flow_event_id":7,"flow_event_name":"detection-update","thread_id":12,"packet_id":307,"source":"wlan0", ...snip...}
|
||||
00458{"packet_event_id":2,"packet_event_name":"packet-flow","thread_id":11,"packet_id":324,"source":"wlan0", ...snip...}
|
||||
00572{"flow_event_id":1,"flow_event_name":"new","thread_id":11,"packet_id":324,"source":"wlan0", ...snip...}
|
||||
```
|
||||
|
||||
The full stream of `nDPId` generated JSON-events can be retrieved directly from `nDPId`, without relying on `nDPIsrvd`, by providing a properly managed UNIX-socket.
|
||||
|
||||
Technical details about JSON-messages format can be obtained from related `.schema` file included in the `schema` directory
|
||||
Technical details about the JSON-message format can be obtained from the related `.schema` file included in the `schema` directory
|
||||
|
||||
|
||||
# Events
|
||||
|
||||
`nDPId` generates JSON strings whereas each string is assigned to a certain event.
|
||||
Those events specify the contents (key-value-pairs) of the JSON string.
|
||||
`nDPId` generates JSON messages whereby each string is assigned to a certain event.
|
||||
Those events specify the contents (key-value-pairs) of the JSON message.
|
||||
They are divided into four categories, each with a number of subevents.
|
||||
|
||||
## Error Events
|
||||
@@ -123,10 +132,10 @@ Detailed JSON-schema is available [here](schema/daemon_event_schema.json)
|
||||
|
||||
|
||||
## Packet Events
|
||||
There are 2 events containing base64 encoded packet payload either belonging to a flow or not:
|
||||
There are 2 events containing base64 encoded packet payloads either belonging to a flow or not:
|
||||
|
||||
1. packet: does not belong to any flow
|
||||
2. packet-flow: does belong to a flow e.g. TCP/UDP or ICMP
|
||||
2. packet-flow: belongs to a flow e.g. TCP/UDP or ICMP
|
||||
|
||||
Detailed JSON-schema is available [here](schema/packet_event_schema.json)
|
||||
|
||||
@@ -134,11 +143,11 @@ Detailed JSON-schema is available [here](schema/packet_event_schema.json)
|
||||
There are 9 distinct events related to a flow:
|
||||
|
||||
1. new: a new TCP/UDP/ICMP flow seen which will be tracked
|
||||
2. end: a TCP connections terminates
|
||||
2. end: a TCP connection terminates
|
||||
3. idle: a flow timed out, because there was no packet on the wire for a certain amount of time
|
||||
4. update: inform nDPIsrvd or other apps about a long-lasting flow, whose detection was finished a long time ago but is still active
|
||||
5. analyse: provide some information about extracted features of a flow (Experimental; disabled per default, enable with `-A`)
|
||||
6. guessed: `libnDPI` was not able to reliable detect a layer7 protocol and falls back to IP/Port based detection
|
||||
6. guessed: `libnDPI` was not able to reliably detect a layer7 protocol and falls back to IP/Port based detection
|
||||
7. detected: `libnDPI` sucessfully detected a layer7 protocol
|
||||
8. detection-update: `libnDPI` dissected more layer7 protocol data (after detection already done)
|
||||
9. not-detected: neither detected nor guessed
|
||||
@@ -149,8 +158,9 @@ Detailed JSON-schema is available [here](schema/flow_event_schema.json). Also, a
|
||||
|
||||
A flow can have three different states while it is been tracked by `nDPId`.
|
||||
|
||||
1. skipped: the flow will be tracked, but no detection will happen to safe memory, see command line argument `-I` and `-E`
|
||||
2. finished: detection finished and the memory used for the detection is free'd
|
||||
1. skipped: the flow will be tracked, but no detection will happen to reduce memory usage.
|
||||
See command line argument `-I` and `-E`
|
||||
2. finished: detection finished and the memory used for the detection is freed
|
||||
3. info: detection is in progress and all flow memory required for `libnDPI` is allocated (this state consumes most memory)
|
||||
|
||||
# Build (CMake)
|
||||
@@ -172,7 +182,7 @@ see below for a full/test live-session
|
||||
|
||||

|
||||
|
||||
Based on your building environment and/or desiderata, you could need:
|
||||
Based on your build environment and/or desiderata, you could need:
|
||||
|
||||
```shell
|
||||
mkdir build
|
||||
@@ -183,43 +193,49 @@ ccmake ..
|
||||
or to build with a staticially linked libnDPI:
|
||||
|
||||
```shell
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DSTATIC_LIBNDPI_INSTALLDIR=[path/to/your/libnDPI/installdir]
|
||||
cmake -S . -B ./build \
|
||||
-DSTATIC_LIBNDPI_INSTALLDIR=[path/to/your/libnDPI/installdir] \
|
||||
-DNDPI_NO_PKGCONFIG=ON
|
||||
cmake --build ./build
|
||||
```
|
||||
|
||||
If you're using the latter one, make sure that you've configured libnDPI with `./configure --prefix=[path/to/your/libnDPI/installdir]`
|
||||
and do not forget to set the all necessary CMake variables to link against shared libraries used by your nDPI build.
|
||||
If you use the latter, make sure that you've configured libnDPI with `./configure --prefix=[path/to/your/libnDPI/installdir]`
|
||||
and remember to set the all-necessary CMake variables to link against shared libraries used by your nDPI build.
|
||||
You'll also need to use `-DNDPI_NO_PKGCONFIG=ON` if `STATIC_LIBNDPI_INSTALLDIR` does not contain a pkg-config file.
|
||||
|
||||
e.g.:
|
||||
|
||||
```shell
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DSTATIC_LIBNDPI_INSTALLDIR=[path/to/your/libnDPI/installdir] -DNDPI_WITH_GCRYPT=ON -DNDPI_WITH_PCRE=OFF -DNDPI_WITH_MAXMINDDB=OFF
|
||||
cmake -S . -B ./build \
|
||||
-DSTATIC_LIBNDPI_INSTALLDIR=[path/to/your/libnDPI/installdir] \
|
||||
-DNDPI_NO_PKGCONFIG=ON \
|
||||
-DNDPI_WITH_GCRYPT=ON -DNDPI_WITH_PCRE=OFF -DNDPI_WITH_MAXMINDDB=OFF
|
||||
cmake --build ./build
|
||||
```
|
||||
|
||||
Or let a shell script do the work for you:
|
||||
|
||||
```shell
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DBUILD_NDPI=ON
|
||||
cmake -S . -B ./build \
|
||||
-DBUILD_NDPI=ON
|
||||
cmake --build ./build
|
||||
```
|
||||
|
||||
The CMake cache variable `-DBUILD_NDPI=ON` builds a version of `libnDPI` residing as git submodule in this repository.
|
||||
The CMake cache variable `-DBUILD_NDPI=ON` builds a version of `libnDPI` residing as a git submodule in this repository.
|
||||
|
||||
# run
|
||||
|
||||
As mentioned above, in order to run `nDPId` a UNIX-socket need to be provided in order to stream our related JSON-data.
|
||||
As mentioned above, in order to run `nDPId`, a UNIX-socket needs to be provided in order to stream our related JSON-data.
|
||||
|
||||
Such a UNIX-socket can be provided by both the included `nDPIsrvd` daemon, or, if you simply need a quick check, with the [ncat](https://nmap.org/book/ncat-man.html) utility, with a simple `ncat -U /tmp/listen.sock -l -k`. Remember that OpenBSD `netcat` is not able to handle multiple connections reliably.
|
||||
|
||||
Once the socket is ready, you can run `nDPId` capturing and analyzing your own traffic, with something similar to:
|
||||
Once the socket is ready, you can run `nDPId` capturing and analyzing your own traffic, with something similar to: `sudo nDPId -c /tmp/listen.sock`
|
||||
If you're using OpenBSD `netcat`, you need to run: `sudo nDPId -c /tmp/listen.sock -o max-reader-threads=1`
|
||||
Make sure that the UNIX socket is accessible by the user (see -u) to whom nDPId changes to, default: nobody.
|
||||
|
||||
Of course, both `ncat` and `nDPId` need to point to the same UNIX-socket (`nDPId` provides the `-c` option, exactly for this. As a default, `nDPId` refer to `/tmp/ndpid-collector.sock`, and the same default-path is also used by `nDPIsrvd` as for the incoming socket).
|
||||
Of course, both `ncat` and `nDPId` need to point to the same UNIX-socket (`nDPId` provides the `-c` option, exactly for this. By default, `nDPId` refers to `/tmp/ndpid-collector.sock`, and the same default-path is also used by `nDPIsrvd` for the incoming socket).
|
||||
|
||||
You also need to provide `nDPId` some real-traffic. You can capture your own traffic, with something similar to:
|
||||
Give `nDPId` some real-traffic. You can capture your own traffic, with something similar to:
|
||||
|
||||
```shell
|
||||
socat -u UNIX-Listen:/tmp/listen.sock,fork - # does the same as `ncat`
|
||||
@@ -247,7 +263,7 @@ Daemons:
|
||||
make -C [path-to-a-build-dir] daemon
|
||||
```
|
||||
|
||||
Or you can proceed with a manual approach with:
|
||||
Or a manual approach with:
|
||||
|
||||
```shell
|
||||
./nDPIsrvd -d
|
||||
@@ -265,11 +281,6 @@ And why not a flow-info example?
|
||||
./examples/py-flow-info/flow-info.py
|
||||
```
|
||||
|
||||
or
|
||||
```shell
|
||||
./nDPIsrvd-json-dump
|
||||
```
|
||||
|
||||
or anything below `./examples`.
|
||||
|
||||
# nDPId tuning
|
||||
@@ -282,45 +293,56 @@ Suboptions for `-o`:
|
||||
Format: `subopt` (unit, comment): description
|
||||
|
||||
* `max-flows-per-thread` (N, caution advised): affects max. memory usage
|
||||
* `max-idle-flows-per-thread` (N, safe): max. allowed idle flows which memory get's free'd after `flow-scan-interval`
|
||||
* `max-idle-flows-per-thread` (N, safe): max. allowed idle flows whose memory gets freed after `flow-scan-interval`
|
||||
* `max-reader-threads` (N, safe): amount of packet processing threads, every thread can have a max. of `max-flows-per-thread` flows
|
||||
* `daemon-status-interval` (ms, safe): specifies how often daemon event `status` will be generated
|
||||
* `compression-scan-interval` (ms, untested): specifies how often `nDPId` should scan for inactive flows ready for compression
|
||||
* `compression-flow-inactivity` (ms, untested): the earliest period of time that must elapse before `nDPId` may consider compressing a flow that did neither send nor receive any data
|
||||
* `flow-scan-interval` (ms, safe): min. amount of time after which `nDPId` will scan for idle or long-lasting flows
|
||||
* `generic-max-idle-time` (ms, untested): time after which a non TCP/UDP/ICMP flow will time out
|
||||
* `icmp-max-idle-time` (ms, untested): time after which an ICMP flow will time out
|
||||
* `udp-max-idle-time` (ms, caution advised): time after which an UDP flow will time out
|
||||
* `tcp-max-idle-time` (ms, caution advised): time after which a TCP flow will time out
|
||||
* `tcp-max-post-end-flow-time` (ms, caution advised): a TCP flow that received a FIN or RST will wait that amount of time before flow tracking will be stopped and the flow memory free'd
|
||||
* `max-packets-per-flow-to-send` (N, safe): max. `packet-flow` events that will be generated for the first N packets of each flow
|
||||
* `max-packets-per-flow-to-process` (N, caution advised): max. packets that will be processed by `libnDPI`
|
||||
* `daemon-status-interval` (ms, safe): specifies how often daemon event `status` is generated
|
||||
* `compression-scan-interval` (ms, untested): specifies how often `nDPId` scans for inactive flows ready for compression
|
||||
* `compression-flow-inactivity` (ms, untested): the shortest period of time elapsed before `nDPId` considers compressing a flow (e.g. nDPI flow struct) that neither sent nor received any data
|
||||
* `flow-scan-interval` (ms, safe): min. amount of time after which `nDPId` scans for idle or long-lasting flows
|
||||
* `generic-max-idle-time` (ms, untested): time after which a non TCP/UDP/ICMP flow times out
|
||||
* `icmp-max-idle-time` (ms, untested): time after which an ICMP flow times out
|
||||
* `udp-max-idle-time` (ms, caution advised): time after which an UDP flow times out
|
||||
* `tcp-max-idle-time` (ms, caution advised): time after which a TCP flow times out
|
||||
* `tcp-max-post-end-flow-time` (ms, caution advised): a TCP flow that received a FIN or RST waits this amount of time before flow tracking stops and the flow memory is freed
|
||||
* `max-packets-per-flow-to-send` (N, safe): max. `packet-flow` events generated for the first N packets of each flow
|
||||
* `max-packets-per-flow-to-process` (N, caution advised): max. amount of packets processed by `libnDPI`
|
||||
* `max-packets-per-flow-to-analyze` (N, safe): max. packets to analyze before sending an `analyse` event, requires `-A`
|
||||
* `error-event-threshold-n` (N, safe): max. error events to send until threshold time has passed
|
||||
* `error-event-threshold-time` (N, safe): time after which the error event threshold resets
|
||||
|
||||
# test
|
||||
|
||||
The recommended way to run integration / diff tests:
|
||||
The recommended way to run regression / diff tests:
|
||||
|
||||
```shell
|
||||
mkdir build
|
||||
cd build
|
||||
cmake .. -DBUILD_NDPI=ON
|
||||
make nDPId-test test
|
||||
cmake -S . -B ./build-like-ci \
|
||||
-DBUILD_NDPI=ON -DENABLE_ZLIB=ON -DBUILD_EXAMPLES=ON
|
||||
# optional: -DENABLE_CURL=ON -DENABLE_SANITIZER=ON
|
||||
./test/run_tests.sh ./libnDPI ./build-like-ci/nDPId-test
|
||||
# or: make -C ./build-like-ci test
|
||||
```
|
||||
|
||||
Alternatively you can run some integration tests manually:
|
||||
|
||||
`./test/run_tests.sh [/path/to/libnDPI/root/directory] [/path/to/nDPId-test]`
|
||||
|
||||
e.g.:
|
||||
|
||||
`./test/run_tests.sh [${HOME}/git/nDPI] [${HOME}/git/nDPId/build/nDPId-test]`
|
||||
Run `./test/run_tests.sh` to see some usage information.
|
||||
|
||||
Remember that all test results are tied to a specific libnDPI commit hash
|
||||
as part of the `git submodule`. Using `test/run_tests.sh` for other commit hashes
|
||||
will most likely result in PCAP diff's.
|
||||
will most likely result in PCAP diffs.
|
||||
|
||||
Why not use `examples/py-flow-dashboard/flow-dash.py` to visualize nDPId's output.
|
||||
# Code Coverage
|
||||
|
||||
You may generate code coverage by using:
|
||||
|
||||
```shell
|
||||
cmake -S . -B ./build-coverage \
|
||||
-DENABLE_COVERAGE=ON -DENABLE_ZLIB=ON
|
||||
# optional: -DBUILD_NDPI=ON
|
||||
make -C ./build-coverage coverage-clean
|
||||
make -C ./build-coverage clean
|
||||
make -C ./build-coverage all
|
||||
./test/run_tests.sh ./libnDPI ./build-coverage/nDPId-test
|
||||
make -C ./build-coverage coverage
|
||||
make -C ./build-coverage coverage-view
|
||||
```
|
||||
|
||||
# Contributors
|
||||
|
||||
|
||||
18
SECURITY.md
Normal file
18
SECURITY.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# Security Policy
|
||||
|
||||
I encourage you to submit a pull request if you have a solution or fix for anything even security vulnerabilities.
|
||||
Your contributions help advance and enhance safety for all users :star:.
|
||||
|
||||
## Reporting a Bug :bug: :bug:
|
||||
|
||||
Simply use GitHub issues to report a bug with related information to debug the issue :pencil:.
|
||||
|
||||
## Reporting a Vulnerability :closed_lock_with_key: :eyes:
|
||||
|
||||
For sensitive security issues, please email <toni@impl.cc> including the following information:
|
||||
|
||||
- Description of the vulnerability
|
||||
- Steps to reproduce the issue
|
||||
- Affected versions i.e. release tags, git commit hashes or git branch
|
||||
- If applicable, a data sample (preferably `pcap/pcapng`) to reproduce
|
||||
- If known, any mitigations or fixes for the issue
|
||||
21
TODO.md
21
TODO.md
@@ -1,5 +1,20 @@
|
||||
# TODOs
|
||||
|
||||
1. improve UDP/TCP timeout handling by reading netfilter conntrack timeouts from /proc (or just read conntrack table entries)
|
||||
2. detect interface / timeout changes and apply them to nDPId
|
||||
3. implement AEAD crypto via libsodium (at least for TCP communication)
|
||||
1.8:
|
||||
|
||||
* let nDPIsrvd (collector) connect to other nDPIsrvd instances (as distributor)
|
||||
* nDPIsrvd GnuTLS support for TCP/IP distributor connections
|
||||
* provide nDPId-exportd daemon which will only send captured packets to an nDPId instance running on a different machine
|
||||
|
||||
2.0.0:
|
||||
|
||||
* switch to semantic versioning for the greater good ;)
|
||||
|
||||
no release plan:
|
||||
|
||||
* merge flow end/idle event into idle event (end is not really useful..)
|
||||
* provide a shared library for C / C++ for distributor application developers
|
||||
* improve UDP/TCP timeout handling by reading netfilter conntrack timeouts from /proc (or just read conntrack table entries)
|
||||
* detect interface / timeout changes and apply them to nDPId
|
||||
* switch to MIT or BSD License
|
||||
* libdaq integration
|
||||
|
||||
28
cmake/CheckEpoll.cmake
Normal file
28
cmake/CheckEpoll.cmake
Normal file
@@ -0,0 +1,28 @@
|
||||
# - Check if the system supports epoll.
|
||||
# CHECK_EPOLL(<var>)
|
||||
# <var> - variable to store the result
|
||||
# (1 for success, empty for failure)
|
||||
|
||||
#=============================================================================
|
||||
# This software is in the public domain, furnished "as is", without technical
|
||||
# support, and with no warranty, express or implied, as to its usefulness for
|
||||
# any purpose.
|
||||
#=============================================================================
|
||||
|
||||
macro(CHECK_EPOLL VARIABLE)
|
||||
if(UNIX)
|
||||
if("${VARIABLE}" MATCHES "^${VARIABLE}$")
|
||||
message(STATUS "Check if the system supports epoll")
|
||||
include(CheckSymbolExists)
|
||||
check_symbol_exists(epoll_create "sys/epoll.h" EPOLL_PROTOTYPE_EXISTS)
|
||||
|
||||
if(EPOLL_PROTOTYPE_EXISTS)
|
||||
message(STATUS "Check if the system supports epoll - yes")
|
||||
set(${VARIABLE} 1 CACHE INTERNAL "Result of CHECK_EPOLL" FORCE)
|
||||
else(EPOLL_PROTOTYPE_EXISTS)
|
||||
message(STATUS "Check if the system supports epoll - no")
|
||||
set(${VARIABLE} "" CACHE INTERNAL "Result of CHECK_EPOLL" FORCE)
|
||||
endif(EPOLL_PROTOTYPE_EXISTS)
|
||||
endif("${VARIABLE}" MATCHES "^${VARIABLE}$")
|
||||
endif(UNIX)
|
||||
endmacro(CHECK_EPOLL)
|
||||
8
config.h
8
config.h
@@ -16,11 +16,13 @@
|
||||
#define NETWORK_BUFFER_LENGTH_DIGITS 5u
|
||||
#define NETWORK_BUFFER_LENGTH_DIGITS_STR "5"
|
||||
|
||||
#define TIME_S_TO_US(s) (s * 1000u * 1000u)
|
||||
#define PFRING_BUFFER_SIZE 65536u
|
||||
|
||||
#define TIME_S_TO_US(s) (s * 1000llu * 1000llu)
|
||||
|
||||
/* nDPId default config options */
|
||||
#define nDPId_PIDFILE "/tmp/ndpid.pid"
|
||||
#define nDPId_MAX_FLOWS_PER_THREAD 4096u
|
||||
#define nDPId_MAX_FLOWS_PER_THREAD 65536u
|
||||
#define nDPId_MAX_IDLE_FLOWS_PER_THREAD (nDPId_MAX_FLOWS_PER_THREAD / 32u)
|
||||
#define nDPId_MAX_READER_THREADS 32u
|
||||
#define nDPId_ERROR_EVENT_THRESHOLD_N 16u
|
||||
@@ -38,7 +40,7 @@
|
||||
#define nDPId_THREAD_DISTRIBUTION_SEED 0x03dd018b
|
||||
#define nDPId_PACKETS_PLEN_MAX 8192u /* 8kB */
|
||||
#define nDPId_PACKETS_PER_FLOW_TO_SEND 15u
|
||||
#define nDPId_PACKETS_PER_FLOW_TO_PROCESS NDPI_DEFAULT_MAX_NUM_PKTS_PER_FLOW_TO_DISSECT
|
||||
#define nDPId_PACKETS_PER_FLOW_TO_PROCESS 32u
|
||||
#define nDPId_PACKETS_PER_FLOW_TO_ANALYZE 32u
|
||||
#define nDPId_ANALYZE_PLEN_MAX 1504u
|
||||
#define nDPId_ANALYZE_PLEN_BIN_LEN 32u
|
||||
|
||||
6
dependencies/jsmn/jsmn.h
vendored
6
dependencies/jsmn/jsmn.h
vendored
@@ -196,10 +196,10 @@ static int jsmn_parse_string(jsmn_parser *parser, const char *js,
|
||||
jsmntok_t *token;
|
||||
|
||||
int start = parser->pos;
|
||||
|
||||
parser->pos++;
|
||||
|
||||
|
||||
/* Skip starting quote */
|
||||
parser->pos++;
|
||||
|
||||
for (; parser->pos < len && js[parser->pos] != '\0'; parser->pos++) {
|
||||
char c = js[parser->pos];
|
||||
|
||||
|
||||
182
dependencies/nDPIsrvd.h
vendored
182
dependencies/nDPIsrvd.h
vendored
@@ -5,6 +5,7 @@
|
||||
#include <ctype.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <netinet/in.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
@@ -32,11 +33,9 @@
|
||||
#define nDPIsrvd_JSON_KEY_STRLEN (32)
|
||||
#define nDPIsrvd_HASHKEY_SEED (0x995fd871u)
|
||||
|
||||
#define nDPIsrvd_ARRAY_LENGTH(s) (sizeof(s) / sizeof(s[0]))
|
||||
#define nDPIsrvd_STRLEN_SZ(s) (sizeof(s) / sizeof(s[0]) - sizeof(s[0]))
|
||||
#define nDPIsrvd_ARRAY_LENGTH(s) ((size_t)(sizeof(s) / sizeof(s[0])))
|
||||
#define nDPIsrvd_STRLEN_SZ(s) ((size_t)((sizeof(s) / sizeof(s[0])) - sizeof(s[0])))
|
||||
#define TOKEN_GET_SZ(sock, ...) nDPIsrvd_get_token(sock, __VA_ARGS__, NULL)
|
||||
#define TOKEN_GET_VALUE_SZ(sock, value_length, ...) \
|
||||
nDPIsrvd_get_token_value(sock, TOKEN_GET_SZ(sock, __VA_ARGS__, NULL))
|
||||
#define TOKEN_VALUE_EQUALS(sock, token, string_to_check, string_to_check_length) \
|
||||
nDPIsrvd_token_value_equals(sock, token, string_to_check, string_to_check_length)
|
||||
#define TOKEN_VALUE_EQUALS_SZ(sock, token, string_to_check) \
|
||||
@@ -208,9 +207,9 @@ struct nDPIsrvd_buffer
|
||||
struct nDPIsrvd_json_buffer
|
||||
{
|
||||
struct nDPIsrvd_buffer buf;
|
||||
char * json_string;
|
||||
size_t json_string_start;
|
||||
nDPIsrvd_ull json_string_length;
|
||||
char * json_message;
|
||||
size_t json_message_start;
|
||||
nDPIsrvd_ull json_message_length;
|
||||
};
|
||||
|
||||
struct nDPIsrvd_jsmn
|
||||
@@ -254,9 +253,9 @@ static inline void nDPIsrvd_socket_free(struct nDPIsrvd_socket ** const sock);
|
||||
#define WHITESPACE 64
|
||||
#define EQUALS 65
|
||||
#define INVALID 66
|
||||
static inline int nDPIsrvd_base64decode(char * in, size_t inLen, unsigned char * out, size_t * outLen)
|
||||
static inline int nDPIsrvd_base64decode(char const * in, size_t inLen, unsigned char * out, size_t * outLen)
|
||||
{
|
||||
char * end = in + inLen;
|
||||
char const * end = in + inLen;
|
||||
char iter = 0;
|
||||
uint32_t buf = 0;
|
||||
size_t len = 0;
|
||||
@@ -278,7 +277,7 @@ static inline int nDPIsrvd_base64decode(char * in, size_t inLen, unsigned char *
|
||||
|
||||
while (in < end)
|
||||
{
|
||||
unsigned char c = d[*(unsigned char *)in++];
|
||||
unsigned char c = d[*(unsigned char const *)in++];
|
||||
|
||||
switch (c)
|
||||
{
|
||||
@@ -401,14 +400,19 @@ static inline void nDPIsrvd_buffer_free(struct nDPIsrvd_buffer * const buffer)
|
||||
buffer->max = 0;
|
||||
}
|
||||
|
||||
static inline void nDPIsrvd_json_buffer_reset(struct nDPIsrvd_json_buffer * const json_buffer)
|
||||
{
|
||||
json_buffer->json_message_start = 0UL;
|
||||
json_buffer->json_message_length = 0ULL;
|
||||
json_buffer->json_message = NULL;
|
||||
}
|
||||
|
||||
static inline int nDPIsrvd_json_buffer_init(struct nDPIsrvd_json_buffer * const json_buffer, size_t json_buffer_size)
|
||||
{
|
||||
int ret = nDPIsrvd_buffer_init(&json_buffer->buf, json_buffer_size);
|
||||
if (ret == 0)
|
||||
{
|
||||
json_buffer->json_string_start = 0ul;
|
||||
json_buffer->json_string_length = 0ull;
|
||||
json_buffer->json_string = NULL;
|
||||
nDPIsrvd_json_buffer_reset(json_buffer);
|
||||
}
|
||||
|
||||
return ret;
|
||||
@@ -417,9 +421,7 @@ static inline int nDPIsrvd_json_buffer_init(struct nDPIsrvd_json_buffer * const
|
||||
static inline void nDPIsrvd_json_buffer_free(struct nDPIsrvd_json_buffer * const json_buffer)
|
||||
{
|
||||
nDPIsrvd_buffer_free(&json_buffer->buf);
|
||||
json_buffer->json_string_start = 0ul;
|
||||
json_buffer->json_string_length = 0ull;
|
||||
json_buffer->json_string = NULL;
|
||||
nDPIsrvd_json_buffer_reset(json_buffer);
|
||||
}
|
||||
|
||||
static inline struct nDPIsrvd_socket * nDPIsrvd_socket_init(size_t global_user_data_size,
|
||||
@@ -470,7 +472,6 @@ static inline struct nDPIsrvd_socket * nDPIsrvd_socket_init(size_t global_user_d
|
||||
|
||||
return sock;
|
||||
error:
|
||||
nDPIsrvd_json_buffer_free(&sock->buffer);
|
||||
nDPIsrvd_socket_free(&sock);
|
||||
return NULL;
|
||||
}
|
||||
@@ -481,7 +482,7 @@ static inline int nDPIsrvd_set_read_timeout(struct nDPIsrvd_socket * const sock,
|
||||
{
|
||||
struct timeval tv = {.tv_sec = seconds, .tv_usec = micro_seconds};
|
||||
|
||||
if (sock->fd < 0)
|
||||
if (sock == NULL || sock->fd < 0)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
@@ -496,7 +497,7 @@ static inline int nDPIsrvd_set_read_timeout(struct nDPIsrvd_socket * const sock,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int nDPIsrvd_set_nonblock(struct nDPIsrvd_socket * const sock)
|
||||
static inline int nDPIsrvd_set_nonblock(struct nDPIsrvd_socket const * const sock)
|
||||
{
|
||||
int flags;
|
||||
|
||||
@@ -561,7 +562,7 @@ static inline void nDPIsrvd_cleanup_instance(struct nDPIsrvd_socket * const sock
|
||||
struct nDPIsrvd_thread_data * current_thread_data;
|
||||
struct nDPIsrvd_thread_data * ttmp;
|
||||
|
||||
if (instance != NULL)
|
||||
if (sock != NULL && instance != NULL)
|
||||
{
|
||||
#ifdef ENABLE_MEMORY_PROFILING
|
||||
nDPIsrvd_memprof_log("Cleaning up instance 0x%x.", instance->alias_source_key);
|
||||
@@ -587,25 +588,56 @@ static inline void nDPIsrvd_cleanup_instance(struct nDPIsrvd_socket * const sock
|
||||
}
|
||||
}
|
||||
|
||||
static inline void nDPIsrvd_socket_free(struct nDPIsrvd_socket ** const sock)
|
||||
static inline void nDPIsrvd_socket_close(struct nDPIsrvd_socket * const sock)
|
||||
{
|
||||
struct nDPIsrvd_instance * current_instance;
|
||||
struct nDPIsrvd_instance * itmp;
|
||||
struct nDPIsrvd_json_token * current_json_token;
|
||||
struct nDPIsrvd_json_token * jtmp;
|
||||
|
||||
if (sock == NULL || *sock == NULL)
|
||||
if (sock == NULL)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if ((*sock)->json.token_table != NULL)
|
||||
if (sock->json.token_table != NULL)
|
||||
{
|
||||
HASH_ITER(hh, (*sock)->json.token_table, current_json_token, jtmp)
|
||||
HASH_ITER(hh, sock->json.token_table, current_json_token, jtmp)
|
||||
{
|
||||
HASH_DEL((*sock)->json.token_table, current_json_token);
|
||||
HASH_DEL(sock->json.token_table, current_json_token);
|
||||
}
|
||||
(*sock)->json.token_table = NULL;
|
||||
}
|
||||
|
||||
if (sock->json.tokens != NULL)
|
||||
{
|
||||
utarray_clear(sock->json.tokens);
|
||||
}
|
||||
|
||||
if (sock->instance_table != NULL)
|
||||
{
|
||||
HASH_ITER(hh, sock->instance_table, current_instance, itmp)
|
||||
{
|
||||
nDPIsrvd_cleanup_instance(sock, current_instance, CLEANUP_REASON_APP_SHUTDOWN);
|
||||
}
|
||||
}
|
||||
|
||||
nDPIsrvd_json_buffer_reset(&sock->buffer);
|
||||
close(sock->fd);
|
||||
sock->fd = -1;
|
||||
}
|
||||
|
||||
static inline void nDPIsrvd_socket_free(struct nDPIsrvd_socket ** const sock)
|
||||
{
|
||||
if (sock == NULL)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
nDPIsrvd_socket_close(*sock);
|
||||
|
||||
if (*sock == NULL)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if ((*sock)->json.tokens != NULL)
|
||||
@@ -613,20 +645,21 @@ static inline void nDPIsrvd_socket_free(struct nDPIsrvd_socket ** const sock)
|
||||
utarray_free((*sock)->json.tokens);
|
||||
}
|
||||
|
||||
HASH_ITER(hh, (*sock)->instance_table, current_instance, itmp)
|
||||
{
|
||||
nDPIsrvd_cleanup_instance(*sock, current_instance, CLEANUP_REASON_APP_SHUTDOWN);
|
||||
}
|
||||
(*sock)->json.tokens = NULL;
|
||||
(*sock)->json.token_table = NULL;
|
||||
(*sock)->instance_table = NULL;
|
||||
|
||||
nDPIsrvd_json_buffer_free(&(*sock)->buffer);
|
||||
nDPIsrvd_free(*sock);
|
||||
|
||||
*sock = NULL;
|
||||
}
|
||||
|
||||
static inline int nDPIsrvd_setup_address(struct nDPIsrvd_address * const address, char const * const destination)
|
||||
{
|
||||
if (address == NULL || destination == NULL)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
size_t len = strlen(destination);
|
||||
char const * first_colon = strchr(destination, ':');
|
||||
char const * last_colon = strrchr(destination, ':');
|
||||
@@ -653,7 +686,7 @@ static inline int nDPIsrvd_setup_address(struct nDPIsrvd_address * const address
|
||||
{
|
||||
address->raw.sa_family = AF_INET;
|
||||
address->size = sizeof(address->in);
|
||||
address->in.sin_port = htons(atoi(last_colon + 1));
|
||||
address->in.sin_port = htons((uint16_t)atoi(last_colon + 1));
|
||||
sock_addr = &address->in.sin_addr;
|
||||
|
||||
if (len < 7)
|
||||
@@ -665,7 +698,7 @@ static inline int nDPIsrvd_setup_address(struct nDPIsrvd_address * const address
|
||||
{
|
||||
address->raw.sa_family = AF_INET6;
|
||||
address->size = sizeof(address->in6);
|
||||
address->in6.sin6_port = htons(atoi(last_colon + 1));
|
||||
address->in6.sin6_port = htons((uint16_t)atoi(last_colon + 1));
|
||||
sock_addr = &address->in6.sin6_addr;
|
||||
|
||||
if (len < 2)
|
||||
@@ -674,7 +707,7 @@ static inline int nDPIsrvd_setup_address(struct nDPIsrvd_address * const address
|
||||
}
|
||||
if (destination[0] == '[')
|
||||
{
|
||||
if (*(last_colon - 1) != ']')
|
||||
if (last_colon - destination > 1 && *(last_colon - 1) != ']')
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
@@ -698,6 +731,11 @@ static inline int nDPIsrvd_setup_address(struct nDPIsrvd_address * const address
|
||||
|
||||
static inline enum nDPIsrvd_connect_return nDPIsrvd_connect(struct nDPIsrvd_socket * const sock)
|
||||
{
|
||||
if (sock == NULL)
|
||||
{
|
||||
return CONNECT_ERROR_SOCKET;
|
||||
}
|
||||
|
||||
sock->fd = socket(sock->address.raw.sa_family, SOCK_STREAM, 0);
|
||||
|
||||
if (sock->fd < 0)
|
||||
@@ -765,7 +803,7 @@ static inline enum nDPIsrvd_conversion_return str_value_to_ull(char const * cons
|
||||
return CONVERSION_OK;
|
||||
}
|
||||
|
||||
static inline nDPIsrvd_hashkey nDPIsrvd_build_key(char const * str, int len)
|
||||
static inline nDPIsrvd_hashkey nDPIsrvd_build_key(char const * str, size_t len)
|
||||
{
|
||||
uint32_t hash = nDPIsrvd_HASHKEY_SEED;
|
||||
uint32_t c;
|
||||
@@ -781,11 +819,11 @@ static inline nDPIsrvd_hashkey nDPIsrvd_build_key(char const * str, int len)
|
||||
static inline void nDPIsrvd_drain_buffer(struct nDPIsrvd_json_buffer * const json_buffer)
|
||||
{
|
||||
memmove(json_buffer->buf.ptr.raw,
|
||||
json_buffer->buf.ptr.raw + json_buffer->json_string_length,
|
||||
json_buffer->buf.used - json_buffer->json_string_length);
|
||||
json_buffer->buf.used -= json_buffer->json_string_length;
|
||||
json_buffer->json_string_length = 0;
|
||||
json_buffer->json_string_start = 0;
|
||||
json_buffer->buf.ptr.raw + json_buffer->json_message_length,
|
||||
json_buffer->buf.used - json_buffer->json_message_length);
|
||||
json_buffer->buf.used -= json_buffer->json_message_length;
|
||||
json_buffer->json_message_length = 0;
|
||||
json_buffer->json_message_start = 0;
|
||||
}
|
||||
|
||||
static inline nDPIsrvd_hashkey nDPIsrvd_vbuild_jsmn_key(char const * const json_key, va_list ap)
|
||||
@@ -850,7 +888,7 @@ static inline char const * nDPIsrvd_get_jsmn_token_value(struct nDPIsrvd_socket
|
||||
*value_length = jt->end - jt->start;
|
||||
}
|
||||
|
||||
return sock->buffer.json_string + jt->start;
|
||||
return sock->buffer.json_message + jt->start;
|
||||
}
|
||||
|
||||
static inline char const * nDPIsrvd_jsmn_token_to_string(struct nDPIsrvd_socket const * const sock,
|
||||
@@ -872,7 +910,7 @@ static inline char const * nDPIsrvd_jsmn_token_to_string(struct nDPIsrvd_socket
|
||||
*string_length = jt->end - jt->start;
|
||||
}
|
||||
|
||||
return sock->buffer.json_string + jt->start;
|
||||
return sock->buffer.json_message + jt->start;
|
||||
}
|
||||
|
||||
static inline int nDPIsrvd_get_token_size(struct nDPIsrvd_socket const * const sock,
|
||||
@@ -898,7 +936,7 @@ static inline char const * nDPIsrvd_get_token_value(struct nDPIsrvd_socket const
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return sock->buffer.json_string + t->start;
|
||||
return sock->buffer.json_message + t->start;
|
||||
}
|
||||
|
||||
static inline struct nDPIsrvd_json_token const * nDPIsrvd_get_next_token(struct nDPIsrvd_socket const * const sock,
|
||||
@@ -1061,19 +1099,24 @@ static inline struct nDPIsrvd_json_token * nDPIsrvd_find_token(struct nDPIsrvd_s
|
||||
|
||||
static inline struct nDPIsrvd_json_token * nDPIsrvd_add_token(struct nDPIsrvd_socket * const sock,
|
||||
nDPIsrvd_hashkey hash_value,
|
||||
int value_token_index)
|
||||
size_t value_token_index)
|
||||
{
|
||||
struct nDPIsrvd_json_token * token = nDPIsrvd_find_token(sock, hash_value);
|
||||
|
||||
if (value_token_index >= nDPIsrvd_MAX_JSON_TOKENS)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (token != NULL)
|
||||
{
|
||||
token->token_index = value_token_index;
|
||||
token->token_index = (int)value_token_index;
|
||||
|
||||
return token;
|
||||
}
|
||||
else
|
||||
{
|
||||
struct nDPIsrvd_json_token jt = {.token_keys_hash = hash_value, .token_index = value_token_index, .hh = {}};
|
||||
struct nDPIsrvd_json_token jt = {.token_keys_hash = hash_value, .token_index = (int)value_token_index, .hh = {}};
|
||||
|
||||
utarray_push_back(sock->json.tokens, &jt);
|
||||
HASH_ADD_INT(sock->json.token_table,
|
||||
@@ -1087,10 +1130,11 @@ static inline struct nDPIsrvd_json_token * nDPIsrvd_add_token(struct nDPIsrvd_so
|
||||
static inline int nDPIsrvd_walk_tokens(
|
||||
struct nDPIsrvd_socket * const sock, nDPIsrvd_hashkey h, size_t b, int count, uint8_t is_value, uint8_t depth)
|
||||
{
|
||||
int i, j;
|
||||
int i;
|
||||
int j;
|
||||
jsmntok_t const * key;
|
||||
jsmntok_t const * const t = &sock->jsmn.tokens[b];
|
||||
char const * const js = sock->buffer.json_string;
|
||||
char const * const js = sock->buffer.json_message;
|
||||
|
||||
if (depth >= 16)
|
||||
{
|
||||
@@ -1187,7 +1231,7 @@ static inline struct nDPIsrvd_instance * nDPIsrvd_get_instance(struct nDPIsrvd_s
|
||||
}
|
||||
|
||||
static inline struct nDPIsrvd_thread_data * nDPIsrvd_get_thread_data(
|
||||
struct nDPIsrvd_socket * const sock,
|
||||
struct nDPIsrvd_socket const * const sock,
|
||||
struct nDPIsrvd_instance * const instance,
|
||||
struct nDPIsrvd_json_token const * const thread_id_token,
|
||||
struct nDPIsrvd_json_token const * const ts_usec_token)
|
||||
@@ -1203,7 +1247,7 @@ static inline struct nDPIsrvd_thread_data * nDPIsrvd_get_thread_data(
|
||||
{
|
||||
nDPIsrvd_ull thread_key;
|
||||
TOKEN_VALUE_TO_ULL(sock, thread_id_token, &thread_key);
|
||||
thread_id = thread_key;
|
||||
thread_id = (nDPIsrvd_hashkey)thread_key;
|
||||
}
|
||||
|
||||
HASH_FIND_INT(instance->thread_data_table, &thread_id, thread_data);
|
||||
@@ -1411,36 +1455,36 @@ static inline enum nDPIsrvd_parse_return nDPIsrvd_parse_line(struct nDPIsrvd_jso
|
||||
}
|
||||
|
||||
errno = 0;
|
||||
json_buffer->json_string_length = strtoull((const char *)json_buffer->buf.ptr.text, &json_buffer->json_string, 10);
|
||||
json_buffer->json_string_length += json_buffer->json_string - json_buffer->buf.ptr.text;
|
||||
json_buffer->json_string_start = json_buffer->json_string - json_buffer->buf.ptr.text;
|
||||
json_buffer->json_message_length = strtoull((const char *)json_buffer->buf.ptr.text, &json_buffer->json_message, 10);
|
||||
json_buffer->json_message_length += json_buffer->json_message - json_buffer->buf.ptr.text;
|
||||
json_buffer->json_message_start = json_buffer->json_message - json_buffer->buf.ptr.text;
|
||||
|
||||
if (errno == ERANGE)
|
||||
{
|
||||
return PARSE_SIZE_EXCEEDS_CONVERSION_LIMIT;
|
||||
}
|
||||
if (json_buffer->json_string == json_buffer->buf.ptr.text)
|
||||
if (json_buffer->json_message == json_buffer->buf.ptr.text)
|
||||
{
|
||||
return PARSE_SIZE_MISSING;
|
||||
}
|
||||
if (json_buffer->json_string_length > json_buffer->buf.max)
|
||||
if (json_buffer->json_message_length > json_buffer->buf.max)
|
||||
{
|
||||
return PARSE_STRING_TOO_BIG;
|
||||
}
|
||||
if (json_buffer->json_string_length > json_buffer->buf.used)
|
||||
if (json_buffer->json_message_length > json_buffer->buf.used)
|
||||
{
|
||||
return PARSE_NEED_MORE_DATA;
|
||||
}
|
||||
if (json_buffer->buf.ptr.text[json_buffer->json_string_length - 2] != '}' ||
|
||||
json_buffer->buf.ptr.text[json_buffer->json_string_length - 1] != '\n')
|
||||
if (json_buffer->buf.ptr.text[json_buffer->json_message_length - 2] != '}' ||
|
||||
json_buffer->buf.ptr.text[json_buffer->json_message_length - 1] != '\n')
|
||||
{
|
||||
return PARSE_INVALID_CLOSING_CHAR;
|
||||
}
|
||||
|
||||
jsmn_init(&jsmn->parser);
|
||||
jsmn->tokens_found = jsmn_parse(&jsmn->parser,
|
||||
json_buffer->buf.ptr.text + json_buffer->json_string_start,
|
||||
json_buffer->json_string_length - json_buffer->json_string_start,
|
||||
json_buffer->buf.ptr.text + json_buffer->json_message_start,
|
||||
json_buffer->json_message_length - json_buffer->json_message_start,
|
||||
jsmn->tokens,
|
||||
nDPIsrvd_MAX_JSON_TOKENS);
|
||||
if (jsmn->tokens_found < 0 || jsmn->tokens[0].type != JSMN_OBJECT)
|
||||
@@ -1647,12 +1691,22 @@ static inline void nDPIsrvd_flow_info(struct nDPIsrvd_socket const * const sock,
|
||||
|
||||
static inline int nDPIsrvd_json_buffer_length(struct nDPIsrvd_socket const * const sock)
|
||||
{
|
||||
return (int)sock->buffer.json_string_length - NETWORK_BUFFER_LENGTH_DIGITS;
|
||||
if (sock == NULL)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (int)sock->buffer.json_message_length - NETWORK_BUFFER_LENGTH_DIGITS;
|
||||
}
|
||||
|
||||
static inline char const *nDPIsrvd_json_buffer_string(struct nDPIsrvd_socket const * const sock)
|
||||
static inline char const * nDPIsrvd_json_buffer_string(struct nDPIsrvd_socket const * const sock)
|
||||
{
|
||||
return sock->buffer.json_string;
|
||||
if (sock == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return sock->buffer.json_message;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
96
dependencies/nDPIsrvd.py
vendored
96
dependencies/nDPIsrvd.py
vendored
@@ -63,6 +63,10 @@ class TermColor:
|
||||
global USE_COLORAMA
|
||||
USE_COLORAMA = False
|
||||
|
||||
@staticmethod
|
||||
def disableBlink():
|
||||
TermColor.BLINK = ''
|
||||
|
||||
@staticmethod
|
||||
def calcColorHash(string):
|
||||
h = 0
|
||||
@@ -83,7 +87,6 @@ class TermColor:
|
||||
global USE_COLORAMA
|
||||
if USE_COLORAMA is True:
|
||||
fg_color, bg_color = TermColor.getColorsByHash(string)
|
||||
color_hash = TermColor.calcColorHash(string)
|
||||
return '{}{}{}{}{}'.format(Style.BRIGHT, fg_color, bg_color, string, Style.RESET_ALL)
|
||||
else:
|
||||
return '{}{}{}'.format(TermColor.BOLD, string, TermColor.END)
|
||||
@@ -182,7 +185,6 @@ class FlowManager:
|
||||
if alias not in self.instances:
|
||||
self.instances[alias] = dict()
|
||||
if source not in self.instances[alias]:
|
||||
self.instances[alias][source] = dict()
|
||||
self.instances[alias][source] = Instance(alias, source)
|
||||
|
||||
self.instances[alias][source].setMostRecentFlowTimeFromJSON(json_dict)
|
||||
@@ -296,6 +298,7 @@ class nDPIsrvdException(Exception):
|
||||
INVALID_LINE_RECEIVED = 4
|
||||
CALLBACK_RETURNED_FALSE = 5
|
||||
SOCKET_TIMEOUT = 6
|
||||
JSON_DECODE_ERROR = 7
|
||||
|
||||
def __init__(self, etype):
|
||||
self.etype = etype
|
||||
@@ -342,11 +345,51 @@ class SocketTimeout(nDPIsrvdException):
|
||||
def __str__(self):
|
||||
return 'Socket timeout.'
|
||||
|
||||
class JsonDecodeError(nDPIsrvdException):
|
||||
def __init__(self, json_exception, failed_line):
|
||||
super().__init__(nDPIsrvdException.JSON_DECODE_ERROR)
|
||||
self.json_exception = json_exception
|
||||
self.failed_line = failed_line
|
||||
def __str__(self):
|
||||
return '{}: {}'.format(self.json_exception, self.failed_line)
|
||||
|
||||
class JsonFilter():
|
||||
def __init__(self, filter_string):
|
||||
self.filter_string = filter_string
|
||||
self.filter = compile(filter_string, '<string>', 'eval')
|
||||
def evaluate(self, json_dict):
|
||||
if type(json_dict) is not dict:
|
||||
raise nDPIsrvdException('Could not evaluate JSON Filter: expected dictionary, got {}'.format(type(json_dict)))
|
||||
return eval(self.filter, {'json_dict': json_dict})
|
||||
|
||||
class nDPIsrvdSocket:
|
||||
def __init__(self):
|
||||
self.sock_family = None
|
||||
self.flow_mgr = FlowManager()
|
||||
self.received_bytes = 0
|
||||
self.json_filter = list()
|
||||
|
||||
def addFilter(self, filter_str):
|
||||
self.json_filter.append(JsonFilter(filter_str))
|
||||
|
||||
def evalFilters(self, json_dict):
|
||||
for jf in self.json_filter:
|
||||
try:
|
||||
json_filter_retval = jf.evaluate(json_dict)
|
||||
except Exception as err:
|
||||
print()
|
||||
sys.stderr.write('Error while evaluating expression "{}"\n'.format(jf.filter_string))
|
||||
raise err
|
||||
|
||||
if not isinstance(json_filter_retval, bool):
|
||||
print()
|
||||
sys.stderr.write('Error while evaluating expression "{}"\n'.format(jf.filter_string))
|
||||
raise nDPIsrvdException('JSON Filter returned an invalid type: expected bool, got {}'.format(type(json_filter_retval)))
|
||||
|
||||
if json_filter_retval is False:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def connect(self, addr):
|
||||
if type(addr) is tuple:
|
||||
@@ -363,6 +406,7 @@ class nDPIsrvdSocket:
|
||||
self.digitlen = 0
|
||||
self.lines = []
|
||||
self.failed_lines = []
|
||||
self.filtered_lines = 0
|
||||
|
||||
def timeout(self, timeout):
|
||||
self.sock.settimeout(timeout)
|
||||
@@ -424,7 +468,7 @@ class nDPIsrvdSocket:
|
||||
json_dict = dict()
|
||||
self.failed_lines += [received_line]
|
||||
self.lines = self.lines[1:]
|
||||
raise(e)
|
||||
raise JsonDecodeError(e, received_line)
|
||||
|
||||
instance = self.flow_mgr.getInstance(json_dict)
|
||||
if instance is None:
|
||||
@@ -432,19 +476,24 @@ class nDPIsrvdSocket:
|
||||
retval = False
|
||||
continue
|
||||
|
||||
try:
|
||||
if callback_json(json_dict, instance, self.flow_mgr.getFlow(instance, json_dict), global_user_data) is not True:
|
||||
self.failed_lines += [received_line]
|
||||
retval = False
|
||||
except Exception as e:
|
||||
self.failed_lines += [received_line]
|
||||
self.lines = self.lines[1:]
|
||||
raise(e)
|
||||
current_flow = self.flow_mgr.getFlow(instance, json_dict)
|
||||
filter_eval = self.evalFilters(json_dict)
|
||||
if filter_eval is True:
|
||||
try:
|
||||
if callback_json(json_dict, instance, current_flow, global_user_data) is not True:
|
||||
self.failed_lines += [received_line]
|
||||
retval = False
|
||||
except Exception as e:
|
||||
self.failed_lines += [received_line]
|
||||
self.lines = self.lines[1:]
|
||||
raise(e)
|
||||
else:
|
||||
self.filtered_lines += 1
|
||||
|
||||
for _, flow in self.flow_mgr.getFlowsToCleanup(instance, json_dict).items():
|
||||
if callback_flow_cleanup is None:
|
||||
pass
|
||||
elif callback_flow_cleanup(instance, flow, global_user_data) is not True:
|
||||
elif filter_eval is True and callback_flow_cleanup(instance, flow, global_user_data) is not True:
|
||||
self.failed_lines += [received_line]
|
||||
self.lines = self.lines[1:]
|
||||
retval = False
|
||||
@@ -477,12 +526,16 @@ class nDPIsrvdSocket:
|
||||
raise nDPIsrvdException('Failed lines > 0: {}'.format(len(self.failed_lines)))
|
||||
return self.flow_mgr.verifyFlows()
|
||||
|
||||
def defaultArgumentParser(desc='nDPIsrvd Python Interface',
|
||||
def defaultArgumentParser(desc='nDPIsrvd Python Interface', enable_json_filter=False,
|
||||
help_formatter=argparse.ArgumentDefaultsHelpFormatter):
|
||||
parser = argparse.ArgumentParser(description=desc, formatter_class=help_formatter)
|
||||
parser.add_argument('--host', type=str, help='nDPIsrvd host IP')
|
||||
parser.add_argument('--port', type=int, default=DEFAULT_PORT, help='nDPIsrvd TCP port')
|
||||
parser.add_argument('--unix', type=str, help='nDPIsrvd unix socket path')
|
||||
if enable_json_filter is True:
|
||||
parser.add_argument('--filter', type=str, action='append',
|
||||
help='Set a filter string which if evaluates to True will invoke the JSON callback.\n'
|
||||
'Example: json_dict[\'flow_event_name\'] == \'detected\' will only process \'detected\' events.')
|
||||
return parser
|
||||
|
||||
def toSeconds(usec):
|
||||
@@ -515,6 +568,23 @@ def validateAddress(args):
|
||||
|
||||
return address
|
||||
|
||||
def prepareJsonFilter(args, nsock):
|
||||
# HowTo use JSON Filters:
|
||||
# Add `--filter [FILTER_STRING]` to the Python scripts that support JSON filtering.
|
||||
# Examples:
|
||||
# ./examples/py-json-stdout/json-stdout.py --filter '"ndpi" in json_dict and "proto" in json_dict["ndpi"]'
|
||||
# The command above will print only JSONs that have the subobjects json_dict["ndpi"] and json_dict["ndpi"]["proto"] available.
|
||||
# ./examples/py-flow-info/flow-info.py --filter 'json_dict["source"] == "eth0"' --filter '"flow_event_name" in json_dict and json_dict["flow_event_name"] == "analyse"'
|
||||
# Multiple JSON filter will be ANDed together.
|
||||
# Note: You may *only* use the global "json_dict" in your expressions.
|
||||
try:
|
||||
json_filter = args.filter
|
||||
if json_filter is not None:
|
||||
for jf in json_filter:
|
||||
nsock.addFilter(jf)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
global schema
|
||||
schema = {'packet_event_schema' : None, 'error_event_schema' : None, 'daemon_event_schema' : None, 'flow_event_schema' : None}
|
||||
|
||||
|
||||
41
dependencies/uthash/.github/workflows/build.yml
vendored
Normal file
41
dependencies/uthash/.github/workflows/build.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: build # This name shows up in badge.svg
|
||||
|
||||
on:
|
||||
push: # any branch
|
||||
pull_request:
|
||||
branches: [ "master" ]
|
||||
|
||||
jobs:
|
||||
build-gcc:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- run: make -C tests EXTRA_CFLAGS="-W -Wall -Wextra -Wswitch-default"
|
||||
- run: make -C tests clean ; make -C tests pedantic
|
||||
- run: make -C tests clean ; make -C tests pedantic EXTRA_CFLAGS=-DNO_DECLTYPE
|
||||
- run: make -C tests clean ; make -C tests cplusplus
|
||||
- run: make -C tests clean ; make -C tests cplusplus EXTRA_CFLAGS=-DNO_DECLTYPE
|
||||
build-clang:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest, macos-latest]
|
||||
runs-on: ${{ matrix.os }}
|
||||
env:
|
||||
CC: clang
|
||||
CXX: clang++
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- run: make -C tests EXTRA_CFLAGS="-W -Wall -Wextra -Wswitch-default"
|
||||
- run: make -C tests clean ; make -C tests pedantic
|
||||
- run: make -C tests clean ; make -C tests pedantic EXTRA_CFLAGS=-DNO_DECLTYPE
|
||||
- run: make -C tests clean ; make -C tests cplusplus
|
||||
- run: make -C tests clean ; make -C tests cplusplus EXTRA_CFLAGS=-DNO_DECLTYPE
|
||||
build-asciidoc:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- run: sudo apt-get update && sudo apt-get install asciidoc -y
|
||||
- run: make -C doc
|
||||
2
dependencies/uthash/LICENSE
vendored
2
dependencies/uthash/LICENSE
vendored
@@ -1,4 +1,4 @@
|
||||
Copyright (c) 2005-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2005-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
||||
1
dependencies/uthash/README.md
vendored
1
dependencies/uthash/README.md
vendored
@@ -1,5 +1,6 @@
|
||||
|
||||
[](https://travis-ci.org/troydhanson/uthash)
|
||||
[](https://github.com/troydhanson/uthash/actions/workflows/build.yml)
|
||||
|
||||
Documentation for uthash is available at:
|
||||
|
||||
|
||||
6
dependencies/uthash/doc/index.html
vendored
6
dependencies/uthash/doc/index.html
vendored
@@ -13,8 +13,8 @@
|
||||
</div> <!-- banner -->
|
||||
|
||||
<div id="topnav">
|
||||
<a href="http://github.com/troydhanson/uthash">GitHub page</a> >
|
||||
uthash home <!-- http://troydhanson.github.io/uthash/ -->
|
||||
<a href="https://github.com/troydhanson/uthash">GitHub page</a> >
|
||||
uthash home <!-- https://troydhanson.github.io/uthash/ -->
|
||||
|
||||
<a href="https://twitter.com/share" class="twitter-share-button" data-via="troydhanson">Tweet</a>
|
||||
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0];if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src="//platform.twitter.com/widgets.js";fjs.parentNode.insertBefore(js,fjs);}}(document,"script","twitter-wjs");</script>
|
||||
@@ -43,7 +43,7 @@
|
||||
|
||||
|
||||
<h2>developer</h2>
|
||||
<div><a href="http://troydhanson.github.io/">Troy D. Hanson</a></div>
|
||||
<div><a href="https://troydhanson.github.io/">Troy D. Hanson</a></div>
|
||||
|
||||
<h2>maintainer</h2>
|
||||
<div><a href="https://github.com/Quuxplusone">Arthur O'Dwyer</a></div>
|
||||
|
||||
4
dependencies/uthash/doc/license.html
vendored
4
dependencies/uthash/doc/license.html
vendored
@@ -13,7 +13,7 @@
|
||||
</div> <!-- banner -->
|
||||
|
||||
<div id="topnav">
|
||||
<a href="http://troydhanson.github.io/uthash/">uthash home</a> >
|
||||
<a href="https://troydhanson.github.io/uthash/">uthash home</a> >
|
||||
BSD license
|
||||
</div>
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
<div id="mid">
|
||||
<div id="main">
|
||||
<pre>
|
||||
Copyright (c) 2005-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2005-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
||||
1
dependencies/uthash/doc/styles.css
vendored
1
dependencies/uthash/doc/styles.css
vendored
@@ -29,7 +29,6 @@ h1,p { margin: 0; } /* non-0 margin on firefox */
|
||||
background-repeat: repeat-y;
|
||||
/* background-color: #ffddaa; */
|
||||
padding-top: 20px;
|
||||
padding-top: 20px;
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
|
||||
|
||||
44
dependencies/uthash/doc/userguide.txt
vendored
44
dependencies/uthash/doc/userguide.txt
vendored
@@ -5,7 +5,7 @@ v2.3.0, February 2021
|
||||
|
||||
To download uthash, follow this link back to the
|
||||
https://github.com/troydhanson/uthash[GitHub project page].
|
||||
Back to my http://troydhanson.github.io/[other projects].
|
||||
Back to my https://troydhanson.github.io/[other projects].
|
||||
|
||||
A hash in C
|
||||
-----------
|
||||
@@ -805,7 +805,7 @@ Here is a simple example where a structure has a pointer member, called `key`.
|
||||
|
||||
.A pointer key
|
||||
----------------------------------------------------------------------
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
#include <stdlib.h>
|
||||
#include "uthash.h"
|
||||
|
||||
@@ -816,17 +816,16 @@ typedef struct {
|
||||
} el_t;
|
||||
|
||||
el_t *hash = NULL;
|
||||
char *someaddr = NULL;
|
||||
void *someaddr = &hash;
|
||||
|
||||
int main() {
|
||||
el_t *d;
|
||||
el_t *e = (el_t *)malloc(sizeof *e);
|
||||
if (!e) return -1;
|
||||
e->key = (void*)someaddr;
|
||||
e->key = someaddr;
|
||||
e->i = 1;
|
||||
HASH_ADD_PTR(hash, key, e);
|
||||
HASH_FIND_PTR(hash, &someaddr, d);
|
||||
if (d) printf("found\n");
|
||||
assert(d == e);
|
||||
|
||||
/* release memory */
|
||||
HASH_DEL(hash, e);
|
||||
@@ -835,9 +834,7 @@ int main() {
|
||||
}
|
||||
----------------------------------------------------------------------
|
||||
|
||||
This example is included in `tests/test57.c`. Note that the end of the program
|
||||
deletes the element out of the hash, (and since no more elements remain in the
|
||||
hash), uthash releases its internal memory.
|
||||
This example is included in `tests/test57.c`.
|
||||
|
||||
Structure keys
|
||||
~~~~~~~~~~~~~~
|
||||
@@ -893,7 +890,7 @@ int main(int argc, char *argv[]) {
|
||||
|
||||
----------------------------------------------------------------------
|
||||
|
||||
This usage is nearly the same as use of a compound key explained below.
|
||||
This usage is nearly the same as the usage of a compound key explained below.
|
||||
|
||||
Note that the general macros require the name of the `UT_hash_handle` to be
|
||||
passed as the first argument (here, this is `hh`). The general macros are
|
||||
@@ -1153,17 +1150,16 @@ always used with the `users_by_name` hash table).
|
||||
|
||||
Sorted insertion of new items
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
If you would like to maintain a sorted hash you have two options. The first
|
||||
option is to use the HASH_SRT() macro, which will sort any unordered list in
|
||||
To maintain a sorted hash, you have two options. Your first
|
||||
option is to use the `HASH_SRT` macro, which will sort any unordered list in
|
||||
'O(n log(n))'. This is the best strategy if you're just filling up a hash
|
||||
table with items in random order with a single final HASH_SRT() operation
|
||||
when all is done. Obviously, this won't do what you want if you need
|
||||
the list to be in an ordered state at times between insertion of
|
||||
items. You can use HASH_SRT() after every insertion operation, but that will
|
||||
yield a computational complexity of 'O(n^2 log n)'.
|
||||
table with items in random order with a single final `HASH_SRT` operation
|
||||
when all is done. If you need the table to remain sorted as you add and remove
|
||||
items, you can use `HASH_SRT` after every insertion operation, but that gives
|
||||
a computational complexity of 'O(n^2 log n)' to insert 'n' items.
|
||||
|
||||
The second route you can take is via the in-order add and replace macros.
|
||||
The `HASH_ADD_INORDER*` macros work just like their `HASH_ADD*` counterparts, but
|
||||
Your second option is to use the in-order add and replace macros.
|
||||
The `HASH_ADD_*_INORDER` macros work just like their `HASH_ADD_*` counterparts, but
|
||||
with an additional comparison-function argument:
|
||||
|
||||
int name_sort(struct my_struct *a, struct my_struct *b) {
|
||||
@@ -1172,11 +1168,11 @@ with an additional comparison-function argument:
|
||||
|
||||
HASH_ADD_KEYPTR_INORDER(hh, items, &item->name, strlen(item->name), item, name_sort);
|
||||
|
||||
New items are sorted at insertion time in 'O(n)', thus resulting in a
|
||||
total computational complexity of 'O(n^2)' for the creation of the hash
|
||||
table with all items.
|
||||
For in-order add to work, the list must be in an ordered state before
|
||||
insertion of the new item.
|
||||
These macros assume that the hash is already sorted according to the
|
||||
comparison function, and insert the new item in its proper place.
|
||||
A single insertion takes 'O(n)', resulting in a total computational
|
||||
complexity of 'O(n^2)' to insert all 'n' items: slower than a single
|
||||
`HASH_SRT`, but faster than doing a `HASH_SRT` after every insertion.
|
||||
|
||||
Several sort orders
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
2
dependencies/uthash/doc/utarray.txt
vendored
2
dependencies/uthash/doc/utarray.txt
vendored
@@ -139,7 +139,7 @@ a copy of the source string and pushes that copy into the array.
|
||||
About UT_icd
|
||||
~~~~~~~~~~~~
|
||||
|
||||
Arrays be made of any type of element, not just integers and strings. The
|
||||
Arrays can be made of any type of element, not just integers and strings. The
|
||||
elements can be basic types or structures. Unless you're dealing with integers
|
||||
and strings (which use pre-defined `ut_int_icd` and `ut_str_icd`), you'll need
|
||||
to define a `UT_icd` helper structure. This structure contains everything that
|
||||
|
||||
18
dependencies/uthash/src/utarray.h
vendored
18
dependencies/uthash/src/utarray.h
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2008-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2008-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -38,11 +38,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#define UTARRAY_UNUSED
|
||||
#endif
|
||||
|
||||
#ifdef oom
|
||||
#error "The name of macro 'oom' has been changed to 'utarray_oom'. Please update your code."
|
||||
#define utarray_oom() oom()
|
||||
#endif
|
||||
|
||||
#ifndef utarray_oom
|
||||
#define utarray_oom() exit(-1)
|
||||
#endif
|
||||
@@ -234,7 +229,16 @@ typedef struct {
|
||||
static void utarray_str_cpy(void *dst, const void *src) {
|
||||
char *const *srcc = (char *const *)src;
|
||||
char **dstc = (char**)dst;
|
||||
*dstc = (*srcc == NULL) ? NULL : strdup(*srcc);
|
||||
if (*srcc == NULL) {
|
||||
*dstc = NULL;
|
||||
} else {
|
||||
*dstc = (char*)malloc(strlen(*srcc) + 1);
|
||||
if (*dstc == NULL) {
|
||||
utarray_oom();
|
||||
} else {
|
||||
strcpy(*dstc, *srcc);
|
||||
}
|
||||
}
|
||||
}
|
||||
static void utarray_str_dtor(void *elt) {
|
||||
char **eltc = (char**)elt;
|
||||
|
||||
16
dependencies/uthash/src/uthash.h
vendored
16
dependencies/uthash/src/uthash.h
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2003-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2003-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -51,6 +51,8 @@ typedef unsigned char uint8_t;
|
||||
#else /* VS2008 or older (or VS2010 in C mode) */
|
||||
#define NO_DECLTYPE
|
||||
#endif
|
||||
#elif defined(__MCST__) /* Elbrus C Compiler */
|
||||
#define DECLTYPE(x) (__typeof(x))
|
||||
#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
|
||||
#define NO_DECLTYPE
|
||||
#else /* GNU, Sun and other compilers */
|
||||
@@ -157,7 +159,7 @@ do {
|
||||
if (head) { \
|
||||
unsigned _hf_bkt; \
|
||||
HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
|
||||
if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
|
||||
if (HASH_BLOOM_TEST((head)->hh.tbl, hashval)) { \
|
||||
HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
|
||||
} \
|
||||
} \
|
||||
@@ -194,7 +196,7 @@ do {
|
||||
} while (0)
|
||||
|
||||
#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
|
||||
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))
|
||||
#define HASH_BLOOM_BITTEST(bv,idx) ((bv[(idx)/8U] & (1U << ((idx)%8U))) != 0)
|
||||
|
||||
#define HASH_BLOOM_ADD(tbl,hashv) \
|
||||
HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U)))
|
||||
@@ -206,7 +208,7 @@ do {
|
||||
#define HASH_BLOOM_MAKE(tbl,oomed)
|
||||
#define HASH_BLOOM_FREE(tbl)
|
||||
#define HASH_BLOOM_ADD(tbl,hashv)
|
||||
#define HASH_BLOOM_TEST(tbl,hashv) (1)
|
||||
#define HASH_BLOOM_TEST(tbl,hashv) 1
|
||||
#define HASH_BLOOM_BYTELEN 0U
|
||||
#endif
|
||||
|
||||
@@ -450,7 +452,7 @@ do {
|
||||
|
||||
#define HASH_DELETE_HH(hh,head,delptrhh) \
|
||||
do { \
|
||||
struct UT_hash_handle *_hd_hh_del = (delptrhh); \
|
||||
const struct UT_hash_handle *_hd_hh_del = (delptrhh); \
|
||||
if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \
|
||||
HASH_BLOOM_FREE((head)->hh.tbl); \
|
||||
uthash_free((head)->hh.tbl->buckets, \
|
||||
@@ -593,7 +595,9 @@ do {
|
||||
|
||||
|
||||
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
|
||||
* http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
|
||||
* http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
|
||||
* (archive link: https://archive.is/Ivcan )
|
||||
*/
|
||||
#define HASH_SAX(key,keylen,hashv) \
|
||||
do { \
|
||||
unsigned _sx_i; \
|
||||
|
||||
7
dependencies/uthash/src/utlist.h
vendored
7
dependencies/uthash/src/utlist.h
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2007-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2007-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
@@ -70,6 +70,8 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#else /* VS2008 or older (or VS2010 in C mode) */
|
||||
#define NO_DECLTYPE
|
||||
#endif
|
||||
#elif defined(__MCST__) /* Elbrus C Compiler */
|
||||
#define LDECLTYPE(x) __typeof(x)
|
||||
#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
|
||||
#define NO_DECLTYPE
|
||||
#else /* GNU, Sun and other compilers */
|
||||
@@ -709,7 +711,8 @@ do {
|
||||
assert((del)->prev != NULL); \
|
||||
if ((del)->prev == (del)) { \
|
||||
(head)=NULL; \
|
||||
} else if ((del)==(head)) { \
|
||||
} else if ((del) == (head)) { \
|
||||
assert((del)->next != NULL); \
|
||||
(del)->next->prev = (del)->prev; \
|
||||
(head) = (del)->next; \
|
||||
} else { \
|
||||
|
||||
2
dependencies/uthash/src/utringbuffer.h
vendored
2
dependencies/uthash/src/utringbuffer.h
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2015-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2015-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
||||
2
dependencies/uthash/src/utstack.h
vendored
2
dependencies/uthash/src/utstack.h
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2018-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2018-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
||||
2
dependencies/uthash/src/utstring.h
vendored
2
dependencies/uthash/src/utstring.h
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2008-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2008-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
||||
2
dependencies/uthash/tests/Makefile
vendored
2
dependencies/uthash/tests/Makefile
vendored
@@ -12,7 +12,7 @@ PROGS = test1 test2 test3 test4 test5 test6 test7 test8 test9 \
|
||||
test66 test67 test68 test69 test70 test71 test72 test73 \
|
||||
test74 test75 test76 test77 test78 test79 test80 test81 \
|
||||
test82 test83 test84 test85 test86 test87 test88 test89 \
|
||||
test90 test91 test92 test93 test94 test95 test96
|
||||
test90 test91 test92 test93 test94 test95 test96 test97
|
||||
CFLAGS += -I$(HASHDIR)
|
||||
#CFLAGS += -DHASH_BLOOM=16
|
||||
#CFLAGS += -O2
|
||||
|
||||
1
dependencies/uthash/tests/README
vendored
1
dependencies/uthash/tests/README
vendored
@@ -98,6 +98,7 @@ test93: alt_fatal
|
||||
test94: utlist with fields named other than 'next' and 'prev'
|
||||
test95: utstack
|
||||
test96: HASH_FUNCTION + HASH_KEYCMP
|
||||
test97: deleting a const-qualified node from a hash
|
||||
|
||||
Other Make targets
|
||||
================================================================================
|
||||
|
||||
2
dependencies/uthash/tests/hashscan.c
vendored
2
dependencies/uthash/tests/hashscan.c
vendored
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright (c) 2005-2021, Troy D. Hanson http://troydhanson.github.io/uthash/
|
||||
Copyright (c) 2005-2022, Troy D. Hanson https://troydhanson.github.io/uthash/
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
|
||||
1
dependencies/uthash/tests/test57.ans
vendored
1
dependencies/uthash/tests/test57.ans
vendored
@@ -1 +0,0 @@
|
||||
found
|
||||
|
||||
57
dependencies/uthash/tests/test57.c
vendored
57
dependencies/uthash/tests/test57.c
vendored
@@ -1,5 +1,5 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
#include <stddef.h>
|
||||
#include "uthash.h"
|
||||
|
||||
typedef struct {
|
||||
@@ -8,25 +8,46 @@ typedef struct {
|
||||
UT_hash_handle hh;
|
||||
} el_t;
|
||||
|
||||
el_t *findit(el_t *hash, void *keytofind)
|
||||
{
|
||||
el_t *found;
|
||||
HASH_FIND_PTR(hash, &keytofind, found);
|
||||
return found;
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
el_t *d;
|
||||
el_t *hash = NULL;
|
||||
char *someaddr = NULL;
|
||||
el_t *e = (el_t*)malloc(sizeof(el_t));
|
||||
if (!e) {
|
||||
return -1;
|
||||
}
|
||||
e->key = (void*)someaddr;
|
||||
e->i = 1;
|
||||
HASH_ADD_PTR(hash,key,e);
|
||||
HASH_FIND_PTR(hash, &someaddr, d);
|
||||
if (d != NULL) {
|
||||
printf("found\n");
|
||||
}
|
||||
el_t e1;
|
||||
el_t e2;
|
||||
|
||||
e1.key = NULL;
|
||||
e1.i = 1;
|
||||
|
||||
e2.key = &e2;
|
||||
e2.i = 2;
|
||||
|
||||
assert(findit(hash, NULL) == NULL);
|
||||
assert(findit(hash, &e1) == NULL);
|
||||
assert(findit(hash, &e2) == NULL);
|
||||
|
||||
HASH_ADD_PTR(hash, key, &e1);
|
||||
assert(findit(hash, NULL) == &e1);
|
||||
assert(findit(hash, &e1) == NULL);
|
||||
assert(findit(hash, &e2) == NULL);
|
||||
|
||||
HASH_ADD_PTR(hash, key, &e2);
|
||||
assert(findit(hash, NULL) == &e1);
|
||||
assert(findit(hash, &e1) == NULL);
|
||||
assert(findit(hash, &e2) == &e2);
|
||||
|
||||
HASH_DEL(hash, &e1);
|
||||
assert(findit(hash, NULL) == NULL);
|
||||
assert(findit(hash, &e1) == NULL);
|
||||
assert(findit(hash, &e2) == &e2);
|
||||
|
||||
HASH_CLEAR(hh, hash);
|
||||
assert(hash == NULL);
|
||||
|
||||
/* release memory */
|
||||
HASH_DEL(hash,e);
|
||||
free(e);
|
||||
return 0;
|
||||
}
|
||||
|
||||
2
dependencies/uthash/tests/test65.c
vendored
2
dependencies/uthash/tests/test65.c
vendored
@@ -3,7 +3,7 @@
|
||||
#include "uthash.h"
|
||||
|
||||
// this is an example of how to do a LRU cache in C using uthash
|
||||
// http://troydhanson.github.io/uthash/
|
||||
// https://troydhanson.github.io/uthash/
|
||||
// by Jehiah Czebotar 2011 - jehiah@gmail.com
|
||||
// this code is in the public domain http://unlicense.org/
|
||||
|
||||
|
||||
0
dependencies/uthash/tests/test97.ans
vendored
Normal file
0
dependencies/uthash/tests/test97.ans
vendored
Normal file
57
dependencies/uthash/tests/test97.c
vendored
Normal file
57
dependencies/uthash/tests/test97.c
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
#include <assert.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "uthash.h"
|
||||
|
||||
struct item {
|
||||
int payload;
|
||||
UT_hash_handle hh;
|
||||
};
|
||||
|
||||
void delete_without_modifying(struct item *head, const struct item *p)
|
||||
{
|
||||
struct item old;
|
||||
memcpy(&old, p, sizeof(struct item)); // also copy the padding bits
|
||||
assert(memcmp(&old, p, sizeof(struct item)) == 0);
|
||||
assert(p->hh.tbl == head->hh.tbl); // class invariant
|
||||
HASH_DEL(head, p);
|
||||
assert(memcmp(&old, p, sizeof(struct item)) == 0); // unmodified by HASH_DEL
|
||||
}
|
||||
|
||||
int main()
|
||||
{
|
||||
struct item *items = NULL;
|
||||
struct item *found = NULL;
|
||||
int fortytwo = 42;
|
||||
int i;
|
||||
|
||||
for (i=0; i < 100; i++) {
|
||||
struct item *p = (struct item *)malloc(sizeof *p);
|
||||
p->payload = i;
|
||||
HASH_ADD_INT(items, payload, p);
|
||||
}
|
||||
assert(HASH_COUNT(items) == 100);
|
||||
|
||||
// Delete item "42" from the hash, wherever it is.
|
||||
HASH_FIND_INT(items, &fortytwo, found);
|
||||
assert(found != NULL);
|
||||
assert(found->payload == 42);
|
||||
delete_without_modifying(items, found);
|
||||
|
||||
assert(HASH_COUNT(items) == 99);
|
||||
HASH_FIND_INT(items, &fortytwo, found);
|
||||
assert(found == NULL);
|
||||
|
||||
// Delete the very first item in the hash.
|
||||
assert(items != NULL);
|
||||
i = items->payload;
|
||||
delete_without_modifying(items, items);
|
||||
|
||||
assert(HASH_COUNT(items) == 98);
|
||||
HASH_FIND_INT(items, &i, found);
|
||||
assert(found == NULL);
|
||||
|
||||
// leak the items, we don't care
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -13,19 +13,32 @@ Used also by `tests/run_tests.sh` if available.
|
||||
|
||||
A capture daemon suitable for low-resource devices.
|
||||
It saves flows that were guessed/undetected/risky/midstream to a PCAP file for manual analysis.
|
||||
Used also by `tests/run_tests.sh` if available.
|
||||
|
||||
## c-collectd
|
||||
|
||||
A collecd-exec compatible middleware that gathers statistic values from nDPId.
|
||||
Used also by `tests/run_tests.sh` if available.
|
||||
|
||||
## c-json-stdout
|
||||
## c-influxd
|
||||
|
||||
Tiny nDPId json dumper. Does not provide any useful funcationality besides dumping parsed JSON objects.
|
||||
An InfluxDB push daemon. It aggregates various statistics gathered from nDPId.
|
||||
The results are sent to a specified InfluxDB endpoint.
|
||||
|
||||

|
||||
|
||||
## c-notifyd
|
||||
|
||||
A notification daemon that sends information about suspicious flow events to DBUS.
|
||||
|
||||
## c-simple
|
||||
|
||||
Integration example that verifies flow timeouts on SIGUSR1.
|
||||
|
||||
## cxx-graph
|
||||
|
||||
A standalone GLFW/OpenGL application that draws statistical data using ImWeb/ImPlot/ImGui.
|
||||
|
||||
## js-rt-analyzer
|
||||
|
||||
[nDPId-rt-analyzer](https://gitlab.com/verzulli/ndpid-rt-analyzer.git)
|
||||
@@ -41,20 +54,20 @@ Required by `tests/run_tests.sh`
|
||||
|
||||
## py-machine-learning
|
||||
|
||||
Contains:
|
||||
|
||||
1. Classification via Random Forests and SciLearn
|
||||
2. Anomaly Detection via Autoencoder and Keras (Work-In-Progress!)
|
||||
|
||||
Use sklearn together with CSVs created with **c-analysed** to train and predict DPI detections.
|
||||
|
||||
Try it with: `./examples/py-machine-learning/sklearn_random_forest.py --csv ./ndpi-analysed.csv --proto-class tls.youtube --proto-class tls.github --proto-class tls.spotify --proto-class tls.facebook --proto-class tls.instagram --proto-class tls.doh_dot --proto-class quic --proto-class icmp`
|
||||
|
||||
This way you should get 9 different classification classes.
|
||||
You may notice that some classes e.g. TLS protocol classifications may have a higher false-negative rate.
|
||||
You may notice that some classes e.g. TLS protocol classifications have a higher false-negative/false-positive rate.
|
||||
Unfortunately, I can not provide any datasets due to some privacy concerns.
|
||||
|
||||
But you can use a [pre-trained model](https://drive.google.com/file/d/1KEwbP-Gx7KJr54wNoa63I56VI4USCAPL/view?usp=sharing) with `--load-model`.
|
||||
|
||||
## py-flow-dashboard
|
||||
|
||||
A realtime web based graph using Plotly/Dash.
|
||||
Probably the most informative example.
|
||||
But you may use a [pre-trained model](https://drive.google.com/file/d/1KEwbP-Gx7KJr54wNoa63I56VI4USCAPL/view?usp=sharing) with `--load-model`.
|
||||
|
||||
## py-flow-multiprocess
|
||||
|
||||
@@ -66,11 +79,20 @@ Dump received and parsed JSON objects.
|
||||
|
||||
## py-schema-validation
|
||||
|
||||
Validate nDPId JSON strings against pre-defined JSON schema's.
|
||||
Validate nDPId JSON messages against pre-defined JSON schema's.
|
||||
See `schema/`.
|
||||
Required by `tests/run_tests.sh`
|
||||
|
||||
## py-semantic-validation
|
||||
|
||||
Validate nDPId JSON strings against internal event semantics.
|
||||
Validate nDPId JSON messages against internal event semantics.
|
||||
Required by `tests/run_tests.sh`
|
||||
|
||||
## rs-simple
|
||||
|
||||
A straight forward Rust deserialization/parsing example.
|
||||
|
||||
## yaml-filebeat
|
||||
An example filebeat configuration to parse and send nDPId JSON
|
||||
messages to Elasticsearch. Allowing long term storage and data visualization with kibana
|
||||
and various other tools that interact with Elasticsearch (No logstash required).
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -3,7 +3,7 @@
|
||||
RRDDIR="${1}"
|
||||
OUTDIR="${2}"
|
||||
RRDARGS="--width=800 --height=400"
|
||||
REQUIRED_RRDCNT=106
|
||||
REQUIRED_RRDCNT=130
|
||||
|
||||
if [ -z "${RRDDIR}" ]; then
|
||||
printf '%s: Missing RRD directory which contains nDPIsrvd/Collectd files.\n' "${0}"
|
||||
@@ -62,25 +62,17 @@ rrdtool_graph() {
|
||||
}
|
||||
|
||||
rrdtool_graph Flows Amount "${OUTDIR}/flows" \
|
||||
DEF:flows_new=${RRDDIR}/gauge-flow_new_count.rrd:value:AVERAGE \
|
||||
DEF:flows_end=${RRDDIR}/gauge-flow_end_count.rrd:value:AVERAGE \
|
||||
DEF:flows_idle=${RRDDIR}/gauge-flow_idle_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data flows_new) \
|
||||
AREA:flows_new#54EC48::STACK \
|
||||
AREA:flows_end#ECD748::STACK \
|
||||
AREA:flows_idle#EC9D48::STACK \
|
||||
LINE2:flows_new#24BC14:"New." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg flows_new) \
|
||||
LINE2:flows_end#C9B215:"End." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg flows_end) \
|
||||
LINE2:flows_idle#CC7016:"Idle" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg flows_idle)
|
||||
DEF:flows_active=${RRDDIR}/gauge-flow_active_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data flows_active) \
|
||||
AREA:flows_active#54EC48::STACK \
|
||||
LINE2:flows_active#24BC14:"Active" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg flows_active)
|
||||
rrdtool_graph Detections Amount "${OUTDIR}/detections" \
|
||||
DEF:flows_detected=${RRDDIR}/gauge-flow_detected_count.rrd:value:AVERAGE \
|
||||
DEF:flows_guessed=${RRDDIR}/gauge-flow_guessed_count.rrd:value:AVERAGE \
|
||||
DEF:flows_not_detected=${RRDDIR}/gauge-flow_not_detected_count.rrd:value:AVERAGE \
|
||||
DEF:flows_detection_update=${RRDDIR}/gauge-flow_detection_update_count.rrd:value:AVERAGE \
|
||||
DEF:flows_risky=${RRDDIR}/gauge-flow_risky_count.rrd:value:AVERAGE \
|
||||
DEF:flows_detection_update=${RRDDIR}/counter-flow_detection_update_count.rrd:value:AVERAGE \
|
||||
DEF:flows_risky=${RRDDIR}/counter-flow_risky_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data flows_detected) \
|
||||
AREA:flows_detected#00bfff::STACK \
|
||||
AREA:flows_detection_update#a1b8c4::STACK \
|
||||
@@ -98,8 +90,8 @@ rrdtool_graph Detections Amount "${OUTDIR}/detections" \
|
||||
LINE2:flows_risky#b32d00:"Risky..........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg flows_risky)
|
||||
rrdtool_graph "Traffic (IN/OUT)" Bytes "${OUTDIR}/traffic" \
|
||||
DEF:total_src_bytes=${RRDDIR}/gauge-flow_src_total_bytes.rrd:value:AVERAGE \
|
||||
DEF:total_dst_bytes=${RRDDIR}/gauge-flow_dst_total_bytes.rrd:value:AVERAGE \
|
||||
DEF:total_src_bytes=${RRDDIR}/counter-flow_src_total_bytes.rrd:value:AVERAGE \
|
||||
DEF:total_dst_bytes=${RRDDIR}/counter-flow_dst_total_bytes.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data total_src_bytes) \
|
||||
AREA:total_src_bytes#00cc99:"Total-Bytes-Source2Dest":STACK \
|
||||
$(rrdtool_graph_print_cur_min_max_avg total_src_bytes) \
|
||||
@@ -137,7 +129,45 @@ rrdtool_graph Layer4-Flows Amount "${OUTDIR}/layer4" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg layer4_icmp) \
|
||||
LINE2:layer4_other#83588d:"Other" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg layer4_other)
|
||||
rrdtool_graph Flow-Breeds Amount "${OUTDIR}/breed" \
|
||||
rrdtool_graph Confidence Amount "${OUTDIR}/confidence" \
|
||||
DEF:conf_ip=${RRDDIR}/gauge-flow_confidence_by_ip.rrd:value:AVERAGE \
|
||||
DEF:conf_port=${RRDDIR}/gauge-flow_confidence_by_port.rrd:value:AVERAGE \
|
||||
DEF:conf_aggr=${RRDDIR}/gauge-flow_confidence_dpi_aggressive.rrd:value:AVERAGE \
|
||||
DEF:conf_cache=${RRDDIR}/gauge-flow_confidence_dpi_cache.rrd:value:AVERAGE \
|
||||
DEF:conf_pcache=${RRDDIR}/gauge-flow_confidence_dpi_partial_cache.rrd:value:AVERAGE \
|
||||
DEF:conf_part=${RRDDIR}/gauge-flow_confidence_dpi_partial.rrd:value:AVERAGE \
|
||||
DEF:conf_dpi=${RRDDIR}/gauge-flow_confidence_dpi.rrd:value:AVERAGE \
|
||||
DEF:conf_nbpf=${RRDDIR}/gauge-flow_confidence_nbpf.rrd:value:AVERAGE \
|
||||
DEF:conf_ukn=${RRDDIR}/gauge-flow_confidence_unknown.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data conf_ip) \
|
||||
AREA:conf_ip#4dff4d::STACK \
|
||||
AREA:conf_port#c2ff33::STACK \
|
||||
AREA:conf_aggr#ffe433::STACK \
|
||||
AREA:conf_cache#ffb133::STACK \
|
||||
AREA:conf_pcache#ff5f33::STACK \
|
||||
AREA:conf_part#e74b5b::STACK \
|
||||
AREA:conf_dpi#a5aca0::STACK \
|
||||
AREA:conf_nbpf#d7c1cc::STACK \
|
||||
AREA:conf_ukn#ddccbb::STACK \
|
||||
LINE2:conf_ip#00e600:"By-IP................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_ip) \
|
||||
LINE2:conf_port#8fce00:"By-Port.............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_port) \
|
||||
LINE2:conf_aggr#e6c700:"DPI-Aggressive......." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_aggr) \
|
||||
LINE2:conf_cache#e68e00:"DPI-Cache............" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_cache) \
|
||||
LINE2:conf_pcache#e63200:"DPI-Partial-Cache...." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_pcache) \
|
||||
LINE2:conf_part#c61b2b:"DPI-Partial.........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_part) \
|
||||
LINE2:conf_dpi#7e8877:"DPI.................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_dpi) \
|
||||
LINE2:conf_nbpf#ae849a:"nBPF................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_nbpf) \
|
||||
LINE2:conf_ukn#aa9988:"Unknown.............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg conf_ukn)
|
||||
rrdtool_graph Breeds Amount "${OUTDIR}/breed" \
|
||||
DEF:breed_safe=${RRDDIR}/gauge-flow_breed_safe_count.rrd:value:AVERAGE \
|
||||
DEF:breed_acceptable=${RRDDIR}/gauge-flow_breed_acceptable_count.rrd:value:AVERAGE \
|
||||
DEF:breed_fun=${RRDDIR}/gauge-flow_breed_fun_count.rrd:value:AVERAGE \
|
||||
@@ -171,17 +201,22 @@ rrdtool_graph Flow-Breeds Amount "${OUTDIR}/breed" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg breed_unrated) \
|
||||
LINE2:breed_unknown#ae849a:"Unknown.............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg breed_unknown)
|
||||
rrdtool_graph Flow-Categories 'Amount(SUM)' "${OUTDIR}/categories" \
|
||||
rrdtool_graph Categories 'Amount(SUM)' "${OUTDIR}/categories" \
|
||||
DEF:cat_adlt=${RRDDIR}/gauge-flow_category_adult_content_count.rrd:value:AVERAGE \
|
||||
DEF:cat_ads=${RRDDIR}/gauge-flow_category_advertisment_count.rrd:value:AVERAGE \
|
||||
DEF:cat_chat=${RRDDIR}/gauge-flow_category_chat_count.rrd:value:AVERAGE \
|
||||
DEF:cat_cloud=${RRDDIR}/gauge-flow_category_cloud_count.rrd:value:AVERAGE \
|
||||
DEF:cat_collab=${RRDDIR}/gauge-flow_category_collaborative_count.rrd:value:AVERAGE \
|
||||
DEF:cat_conn=${RRDDIR}/gauge-flow_category_conn_check_count.rrd:value:AVERAGE \
|
||||
DEF:cat_cybr=${RRDDIR}/gauge-flow_category_cybersecurity_count.rrd:value:AVERAGE \
|
||||
DEF:cat_xfer=${RRDDIR}/gauge-flow_category_data_transfer_count.rrd:value:AVERAGE \
|
||||
DEF:cat_db=${RRDDIR}/gauge-flow_category_database_count.rrd:value:AVERAGE \
|
||||
DEF:cat_dl=${RRDDIR}/gauge-flow_category_download_count.rrd:value:AVERAGE \
|
||||
DEF:cat_mail=${RRDDIR}/gauge-flow_category_email_count.rrd:value:AVERAGE \
|
||||
DEF:cat_fs=${RRDDIR}/gauge-flow_category_file_sharing_count.rrd:value:AVERAGE \
|
||||
DEF:cat_game=${RRDDIR}/gauge-flow_category_game_count.rrd:value:AVERAGE \
|
||||
DEF:cat_gamb=${RRDDIR}/gauge-flow_category_gambling_count.rrd:value:AVERAGE \
|
||||
DEF:cat_iot=${RRDDIR}/gauge-flow_category_iot_scada_count.rrd:value:AVERAGE \
|
||||
DEF:cat_mal=${RRDDIR}/gauge-flow_category_malware_count.rrd:value:AVERAGE \
|
||||
DEF:cat_med=${RRDDIR}/gauge-flow_category_media_count.rrd:value:AVERAGE \
|
||||
DEF:cat_min=${RRDDIR}/gauge-flow_category_mining_count.rrd:value:AVERAGE \
|
||||
@@ -196,12 +231,21 @@ rrdtool_graph Flow-Categories 'Amount(SUM)' "${OUTDIR}/categories" \
|
||||
DEF:cat_str=${RRDDIR}/gauge-flow_category_streaming_count.rrd:value:AVERAGE \
|
||||
DEF:cat_sys=${RRDDIR}/gauge-flow_category_system_count.rrd:value:AVERAGE \
|
||||
DEF:cat_ukn=${RRDDIR}/gauge-flow_category_unknown_count.rrd:value:AVERAGE \
|
||||
DEF:cat_uns=${RRDDIR}/gauge-flow_category_unspecified_count.rrd:value:AVERAGE \
|
||||
DEF:cat_vid=${RRDDIR}/gauge-flow_category_video_count.rrd:value:AVERAGE \
|
||||
DEF:cat_vrt=${RRDDIR}/gauge-flow_category_virt_assistant_count.rrd:value:AVERAGE \
|
||||
DEF:cat_voip=${RRDDIR}/gauge-flow_category_voip_count.rrd:value:AVERAGE \
|
||||
DEF:cat_vpn=${RRDDIR}/gauge-flow_category_vpn_count.rrd:value:AVERAGE \
|
||||
DEF:cat_web=${RRDDIR}/gauge-flow_category_web_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data cat_ads) \
|
||||
AREA:cat_ads#f1c232:"Advertisment..........." \
|
||||
DEF:cat_banned=${RRDDIR}/gauge-flow_category_banned_site_count.rrd:value:AVERAGE \
|
||||
DEF:cat_unavail=${RRDDIR}/gauge-flow_category_site_unavail_count.rrd:value:AVERAGE \
|
||||
DEF:cat_allowed=${RRDDIR}/gauge-flow_category_allowed_site_count.rrd:value:AVERAGE \
|
||||
DEF:cat_antimal=${RRDDIR}/gauge-flow_category_antimalware_count.rrd:value:AVERAGE \
|
||||
DEF:cat_crypto=${RRDDIR}/gauge-flow_category_crypto_currency_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data cat_adlt) \
|
||||
AREA:cat_adlt#f0c032:"Adult.................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_adlt) \
|
||||
STACK:cat_ads#f1c232:"Advertisment..........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_ads) \
|
||||
STACK:cat_chat#6fa8dc:"Chat..................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_chat) \
|
||||
@@ -209,6 +253,10 @@ rrdtool_graph Flow-Categories 'Amount(SUM)' "${OUTDIR}/categories" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_cloud) \
|
||||
STACK:cat_collab#3212aa:"Collaborative.........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_collab) \
|
||||
STACK:cat_conn#22aa11:"Connection-Check......." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_conn) \
|
||||
STACK:cat_cybr#00ff00:"Cybersecurity.........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_cybr) \
|
||||
STACK:cat_xfer#16537e:"Data-Transfer.........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_xfer) \
|
||||
STACK:cat_db#cc0000:"Database..............." \
|
||||
@@ -221,6 +269,10 @@ rrdtool_graph Flow-Categories 'Amount(SUM)' "${OUTDIR}/categories" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_fs) \
|
||||
STACK:cat_game#00ff26:"Game..................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_game) \
|
||||
STACK:cat_gamb#aa0026:"Gambling..............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_gamb) \
|
||||
STACK:cat_iot#227867:"IoT-Scada.............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_iot) \
|
||||
STACK:cat_mal#f44336:"Malware................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_mal) \
|
||||
STACK:cat_med#ff8300:"Media.................." \
|
||||
@@ -249,43 +301,56 @@ rrdtool_graph Flow-Categories 'Amount(SUM)' "${OUTDIR}/categories" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_sys) \
|
||||
STACK:cat_ukn#999999:"Unknown................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_ukn) \
|
||||
STACK:cat_uns#999999:"Unspecified............" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_uns) \
|
||||
STACK:cat_vid#518820:"Video.................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_vid) \
|
||||
STACK:cat_vrt#216820:"Virtual-Assistant......" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_vrt) \
|
||||
STACK:cat_voip#ffc700:"Voice-Over-IP.........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_voip) \
|
||||
STACK:cat_vpn#378035:"Virtual-Private-Network" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_vpn) \
|
||||
STACK:cat_web#00fffb:"Web...................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_web)
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_web) \
|
||||
STACK:cat_banned#ff1010:"Banned-Sites..........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_banned) \
|
||||
STACK:cat_unavail#ff1010:"Sites-Unavailable......" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_unavail) \
|
||||
STACK:cat_allowed#ff1010:"Allowed-Sites.........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_allowed) \
|
||||
STACK:cat_antimal#ff1010:"Antimalware............" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_antimal) \
|
||||
STACK:cat_crypto#afaf00:"Crypto-Currency........" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg cat_crypto)
|
||||
rrdtool_graph JSON 'Lines' "${OUTDIR}/json_lines" \
|
||||
DEF:json_lines=${RRDDIR}/gauge-json_lines.rrd:value:AVERAGE \
|
||||
DEF:json_lines=${RRDDIR}/counter-json_lines.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data json_lines) \
|
||||
AREA:json_lines#4dff4d::STACK \
|
||||
LINE2:json_lines#00e600:"JSON-lines" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg json_lines)
|
||||
rrdtool_graph JSON 'Bytes' "${OUTDIR}/json_bytes" \
|
||||
DEF:json_bytes=${RRDDIR}/gauge-json_bytes.rrd:value:AVERAGE \
|
||||
DEF:json_bytes=${RRDDIR}/counter-json_bytes.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data json_bytes) \
|
||||
AREA:json_bytes#4dff4d::STACK \
|
||||
LINE2:json_bytes#00e600:"JSON-bytes" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg json_bytes)
|
||||
rrdtool_graph Events 'Amouunt' "${OUTDIR}/events" \
|
||||
DEF:init=${RRDDIR}/gauge-init_count.rrd:value:AVERAGE \
|
||||
DEF:reconnect=${RRDDIR}/gauge-reconnect_count.rrd:value:AVERAGE \
|
||||
DEF:shutdown=${RRDDIR}/gauge-shutdown_count.rrd:value:AVERAGE \
|
||||
DEF:status=${RRDDIR}/gauge-status_count.rrd:value:AVERAGE \
|
||||
DEF:packet=${RRDDIR}/gauge-packet_count.rrd:value:AVERAGE \
|
||||
DEF:packet_flow=${RRDDIR}/gauge-packet_flow_count.rrd:value:AVERAGE \
|
||||
DEF:new=${RRDDIR}/gauge-flow_new_count.rrd:value:AVERAGE \
|
||||
DEF:end=${RRDDIR}/gauge-flow_end_count.rrd:value:AVERAGE \
|
||||
DEF:idle=${RRDDIR}/gauge-flow_idle_count.rrd:value:AVERAGE \
|
||||
DEF:update=${RRDDIR}/gauge-flow_update_count.rrd:value:AVERAGE \
|
||||
DEF:detection_update=${RRDDIR}/gauge-flow_detection_update_count.rrd:value:AVERAGE \
|
||||
DEF:guessed=${RRDDIR}/gauge-flow_guessed_count.rrd:value:AVERAGE \
|
||||
DEF:detected=${RRDDIR}/gauge-flow_detected_count.rrd:value:AVERAGE \
|
||||
DEF:not_detected=${RRDDIR}/gauge-flow_not_detected_count.rrd:value:AVERAGE \
|
||||
DEF:analyse=${RRDDIR}/gauge-flow_analyse_count.rrd:value:AVERAGE \
|
||||
DEF:error=${RRDDIR}/gauge-error_count_sum.rrd:value:AVERAGE \
|
||||
rrdtool_graph Events 'Amount' "${OUTDIR}/events" \
|
||||
DEF:init=${RRDDIR}/counter-init_count.rrd:value:AVERAGE \
|
||||
DEF:reconnect=${RRDDIR}/counter-reconnect_count.rrd:value:AVERAGE \
|
||||
DEF:shutdown=${RRDDIR}/counter-shutdown_count.rrd:value:AVERAGE \
|
||||
DEF:status=${RRDDIR}/counter-status_count.rrd:value:AVERAGE \
|
||||
DEF:packet=${RRDDIR}/counter-packet_count.rrd:value:AVERAGE \
|
||||
DEF:packet_flow=${RRDDIR}/counter-packet_flow_count.rrd:value:AVERAGE \
|
||||
DEF:new=${RRDDIR}/counter-flow_new_count.rrd:value:AVERAGE \
|
||||
DEF:ewd=${RRDDIR}/counter-flow_end_count.rrd:value:AVERAGE \
|
||||
DEF:idle=${RRDDIR}/counter-flow_idle_count.rrd:value:AVERAGE \
|
||||
DEF:update=${RRDDIR}/counter-flow_update_count.rrd:value:AVERAGE \
|
||||
DEF:detection_update=${RRDDIR}/counter-flow_detection_update_count.rrd:value:AVERAGE \
|
||||
DEF:guessed=${RRDDIR}/counter-flow_guessed_count.rrd:value:AVERAGE \
|
||||
DEF:detected=${RRDDIR}/counter-flow_detected_count.rrd:value:AVERAGE \
|
||||
DEF:not_detected=${RRDDIR}/counter-flow_not_detected_count.rrd:value:AVERAGE \
|
||||
DEF:analyse=${RRDDIR}/counter-flow_analyse_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data init) \
|
||||
AREA:init#f1c232:"Init..................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg init) \
|
||||
@@ -301,8 +366,8 @@ rrdtool_graph Events 'Amouunt' "${OUTDIR}/events" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg packet_flow) \
|
||||
STACK:new#c76700:"New...................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg new) \
|
||||
STACK:end#c78500:"End...................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg end) \
|
||||
STACK:ewd#c78500:"End...................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg ewd) \
|
||||
STACK:idle#c7a900:"Idle..................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg idle) \
|
||||
STACK:update#c7c400:"Updates................" \
|
||||
@@ -316,28 +381,25 @@ rrdtool_graph Events 'Amouunt' "${OUTDIR}/events" \
|
||||
STACK:not_detected#00bdc7:"Not-Detected..........." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg not_detected) \
|
||||
STACK:analyse#1400c7:"Analyse................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg analyse) \
|
||||
STACK:error#c70000:"Error.................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg error)
|
||||
rrdtool_graph Error-Events 'Amouunt' "${OUTDIR}/error_events" \
|
||||
DEF:error_0=${RRDDIR}/gauge-error_0_count.rrd:value:AVERAGE \
|
||||
DEF:error_1=${RRDDIR}/gauge-error_1_count.rrd:value:AVERAGE \
|
||||
DEF:error_2=${RRDDIR}/gauge-error_2_count.rrd:value:AVERAGE \
|
||||
DEF:error_3=${RRDDIR}/gauge-error_3_count.rrd:value:AVERAGE \
|
||||
DEF:error_4=${RRDDIR}/gauge-error_4_count.rrd:value:AVERAGE \
|
||||
DEF:error_5=${RRDDIR}/gauge-error_5_count.rrd:value:AVERAGE \
|
||||
DEF:error_6=${RRDDIR}/gauge-error_6_count.rrd:value:AVERAGE \
|
||||
DEF:error_7=${RRDDIR}/gauge-error_7_count.rrd:value:AVERAGE \
|
||||
DEF:error_8=${RRDDIR}/gauge-error_8_count.rrd:value:AVERAGE \
|
||||
DEF:error_9=${RRDDIR}/gauge-error_9_count.rrd:value:AVERAGE \
|
||||
DEF:error_10=${RRDDIR}/gauge-error_10_count.rrd:value:AVERAGE \
|
||||
DEF:error_11=${RRDDIR}/gauge-error_11_count.rrd:value:AVERAGE \
|
||||
DEF:error_12=${RRDDIR}/gauge-error_12_count.rrd:value:AVERAGE \
|
||||
DEF:error_13=${RRDDIR}/gauge-error_13_count.rrd:value:AVERAGE \
|
||||
DEF:error_14=${RRDDIR}/gauge-error_14_count.rrd:value:AVERAGE \
|
||||
DEF:error_15=${RRDDIR}/gauge-error_15_count.rrd:value:AVERAGE \
|
||||
DEF:error_16=${RRDDIR}/gauge-error_16_count.rrd:value:AVERAGE \
|
||||
DEF:error_unknown=${RRDDIR}/gauge-error_unknown_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_print_cur_min_max_avg analyse)
|
||||
rrdtool_graph Error-Events 'Amount' "${OUTDIR}/error_events" \
|
||||
DEF:error_0=${RRDDIR}/counter-error_unknown_datalink.rrd:value:AVERAGE \
|
||||
DEF:error_1=${RRDDIR}/counter-error_unknown_l3_protocol.rrd:value:AVERAGE \
|
||||
DEF:error_2=${RRDDIR}/counter-error_unsupported_datalink.rrd:value:AVERAGE \
|
||||
DEF:error_3=${RRDDIR}/counter-error_packet_too_short.rrd:value:AVERAGE \
|
||||
DEF:error_4=${RRDDIR}/counter-error_packet_type_unknown.rrd:value:AVERAGE \
|
||||
DEF:error_5=${RRDDIR}/counter-error_packet_header_invalid.rrd:value:AVERAGE \
|
||||
DEF:error_6=${RRDDIR}/counter-error_ip4_packet_too_short.rrd:value:AVERAGE \
|
||||
DEF:error_7=${RRDDIR}/counter-error_ip4_size_smaller_than_header.rrd:value:AVERAGE \
|
||||
DEF:error_8=${RRDDIR}/counter-error_ip4_l4_payload_detection.rrd:value:AVERAGE \
|
||||
DEF:error_9=${RRDDIR}/counter-error_ip6_packet_too_short.rrd:value:AVERAGE \
|
||||
DEF:error_10=${RRDDIR}/counter-error_ip6_size_smaller_than_header.rrd:value:AVERAGE \
|
||||
DEF:error_11=${RRDDIR}/counter-error_ip6_l4_payload_detection.rrd:value:AVERAGE \
|
||||
DEF:error_12=${RRDDIR}/counter-error_tcp_packet_too_short.rrd:value:AVERAGE \
|
||||
DEF:error_13=${RRDDIR}/counter-error_udp_packet_too_short.rrd:value:AVERAGE \
|
||||
DEF:error_14=${RRDDIR}/counter-error_capture_size_smaller_than_packet.rrd:value:AVERAGE \
|
||||
DEF:error_15=${RRDDIR}/counter-error_max_flows_to_track.rrd:value:AVERAGE \
|
||||
DEF:error_16=${RRDDIR}/counter-error_flow_memory_alloc.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data error_0) \
|
||||
AREA:error_0#ff6a00:"Unknown-datalink-layer-packet............................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg error_0) \
|
||||
@@ -372,10 +434,38 @@ rrdtool_graph Error-Events 'Amouunt' "${OUTDIR}/error_events" \
|
||||
STACK:error_15#4095bf:"Max-flows-to-track-reached..............................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg error_15) \
|
||||
STACK:error_16#0040ff:"Flow-memory-allocation-failed............................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg error_16) \
|
||||
STACK:error_unknown#4060bf:"Unknown-error............................................" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg error_unknown)
|
||||
rrdtool_graph Risky-Events 'Amouunt' "${OUTDIR}/risky_events" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg error_16)
|
||||
rrdtool_graph Risk-Severites Amount "${OUTDIR}/severities" \
|
||||
DEF:sever_crit=${RRDDIR}/gauge-flow_severity_critical.rrd:value:AVERAGE \
|
||||
DEF:sever_emer=${RRDDIR}/gauge-flow_severity_emergency.rrd:value:AVERAGE \
|
||||
DEF:sever_high=${RRDDIR}/gauge-flow_severity_high.rrd:value:AVERAGE \
|
||||
DEF:sever_low=${RRDDIR}/gauge-flow_severity_low.rrd:value:AVERAGE \
|
||||
DEF:sever_med=${RRDDIR}/gauge-flow_severity_medium.rrd:value:AVERAGE \
|
||||
DEF:sever_sev=${RRDDIR}/gauge-flow_severity_severe.rrd:value:AVERAGE \
|
||||
DEF:sever_ukn=${RRDDIR}/gauge-flow_severity_unknown.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data sever_crit) \
|
||||
AREA:sever_crit#e68e00::STACK \
|
||||
AREA:sever_emer#e63200::STACK \
|
||||
AREA:sever_high#e6c700::STACK \
|
||||
AREA:sever_low#00e600::STACK \
|
||||
AREA:sever_med#8fce00::STACK \
|
||||
AREA:sever_sev#c61b2b::STACK \
|
||||
AREA:sever_ukn#7e8877::STACK \
|
||||
LINE2:sever_crit#e68e00:"Critical............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_crit) \
|
||||
LINE2:sever_emer#e63200:"Emergency............" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_emer) \
|
||||
LINE2:sever_high#e6c700:"High................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_high) \
|
||||
LINE2:sever_low#00e600:"Low.................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_low) \
|
||||
LINE2:sever_med#8fce00:"Medium..............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_med) \
|
||||
LINE2:sever_sev#c61b2b:"Severe..............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_sev) \
|
||||
LINE2:sever_ukn#7e8877:"Unknown.............." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg sever_ukn)
|
||||
rrdtool_graph Risks 'Amount' "${OUTDIR}/risky_events" \
|
||||
DEF:risk_1=${RRDDIR}/gauge-flow_risk_1_count.rrd:value:AVERAGE \
|
||||
DEF:risk_2=${RRDDIR}/gauge-flow_risk_2_count.rrd:value:AVERAGE \
|
||||
DEF:risk_3=${RRDDIR}/gauge-flow_risk_3_count.rrd:value:AVERAGE \
|
||||
@@ -425,6 +515,10 @@ rrdtool_graph Risky-Events 'Amouunt' "${OUTDIR}/risky_events" \
|
||||
DEF:risk_47=${RRDDIR}/gauge-flow_risk_47_count.rrd:value:AVERAGE \
|
||||
DEF:risk_48=${RRDDIR}/gauge-flow_risk_48_count.rrd:value:AVERAGE \
|
||||
DEF:risk_49=${RRDDIR}/gauge-flow_risk_49_count.rrd:value:AVERAGE \
|
||||
DEF:risk_50=${RRDDIR}/gauge-flow_risk_50_count.rrd:value:AVERAGE \
|
||||
DEF:risk_51=${RRDDIR}/gauge-flow_risk_51_count.rrd:value:AVERAGE \
|
||||
DEF:risk_52=${RRDDIR}/gauge-flow_risk_52_count.rrd:value:AVERAGE \
|
||||
DEF:risk_53=${RRDDIR}/gauge-flow_risk_53_count.rrd:value:AVERAGE \
|
||||
DEF:risk_unknown=${RRDDIR}/gauge-flow_risk_unknown_count.rrd:value:AVERAGE \
|
||||
$(rrdtool_graph_colorize_missing_data risk_1) \
|
||||
AREA:risk_1#ff0000:"XSS-Attack..............................................." \
|
||||
@@ -525,5 +619,13 @@ rrdtool_graph Risky-Events 'Amouunt' "${OUTDIR}/risky_events" \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_48) \
|
||||
STACK:risk_49#dfffdf:"Minor-Issues............................................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_49) \
|
||||
STACK:risk_50#ef20df:"TCP-Connection-Issues...................................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_50) \
|
||||
STACK:risk_51#ef60df:"Fully-Encrypted.........................................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_51) \
|
||||
STACK:risk_52#efa0df:"Invalid-ALPN/SNI-combination............................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_52) \
|
||||
STACK:risk_53#efffdf:"Malware-Host-Contacted..................................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_53) \
|
||||
STACK:risk_unknown#df2060:"Unknown.................................................." \
|
||||
$(rrdtool_graph_print_cur_min_max_avg risk_unknown)
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
@@ -146,6 +158,25 @@
|
||||
<img src="detections_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_12hours.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_day.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_week.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_month.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
</main>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
@@ -165,25 +177,6 @@
|
||||
<img src="error_events_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_12hours.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_day.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_week.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_month.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
</main>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
@@ -165,6 +177,25 @@
|
||||
<img src="detections_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_12hours.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_day.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_week.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_month.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="confidence_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="traffic_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
@@ -336,6 +367,25 @@
|
||||
<img src="json_bytes_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_12hours.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_day.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_week.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_month.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link active" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
|
||||
@@ -97,6 +97,18 @@
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
|
||||
198
examples/c-collectd/www/dpi/risks.html
Normal file
198
examples/c-collectd/www/dpi/risks.html
Normal file
@@ -0,0 +1,198 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en"><head>
|
||||
<meta http-equiv="cache-control" content="max-age=0" />
|
||||
<meta http-equiv="cache-control" content="no-cache" />
|
||||
<meta http-equiv="expires" content="0" />
|
||||
<meta http-equiv="expires" content="Tue, 01 Jan 1980 1:00:00 GMT" />
|
||||
<meta http-equiv="pragma" content="no-cache" />
|
||||
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
|
||||
<meta name="description" content="nDPId RRD Graph">
|
||||
<meta name="author" content="Toni Uhlig">
|
||||
<link rel="icon" href="https://getbootstrap.com/docs/4.0/assets/img/favicons/favicon.ico">
|
||||
|
||||
<title>nDPId Dashboard</title>
|
||||
|
||||
<link rel="canonical" href="https://getbootstrap.com/docs/4.0/examples/dashboard/">
|
||||
|
||||
<!-- Bootstrap core CSS -->
|
||||
<link href="bootstrap.css" rel="stylesheet">
|
||||
|
||||
<!-- Custom styles for this template -->
|
||||
<link href="dashboard.css" rel="stylesheet">
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<nav class="navbar navbar-dark sticky-top bg-dark flex-md-nowrap p-0">
|
||||
<a class="navbar-brand col-sm-3 col-md-2 mr-0" href="https://github.com/utoni/nDPId">nDPId Collectd RRD Graph</a>
|
||||
</nav>
|
||||
|
||||
<div class="container-fluid">
|
||||
<div class="row">
|
||||
<nav class="col-md-2 d-none d-md-block bg-light sidebar">
|
||||
<div class="sidebar-sticky">
|
||||
|
||||
<h6 class="sidebar-heading d-flex justify-content-between align-items-center px-3 mt-4 mb-1 text-muted">
|
||||
<span>Graphs</span>
|
||||
</h6>
|
||||
|
||||
<ul class="nav flex-column mb-2">
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="index.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Home
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="flows.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line><polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Flows
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="other.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Other
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="detections.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Detections
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="categories.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Categories
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link active" href="risks.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Risks
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="jsons.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
JSONs
|
||||
</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="events.html">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="feather feather-file-text">
|
||||
<path d="M14 2H6a2 2 0 0 0-2 2v16a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V8z"></path>
|
||||
<polyline points="14 2 14 8 20 8"></polyline>
|
||||
<line x1="16" y1="13" x2="8" y2="13"></line>
|
||||
<line x1="16" y1="17" x2="8" y2="17"></line>
|
||||
<polyline points="10 9 9 9 8 9"></polyline>
|
||||
</svg>
|
||||
Events
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<main role="main" class="col-md-9 ml-sm-auto col-lg-10 pt-3 px-4">
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_12hours.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_day.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_week.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_month.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="severities_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_hour.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_12hours.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_day.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_week.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_month.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
<div class="d-flex justify-content-center flex-wrap flex-md-nowrap align-items-center pb-2 mb-3 border-bottom">
|
||||
<img src="risky_events_past_year.png" class="img-fluid" alt="Responsive image">
|
||||
</div>
|
||||
|
||||
</main>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Bootstrap core JavaScript
|
||||
================================================== -->
|
||||
<!-- Placed at the end of the document so the pages load faster -->
|
||||
<script src="jquery-3.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
|
||||
<script>window.jQuery || document.write('<script src="../../assets/js/vendor/jquery-slim.min.js"><\/script>')</script>
|
||||
<script src="popper.js"></script>
|
||||
<script src="bootstrap.js"></script>
|
||||
|
||||
<!-- Icons -->
|
||||
<script src="feather.js"></script>
|
||||
<script>
|
||||
feather.replace()
|
||||
</script>
|
||||
|
||||
</body></html>
|
||||
1921
examples/c-influxd/c-influxd.c
Normal file
1921
examples/c-influxd/c-influxd.c
Normal file
File diff suppressed because it is too large
Load Diff
6021
examples/c-influxd/grafana-dashboard-simple.json
Normal file
6021
examples/c-influxd/grafana-dashboard-simple.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,138 +0,0 @@
|
||||
#include <arpa/inet.h>
|
||||
#include <errno.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "jsmn.h"
|
||||
|
||||
static char serv_listen_addr[INET_ADDRSTRLEN] = DISTRIBUTOR_HOST;
|
||||
static uint16_t serv_listen_port = DISTRIBUTOR_PORT;
|
||||
|
||||
int main(void)
|
||||
{
|
||||
int sockfd = socket(AF_INET, SOCK_STREAM, 0);
|
||||
struct sockaddr_in remote_addr = {};
|
||||
socklen_t remote_addrlen = sizeof(remote_addr);
|
||||
uint8_t buf[NETWORK_BUFFER_MAX_SIZE];
|
||||
size_t buf_used = 0;
|
||||
size_t json_start = 0;
|
||||
unsigned long long int json_bytes = 0;
|
||||
jsmn_parser parser;
|
||||
jsmntok_t tokens[128];
|
||||
|
||||
if (sockfd < 0)
|
||||
{
|
||||
perror("socket");
|
||||
return 1;
|
||||
}
|
||||
|
||||
remote_addr.sin_family = AF_INET;
|
||||
if (inet_pton(AF_INET, &serv_listen_addr[0], &remote_addr.sin_addr) != 1)
|
||||
{
|
||||
perror("inet_pton");
|
||||
return 1;
|
||||
}
|
||||
remote_addr.sin_port = htons(serv_listen_port);
|
||||
|
||||
if (connect(sockfd, (struct sockaddr *)&remote_addr, remote_addrlen) != 0)
|
||||
{
|
||||
perror("connect");
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (1)
|
||||
{
|
||||
errno = 0;
|
||||
ssize_t bytes_read = read(sockfd, buf + buf_used, sizeof(buf) - buf_used);
|
||||
|
||||
if (bytes_read <= 0 || errno != 0)
|
||||
{
|
||||
fprintf(stderr, "Remote end disconnected.\n");
|
||||
break;
|
||||
}
|
||||
|
||||
buf_used += bytes_read;
|
||||
while (buf_used >= NETWORK_BUFFER_LENGTH_DIGITS + 1)
|
||||
{
|
||||
if (buf[NETWORK_BUFFER_LENGTH_DIGITS] != '{')
|
||||
{
|
||||
fprintf(stderr, "BUG: JSON invalid opening character: '%c'\n", buf[NETWORK_BUFFER_LENGTH_DIGITS]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
char * json_str_start = NULL;
|
||||
json_bytes = strtoull((char *)buf, &json_str_start, 10);
|
||||
json_bytes += (uint8_t *)json_str_start - buf;
|
||||
json_start = (uint8_t *)json_str_start - buf;
|
||||
|
||||
if (errno == ERANGE)
|
||||
{
|
||||
fprintf(stderr, "BUG: Size of JSON exceeds limit\n");
|
||||
exit(1);
|
||||
}
|
||||
if ((uint8_t *)json_str_start == buf)
|
||||
{
|
||||
fprintf(stderr, "BUG: Missing size before JSON string: \"%.*s\"\n", NETWORK_BUFFER_LENGTH_DIGITS, buf);
|
||||
exit(1);
|
||||
}
|
||||
if (json_bytes > sizeof(buf))
|
||||
{
|
||||
fprintf(stderr, "BUG: JSON string too big: %llu > %zu\n", json_bytes, sizeof(buf));
|
||||
exit(1);
|
||||
}
|
||||
if (json_bytes > buf_used)
|
||||
{
|
||||
break;
|
||||
}
|
||||
|
||||
if (buf[json_bytes - 2] != '}' ||
|
||||
buf[json_bytes - 1] != '\n')
|
||||
{
|
||||
fprintf(stderr, "BUG: Invalid JSON string: \"%.*s\"\n", (int)json_bytes, buf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
int r;
|
||||
jsmn_init(&parser);
|
||||
r = jsmn_parse(&parser,
|
||||
(char *)(buf + json_start),
|
||||
json_bytes - json_start,
|
||||
tokens,
|
||||
sizeof(tokens) / sizeof(tokens[0]));
|
||||
if (r < 0 || tokens[0].type != JSMN_OBJECT)
|
||||
{
|
||||
fprintf(stderr, "JSON parsing failed with return value %d at position %u\n", r, parser.pos);
|
||||
fprintf(stderr, "JSON string: '%.*s'\n", (int)(json_bytes - json_start), (char *)(buf + json_start));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
for (int i = 1; i < r; i++)
|
||||
{
|
||||
if (i % 2 == 1)
|
||||
{
|
||||
#ifdef JSMN_PARENT_LINKS
|
||||
printf("[%d][%d]", i, tokens[i].parent);
|
||||
#endif
|
||||
printf("[%.*s : ", tokens[i].end - tokens[i].start, (char *)(buf + json_start) + tokens[i].start);
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("%.*s] ", tokens[i].end - tokens[i].start, (char *)(buf + json_start) + tokens[i].start);
|
||||
}
|
||||
}
|
||||
printf("EoF\n");
|
||||
|
||||
memmove(buf, buf + json_bytes, buf_used - json_bytes);
|
||||
buf_used -= json_bytes;
|
||||
json_bytes = 0;
|
||||
json_start = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
714
examples/c-notifyd/c-notifyd.c
Normal file
714
examples/c-notifyd/c-notifyd.c
Normal file
@@ -0,0 +1,714 @@
|
||||
#include <dbus-1.0/dbus/dbus.h>
|
||||
#include <signal.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#include "nDPIsrvd.h"
|
||||
#include "utstring.h"
|
||||
#include "utils.h"
|
||||
|
||||
#define SLEEP_TIME_IN_S (3)
|
||||
|
||||
struct flow_user_data
|
||||
{
|
||||
nDPIsrvd_ull detected_risks;
|
||||
};
|
||||
|
||||
enum dbus_level
|
||||
{
|
||||
DBUS_LOW = 0,
|
||||
DBUS_NORMAL,
|
||||
DBUS_CRITICAL
|
||||
};
|
||||
|
||||
static char const * const flow_severities[] = {"Low", "Medium", "High", "Severe", "Critical", "Emergency"};
|
||||
static char const * const flow_breeds[] = {
|
||||
"Safe", "Acceptable", "Fun", "Unsafe", "Potentially_Dangerous", "Tracker_Ads", "Dangerous", "Unrated", "???"};
|
||||
static char const * const flow_categories[] = {"Unspecified",
|
||||
"Media",
|
||||
"VPN",
|
||||
"Email",
|
||||
"DataTransfer",
|
||||
"Web",
|
||||
"SocialNetwork",
|
||||
"Download",
|
||||
"Game",
|
||||
"Chat",
|
||||
"VoIP",
|
||||
"Database",
|
||||
"RemoteAccess",
|
||||
"Cloud",
|
||||
"Network",
|
||||
"Collaborative",
|
||||
"RPC",
|
||||
"Streaming",
|
||||
"System",
|
||||
"SoftwareUpdate",
|
||||
"Music",
|
||||
"Video",
|
||||
"Shopping",
|
||||
"Productivity",
|
||||
"FileSharing",
|
||||
"ConnCheck",
|
||||
"IoT-Scada",
|
||||
"VirtAssistant",
|
||||
"Cybersecurity",
|
||||
"AdultContent",
|
||||
"Mining",
|
||||
"Malware",
|
||||
"Advertisement",
|
||||
"Banned_Site",
|
||||
"Site_Unavailable",
|
||||
"Allowed_Site",
|
||||
"Antimalware",
|
||||
"Crypto_Currency",
|
||||
"Gambling",
|
||||
"Health",
|
||||
"ArtifIntelligence",
|
||||
"Finance",
|
||||
"News",
|
||||
"Sport",
|
||||
"Business",
|
||||
"Internet",
|
||||
"Blockchain_Crypto",
|
||||
"Blog_Forum",
|
||||
"Government",
|
||||
"Education",
|
||||
"CDN_Proxy",
|
||||
"Hw_Sw",
|
||||
"Dating",
|
||||
"Travel",
|
||||
"Food",
|
||||
"Bots",
|
||||
"Scanners",
|
||||
"Hosting",
|
||||
"Art",
|
||||
"Fashion",
|
||||
"Books",
|
||||
"Science",
|
||||
"Maps_Navigation",
|
||||
"Login_Portal",
|
||||
"Legal",
|
||||
"Environmental_Services",
|
||||
"Culture",
|
||||
"Housing",
|
||||
"Telecommunication",
|
||||
"Transportation",
|
||||
"Design",
|
||||
"Employment",
|
||||
"Events",
|
||||
"Weather",
|
||||
"Lifestyle",
|
||||
"Real_Estate",
|
||||
"Security",
|
||||
"Environment",
|
||||
"Hobby",
|
||||
"Computer_Science",
|
||||
"Construction",
|
||||
"Engineering",
|
||||
"Religion",
|
||||
"Entertainment",
|
||||
"Agriculture",
|
||||
"Technology",
|
||||
"Beauty",
|
||||
"History",
|
||||
"Politics",
|
||||
"Vehicles"};
|
||||
|
||||
static uint8_t desired_flow_severities[nDPIsrvd_ARRAY_LENGTH(flow_severities)] = {};
|
||||
static uint8_t desired_flow_breeds[nDPIsrvd_ARRAY_LENGTH(flow_breeds)] = {};
|
||||
static uint8_t desired_flow_categories[nDPIsrvd_ARRAY_LENGTH(flow_categories)] = {};
|
||||
|
||||
static unsigned int id = 0;
|
||||
static char const * const application = "nDPIsrvd.notifyd";
|
||||
|
||||
static int main_thread_shutdown = 0;
|
||||
|
||||
static char * pidfile = NULL;
|
||||
static char * serv_optarg = NULL;
|
||||
|
||||
#ifdef ENABLE_MEMORY_PROFILING
|
||||
void nDPIsrvd_memprof_log_alloc(size_t alloc_size)
|
||||
{
|
||||
(void)alloc_size;
|
||||
}
|
||||
|
||||
void nDPIsrvd_memprof_log_free(size_t free_size)
|
||||
{
|
||||
(void)free_size;
|
||||
}
|
||||
|
||||
void nDPIsrvd_memprof_log(char const * const format, ...)
|
||||
{
|
||||
va_list ap;
|
||||
|
||||
va_start(ap, format);
|
||||
fprintf(stderr, "%s", "nDPIsrvd MemoryProfiler: ");
|
||||
vfprintf(stderr, format, ap);
|
||||
fprintf(stderr, "%s\n", "");
|
||||
va_end(ap);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void send_to_dbus(char const * const icon,
|
||||
char const * const urgency,
|
||||
enum dbus_level level,
|
||||
char const * const summary,
|
||||
char const * const body,
|
||||
int timeout)
|
||||
{
|
||||
DBusConnection * connection = dbus_bus_get(DBUS_BUS_SESSION, 0);
|
||||
DBusMessage * message = dbus_message_new_method_call("org.freedesktop.Notifications",
|
||||
"/org/freedesktop/Notifications",
|
||||
"org.freedesktop.Notifications",
|
||||
"Notify");
|
||||
DBusMessageIter iter[4];
|
||||
dbus_message_iter_init_append(message, iter);
|
||||
dbus_message_iter_append_basic(iter, 's', &application);
|
||||
dbus_message_iter_append_basic(iter, 'u', &id);
|
||||
dbus_message_iter_append_basic(iter, 's', &icon);
|
||||
dbus_message_iter_append_basic(iter, 's', &summary);
|
||||
dbus_message_iter_append_basic(iter, 's', &body);
|
||||
dbus_message_iter_open_container(iter, 'a', "s", iter + 1);
|
||||
dbus_message_iter_close_container(iter, iter + 1);
|
||||
dbus_message_iter_open_container(iter, 'a', "{sv}", iter + 1);
|
||||
dbus_message_iter_open_container(iter + 1, 'e', 0, iter + 2);
|
||||
dbus_message_iter_append_basic(iter + 2, 's', &urgency);
|
||||
dbus_message_iter_open_container(iter + 2, 'v', "y", iter + 3);
|
||||
dbus_message_iter_append_basic(iter + 3, 'y', &level);
|
||||
dbus_message_iter_close_container(iter + 2, iter + 3);
|
||||
dbus_message_iter_close_container(iter + 1, iter + 2);
|
||||
dbus_message_iter_close_container(iter, iter + 1);
|
||||
dbus_message_iter_append_basic(iter, 'i', &timeout);
|
||||
dbus_connection_send(connection, message, 0);
|
||||
dbus_connection_flush(connection);
|
||||
dbus_message_unref(message);
|
||||
dbus_connection_unref(connection);
|
||||
|
||||
id++;
|
||||
}
|
||||
|
||||
static void notify(enum dbus_level level, char const * const summary, int timeout, char const * const body)
|
||||
{
|
||||
send_to_dbus("dialog-information", "urgency", level, summary, body, timeout);
|
||||
}
|
||||
|
||||
__attribute__((format(printf, 4, 5))) static void notifyf(
|
||||
enum dbus_level level, char const * const summary, int timeout, char const * const body_fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
char buf[BUFSIZ];
|
||||
|
||||
va_start(ap, body_fmt);
|
||||
if (vsnprintf(buf, sizeof(buf), body_fmt, ap) > 0)
|
||||
{
|
||||
notify(level, summary, timeout, buf);
|
||||
}
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
static ssize_t get_value_index(char const * const possible_values[],
|
||||
size_t possible_values_size,
|
||||
char const * const needle,
|
||||
size_t needle_len)
|
||||
{
|
||||
size_t i;
|
||||
|
||||
for (i = 0; i < possible_values_size; ++i)
|
||||
{
|
||||
if (strncmp(needle, possible_values[i], needle_len) == 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (i == possible_values_size)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static void check_value(char const * const possible_values[],
|
||||
size_t possible_values_size,
|
||||
char const * const needle,
|
||||
size_t needle_len)
|
||||
{
|
||||
if (get_value_index(possible_values, possible_values_size, needle, needle_len) == -1)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 5000, "BUG: Unknown value: %.*s", (int)needle_len, needle);
|
||||
}
|
||||
}
|
||||
|
||||
static enum nDPIsrvd_callback_return notifyd_json_callback(struct nDPIsrvd_socket * const sock,
|
||||
struct nDPIsrvd_instance * const instance,
|
||||
struct nDPIsrvd_thread_data * const thread_data,
|
||||
struct nDPIsrvd_flow * const flow)
|
||||
{
|
||||
(void)instance;
|
||||
(void)thread_data;
|
||||
|
||||
struct nDPIsrvd_json_token const * const flow_event_name = TOKEN_GET_SZ(sock, "flow_event_name");
|
||||
struct flow_user_data * flow_user_data = NULL;
|
||||
|
||||
if (flow != NULL)
|
||||
{
|
||||
flow_user_data = (struct flow_user_data *)flow->flow_user_data;
|
||||
}
|
||||
|
||||
if (TOKEN_VALUE_EQUALS_SZ(sock, flow_event_name, "detected") != 0 ||
|
||||
TOKEN_VALUE_EQUALS_SZ(sock, flow_event_name, "detection-update") != 0 ||
|
||||
TOKEN_VALUE_EQUALS_SZ(sock, flow_event_name, "update") != 0)
|
||||
{
|
||||
struct nDPIsrvd_json_token const * const flow_risks = TOKEN_GET_SZ(sock, "ndpi", "flow_risk");
|
||||
struct nDPIsrvd_json_token const * current = NULL;
|
||||
int next_child_index = -1, desired_severity_found = 0;
|
||||
UT_string risks;
|
||||
|
||||
utstring_init(&risks);
|
||||
|
||||
if (flow_risks != NULL)
|
||||
{
|
||||
while ((current = nDPIsrvd_get_next_token(sock, flow_risks, &next_child_index)) != NULL)
|
||||
{
|
||||
nDPIsrvd_ull numeric_risk_value = (nDPIsrvd_ull)-1;
|
||||
size_t flow_risk_key_len = 0;
|
||||
char const * const flow_risk_key = TOKEN_GET_KEY(sock, current, &flow_risk_key_len);
|
||||
|
||||
if (flow_risk_key == NULL || flow_risk_key_len == 0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
if (str_value_to_ull(flow_risk_key, &numeric_risk_value) == CONVERSION_OK && flow_user_data != NULL &&
|
||||
(flow_user_data->detected_risks & (1ull << numeric_risk_value)) == 0)
|
||||
{
|
||||
flow_user_data->detected_risks |= (1ull << (numeric_risk_value - 1));
|
||||
|
||||
char flow_risk_sz[flow_risk_key_len + 1];
|
||||
snprintf(flow_risk_sz, sizeof(flow_risk_sz), "%llu", numeric_risk_value);
|
||||
size_t flow_risk_len = 0;
|
||||
size_t flow_severity_len = 0;
|
||||
char const * const flow_risk_str =
|
||||
TOKEN_GET_VALUE(sock,
|
||||
TOKEN_GET_SZ(sock, "ndpi", "flow_risk", flow_risk_sz, "risk"),
|
||||
&flow_risk_len);
|
||||
char const * const flow_severity_str =
|
||||
TOKEN_GET_VALUE(sock,
|
||||
TOKEN_GET_SZ(sock, "ndpi", "flow_risk", flow_risk_sz, "severity"),
|
||||
&flow_severity_len);
|
||||
|
||||
if (flow_risk_str == NULL || flow_risk_len == 0 || flow_severity_str == NULL ||
|
||||
flow_severity_len == 0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
ssize_t severity_index = get_value_index(flow_severities,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_severities),
|
||||
flow_severity_str,
|
||||
flow_severity_len);
|
||||
if (severity_index != -1 && desired_flow_severities[severity_index] != 0)
|
||||
{
|
||||
desired_severity_found = 1;
|
||||
}
|
||||
utstring_printf(&risks,
|
||||
"Risk: '%.*s'\n"
|
||||
"Severity: '%.*s'\n",
|
||||
(int)flow_risk_len,
|
||||
flow_risk_str,
|
||||
(int)flow_severity_len,
|
||||
flow_severity_str);
|
||||
check_value(flow_severities,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_severities),
|
||||
flow_severity_str,
|
||||
flow_severity_len);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
size_t flow_srcip_len = 0;
|
||||
size_t flow_dstip_len = 0;
|
||||
size_t flow_breed_len = 0;
|
||||
size_t flow_category_len = 0;
|
||||
size_t flow_hostname_len = 0;
|
||||
|
||||
char const * const flow_srcip = TOKEN_GET_VALUE(sock, TOKEN_GET_SZ(sock, "src_ip"), &flow_srcip_len);
|
||||
char const * const flow_dstip = TOKEN_GET_VALUE(sock, TOKEN_GET_SZ(sock, "dst_ip"), &flow_dstip_len);
|
||||
char const * const flow_breed_str =
|
||||
TOKEN_GET_VALUE(sock, TOKEN_GET_SZ(sock, "ndpi", "breed"), &flow_breed_len);
|
||||
char const * const flow_category_str =
|
||||
TOKEN_GET_VALUE(sock, TOKEN_GET_SZ(sock, "ndpi", "category"), &flow_category_len);
|
||||
char const * const flow_hostname =
|
||||
TOKEN_GET_VALUE(sock, TOKEN_GET_SZ(sock, "ndpi", "hostname"), &flow_hostname_len);
|
||||
|
||||
if (flow_breed_str != NULL && flow_breed_len != 0 && flow_category_str != NULL && flow_category_len != 0)
|
||||
{
|
||||
ssize_t breed_index =
|
||||
get_value_index(flow_breeds, nDPIsrvd_ARRAY_LENGTH(flow_breeds), flow_breed_str, flow_breed_len);
|
||||
ssize_t category_index = get_value_index(flow_categories,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_categories),
|
||||
flow_category_str,
|
||||
flow_category_len);
|
||||
|
||||
if ((breed_index != -1 && desired_flow_breeds[breed_index] != 0) ||
|
||||
(category_index != -1 && desired_flow_categories[category_index] != 0) ||
|
||||
desired_severity_found != 0)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL,
|
||||
"Flow Notification",
|
||||
5000,
|
||||
"%.*s -> %.*s (%.*s)\nBreed: '%.*s', Category: '%.*s'\n%s",
|
||||
(int)flow_srcip_len,
|
||||
flow_srcip,
|
||||
(int)flow_dstip_len,
|
||||
flow_dstip,
|
||||
(flow_hostname_len > 0 ? (int)flow_hostname_len : 1),
|
||||
(flow_hostname_len > 0 ? flow_hostname : "-"),
|
||||
(int)flow_breed_len,
|
||||
flow_breed_str,
|
||||
(int)flow_category_len,
|
||||
flow_category_str,
|
||||
(utstring_len(&risks) > 0 ? utstring_body(&risks) : "No flow risks detected\n"));
|
||||
}
|
||||
|
||||
check_value(flow_breeds, nDPIsrvd_ARRAY_LENGTH(flow_breeds), flow_breed_str, flow_breed_len);
|
||||
check_value(flow_categories,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_categories),
|
||||
flow_category_str,
|
||||
flow_category_len);
|
||||
}
|
||||
else if (desired_severity_found != 0)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL,
|
||||
"Risky Flow",
|
||||
5000,
|
||||
"%.*s -> %.*s (%.*s)\n%s",
|
||||
(int)flow_srcip_len,
|
||||
flow_srcip,
|
||||
(int)flow_dstip_len,
|
||||
flow_dstip,
|
||||
(flow_hostname_len > 0 ? (int)flow_hostname_len : 1),
|
||||
(flow_hostname_len > 0 ? flow_hostname : "-"),
|
||||
utstring_body(&risks));
|
||||
}
|
||||
}
|
||||
|
||||
utstring_done(&risks);
|
||||
}
|
||||
|
||||
return CALLBACK_OK;
|
||||
}
|
||||
|
||||
static void print_usage(char const * const arg0)
|
||||
{
|
||||
static char const usage[] =
|
||||
"Usage: %s "
|
||||
"[-s host] [-C category...] [-B breed...] [-S severity...]\n\n"
|
||||
"\t-s\tDestination where nDPIsrvd is listening on.\n"
|
||||
"\t-C\tDesired nDPI category which fires a notificiation.\n"
|
||||
"\t \tCan be specified multiple times.\n"
|
||||
"\t-B\tDesired nDPI breed which fires a notification.\n"
|
||||
"\t \tCan be specified multiple times.\n"
|
||||
"\t-S\tDesired nDPI risk severity which fires a notification.\n"
|
||||
"\t \tCan be specified multiple times.\n"
|
||||
"\n"
|
||||
"Possible values for `-C': %s\n"
|
||||
"Possible values for `-B': %s\n"
|
||||
"Possible values for `-S': %s\n"
|
||||
"\n";
|
||||
|
||||
UT_string flow_categories_str, flow_breeds_str, flow_severities_str;
|
||||
utstring_init(&flow_categories_str);
|
||||
utstring_init(&flow_breeds_str);
|
||||
utstring_init(&flow_severities_str);
|
||||
for (size_t i = 0; i < nDPIsrvd_ARRAY_LENGTH(flow_categories); ++i)
|
||||
{
|
||||
utstring_printf(&flow_categories_str, "%s, ", flow_categories[i]);
|
||||
}
|
||||
flow_categories_str.d[flow_categories_str.i - 2] = '\0';
|
||||
for (size_t i = 0; i < nDPIsrvd_ARRAY_LENGTH(flow_breeds); ++i)
|
||||
{
|
||||
utstring_printf(&flow_breeds_str, "%s, ", flow_breeds[i]);
|
||||
}
|
||||
flow_breeds_str.d[flow_breeds_str.i - 2] = '\0';
|
||||
for (size_t i = 0; i < nDPIsrvd_ARRAY_LENGTH(flow_severities); ++i)
|
||||
{
|
||||
utstring_printf(&flow_severities_str, "%s, ", flow_severities[i]);
|
||||
}
|
||||
flow_severities_str.d[flow_severities_str.i - 2] = '\0';
|
||||
fprintf(stderr,
|
||||
usage,
|
||||
arg0,
|
||||
utstring_body(&flow_categories_str),
|
||||
utstring_body(&flow_breeds_str),
|
||||
utstring_body(&flow_severities_str));
|
||||
utstring_done(&flow_severities_str);
|
||||
utstring_done(&flow_breeds_str);
|
||||
utstring_done(&flow_categories_str);
|
||||
}
|
||||
|
||||
static int set_defaults(void)
|
||||
{
|
||||
char const * const default_severities[] = {"High", "Severe", "Critical", "Emergency"};
|
||||
char const * const default_breeds[] = {"Unsafe", "Potentially_Dangerous", "Dangerous", "Unrated"};
|
||||
char const * const default_categories[] = {"Mining", "Malware", "Banned_Site", "Crypto_Currency"};
|
||||
|
||||
for (size_t i = 0; i < nDPIsrvd_ARRAY_LENGTH(default_severities); ++i)
|
||||
{
|
||||
ssize_t index = get_value_index(flow_severities,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_severities),
|
||||
default_severities[i],
|
||||
strlen(default_severities[i]));
|
||||
if (index == -1)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
desired_flow_severities[index] = 1;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < nDPIsrvd_ARRAY_LENGTH(default_breeds); ++i)
|
||||
{
|
||||
ssize_t index = get_value_index(flow_breeds,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_breeds),
|
||||
default_breeds[i],
|
||||
strlen(default_breeds[i]));
|
||||
if (index == -1)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
desired_flow_breeds[index] = 1;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < nDPIsrvd_ARRAY_LENGTH(default_categories); ++i)
|
||||
{
|
||||
ssize_t index = get_value_index(flow_categories,
|
||||
nDPIsrvd_ARRAY_LENGTH(flow_categories),
|
||||
default_categories[i],
|
||||
strlen(default_categories[i]));
|
||||
if (index == -1)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
desired_flow_categories[index] = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int parse_options(int argc, char ** argv, struct nDPIsrvd_socket * const sock)
|
||||
{
|
||||
int opt, force_defaults = 1;
|
||||
|
||||
while ((opt = getopt(argc, argv, "hldp:s:C:B:S:")) != -1)
|
||||
{
|
||||
switch (opt)
|
||||
{
|
||||
case 'd':
|
||||
daemonize_enable();
|
||||
break;
|
||||
case 'p':
|
||||
free(pidfile);
|
||||
pidfile = strdup(optarg);
|
||||
break;
|
||||
case 's':
|
||||
free(serv_optarg);
|
||||
serv_optarg = strdup(optarg);
|
||||
break;
|
||||
case 'C':
|
||||
{
|
||||
ssize_t index =
|
||||
get_value_index(flow_categories, nDPIsrvd_ARRAY_LENGTH(flow_categories), optarg, strlen(optarg));
|
||||
if (index == -1)
|
||||
{
|
||||
fprintf(stderr, "Invalid argument for `-C': %s\n", optarg);
|
||||
return 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
desired_flow_categories[index] = 1;
|
||||
}
|
||||
force_defaults = 0;
|
||||
break;
|
||||
}
|
||||
case 'B':
|
||||
{
|
||||
ssize_t index =
|
||||
get_value_index(flow_breeds, nDPIsrvd_ARRAY_LENGTH(flow_breeds), optarg, strlen(optarg));
|
||||
if (index == -1)
|
||||
{
|
||||
fprintf(stderr, "Invalid argument for `-B': %s\n", optarg);
|
||||
return 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
desired_flow_breeds[index] = 1;
|
||||
}
|
||||
force_defaults = 0;
|
||||
break;
|
||||
}
|
||||
case 'S':
|
||||
{
|
||||
ssize_t index =
|
||||
get_value_index(flow_severities, nDPIsrvd_ARRAY_LENGTH(flow_severities), optarg, strlen(optarg));
|
||||
if (index == -1)
|
||||
{
|
||||
fprintf(stderr, "Invalid argument for `-S': %s\n", optarg);
|
||||
return 1;
|
||||
}
|
||||
else
|
||||
{
|
||||
desired_flow_severities[index] = 1;
|
||||
}
|
||||
force_defaults = 0;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
print_usage(argv[0]);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (force_defaults != 0 && set_defaults() != 0)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 5000, "%s\n", "BUG: Could not set default values.");
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (serv_optarg == NULL)
|
||||
{
|
||||
serv_optarg = strdup(DISTRIBUTOR_UNIX_SOCKET);
|
||||
}
|
||||
|
||||
if (nDPIsrvd_setup_address(&sock->address, serv_optarg) != 0)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 3000, "Could not parse address `%s'\n", serv_optarg);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (optind < argc)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 3000, "%s\n", "Unexpected argument after options");
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void sighandler(int signum)
|
||||
{
|
||||
switch (signum)
|
||||
{
|
||||
case SIGINT:
|
||||
notify(DBUS_LOW, "nDPIsrvd-notifyd", 3000, "Received SIGINT, shutdown.");
|
||||
break;
|
||||
case SIGTERM:
|
||||
notify(DBUS_LOW, "nDPIsrvd-notifyd", 3000, "Received SIGTERM, shutdown.");
|
||||
break;
|
||||
default:
|
||||
notify(DBUS_LOW, "nDPIsrvd-notifyd", 3000, "Received unknown signal, shutdown.");
|
||||
break;
|
||||
}
|
||||
|
||||
main_thread_shutdown++;
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv)
|
||||
{
|
||||
signal(SIGINT, sighandler);
|
||||
signal(SIGTERM, sighandler);
|
||||
signal(SIGPIPE, SIG_IGN);
|
||||
|
||||
struct nDPIsrvd_socket * sock =
|
||||
nDPIsrvd_socket_init(0, 0, 0, sizeof(struct flow_user_data), notifyd_json_callback, NULL, NULL);
|
||||
if (sock == NULL)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 5000, "%s\n", "BUG: nDPIsrvd socket memory allocation failed!");
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (parse_options(argc, argv, sock) != 0)
|
||||
{
|
||||
goto failure;
|
||||
}
|
||||
|
||||
if (daemonize_with_pidfile(pidfile) != 0)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
int previous_connect_succeeded = 1;
|
||||
do
|
||||
{
|
||||
if (nDPIsrvd_connect(sock) != CONNECT_OK)
|
||||
{
|
||||
if (main_thread_shutdown != 0)
|
||||
{
|
||||
break;
|
||||
}
|
||||
if (previous_connect_succeeded != 0)
|
||||
{
|
||||
notifyf(
|
||||
DBUS_CRITICAL, "nDPIsrvd-notifyd", 3000, "nDPIsrvd socket connect to %s failed!\n", serv_optarg);
|
||||
previous_connect_succeeded = 0;
|
||||
}
|
||||
nDPIsrvd_socket_close(sock);
|
||||
sleep(SLEEP_TIME_IN_S);
|
||||
continue;
|
||||
}
|
||||
previous_connect_succeeded = 1;
|
||||
|
||||
if (nDPIsrvd_set_read_timeout(sock, 3, 0) != 0)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 3000, "nDPIsrvd set read timeout failed: %s\n", strerror(errno));
|
||||
goto failure;
|
||||
}
|
||||
|
||||
notifyf(DBUS_NORMAL, "nDPIsrvd-notifyd", 3000, "Connected to '%s'\n", serv_optarg);
|
||||
|
||||
while (main_thread_shutdown == 0)
|
||||
{
|
||||
enum nDPIsrvd_read_return read_ret = nDPIsrvd_read(sock);
|
||||
if (errno == EINTR)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (read_ret == READ_TIMEOUT)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
if (read_ret != READ_OK)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL, "nDPIsrvd-notifyd", 3000, "nDPIsrvd socket read from %s failed!\n", serv_optarg);
|
||||
break;
|
||||
}
|
||||
|
||||
enum nDPIsrvd_parse_return parse_ret = nDPIsrvd_parse_all(sock);
|
||||
if (parse_ret != PARSE_NEED_MORE_DATA)
|
||||
{
|
||||
notifyf(DBUS_CRITICAL,
|
||||
"nDPIsrvd-notifyd",
|
||||
3000,
|
||||
"Could not parse JSON message %s: %.*s\n",
|
||||
nDPIsrvd_enum_to_string(parse_ret),
|
||||
nDPIsrvd_json_buffer_length(sock),
|
||||
nDPIsrvd_json_buffer_string(sock));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
nDPIsrvd_socket_close(sock);
|
||||
notifyf(DBUS_NORMAL, "nDPIsrvd-notifyd", 3000, "Disconnected from '%s'.\n", serv_optarg);
|
||||
if (main_thread_shutdown == 0)
|
||||
{
|
||||
sleep(SLEEP_TIME_IN_S);
|
||||
}
|
||||
} while (main_thread_shutdown == 0);
|
||||
|
||||
failure:
|
||||
nDPIsrvd_socket_free(&sock);
|
||||
daemonize_shutdown(pidfile);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -58,7 +58,7 @@ static void nDPIsrvd_write_flow_info_cb(struct nDPIsrvd_socket const * sock,
|
||||
#endif
|
||||
flow->last_seen,
|
||||
flow->idle_time,
|
||||
(flow->last_seen + flow->idle_time >= thread_data->most_recent_flow_time
|
||||
(thread_data != NULL && flow->last_seen + flow->idle_time >= thread_data->most_recent_flow_time
|
||||
? flow->last_seen + flow->idle_time - thread_data->most_recent_flow_time
|
||||
: 0));
|
||||
}
|
||||
@@ -253,14 +253,20 @@ int main(int argc, char ** argv)
|
||||
enum nDPIsrvd_parse_return parse_ret = nDPIsrvd_parse_all(sock);
|
||||
if (parse_ret != PARSE_NEED_MORE_DATA)
|
||||
{
|
||||
printf("Could not parse json string: %s\n", nDPIsrvd_enum_to_string(parse_ret));
|
||||
printf("Could not parse JSON message %s: %.*s\n",
|
||||
nDPIsrvd_enum_to_string(parse_ret),
|
||||
nDPIsrvd_json_buffer_length(sock),
|
||||
nDPIsrvd_json_buffer_string(sock));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (main_thread_shutdown == 0 && read_ret != READ_OK)
|
||||
{
|
||||
printf("Parse read %s\n", nDPIsrvd_enum_to_string(read_ret));
|
||||
printf("Parse read %s at JSON: %.*s\n",
|
||||
nDPIsrvd_enum_to_string(read_ret),
|
||||
nDPIsrvd_json_buffer_length(sock),
|
||||
nDPIsrvd_json_buffer_string(sock));
|
||||
}
|
||||
|
||||
return 1;
|
||||
|
||||
1
examples/cxx-graph
Submodule
1
examples/cxx-graph
Submodule
Submodule examples/cxx-graph added at 68eb1b105d
Submodule examples/js-rt-analyzer updated: 44a2bd0a9d...87cb7a0af5
Submodule examples/js-rt-analyzer-frontend updated: 6efa702a18...6806ef7d13
BIN
examples/ndpid_grafana_example.png
Normal file
BIN
examples/ndpid_grafana_example.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 62 KiB |
@@ -1,3 +0,0 @@
|
||||
body {
|
||||
background: black;
|
||||
}
|
||||
@@ -1,300 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import multiprocessing
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
sys.path.append(os.path.dirname(sys.argv[0]) + '/../../dependencies')
|
||||
sys.path.append(os.path.dirname(sys.argv[0]) + '/../share/nDPId')
|
||||
sys.path.append(os.path.dirname(sys.argv[0]))
|
||||
sys.path.append(sys.base_prefix + '/share/nDPId')
|
||||
import nDPIsrvd
|
||||
from nDPIsrvd import nDPIsrvdSocket
|
||||
import plotly_dash
|
||||
|
||||
FLOW_RISK_SEVERE = 4
|
||||
FLOW_RISK_HIGH = 3
|
||||
FLOW_RISK_MEDIUM = 2
|
||||
FLOW_RISK_LOW = 1
|
||||
|
||||
def nDPIsrvd_worker_onFlowCleanup(instance, current_flow, global_user_data):
|
||||
_, shared_flow_dict = global_user_data
|
||||
|
||||
flow_key = current_flow.flow_key
|
||||
|
||||
shared_flow_dict['current-flows'] -= 1
|
||||
|
||||
if flow_key not in shared_flow_dict:
|
||||
return True
|
||||
|
||||
shared_flow_dict['total-l4-bytes'] += shared_flow_dict[flow_key]['total-l4-bytes']
|
||||
|
||||
if shared_flow_dict[flow_key]['is_detected'] is True:
|
||||
shared_flow_dict['current-detected-flows'] -= 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_guessed'] is True:
|
||||
shared_flow_dict['current-guessed-flows'] -= 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_not_detected'] is True:
|
||||
shared_flow_dict['current-not-detected-flows'] -= 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_midstream'] is True:
|
||||
shared_flow_dict['current-midstream-flows'] -= 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_risky'] > 0:
|
||||
shared_flow_dict['current-risky-flows'] -= 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_risky'] == FLOW_RISK_LOW:
|
||||
shared_flow_dict['current-risky-flows-low'] -= 1
|
||||
elif shared_flow_dict[flow_key]['is_risky'] == FLOW_RISK_MEDIUM:
|
||||
shared_flow_dict['current-risky-flows-medium'] -= 1
|
||||
elif shared_flow_dict[flow_key]['is_risky'] == FLOW_RISK_HIGH:
|
||||
shared_flow_dict['current-risky-flows-high'] -= 1
|
||||
elif shared_flow_dict[flow_key]['is_risky'] == FLOW_RISK_SEVERE:
|
||||
shared_flow_dict['current-risky-flows-severe'] -= 1
|
||||
|
||||
del shared_flow_dict[current_flow.flow_key]
|
||||
|
||||
return True
|
||||
|
||||
def nDPIsrvd_worker_onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
nsock, shared_flow_dict = global_user_data
|
||||
|
||||
shared_flow_dict['total-events'] += 1
|
||||
shared_flow_dict['total-json-bytes'] = nsock.received_bytes
|
||||
|
||||
if 'error_event_name' in json_dict:
|
||||
shared_flow_dict['total-base-events'] += 1
|
||||
|
||||
if 'daemon_event_name' in json_dict:
|
||||
shared_flow_dict['total-daemon-events'] += 1
|
||||
|
||||
if 'packet_event_name' in json_dict and \
|
||||
(json_dict['packet_event_name'] == 'packet' or \
|
||||
json_dict['packet_event_name'] == 'packet-flow'):
|
||||
shared_flow_dict['total-packet-events'] += 1
|
||||
|
||||
if 'flow_id' not in json_dict:
|
||||
return True
|
||||
else:
|
||||
flow_key = json_dict['alias'] + '-' + json_dict['source'] + '-' + str(json_dict['flow_id'])
|
||||
|
||||
if flow_key not in shared_flow_dict:
|
||||
current_flow.flow_key = flow_key
|
||||
shared_flow_dict[flow_key] = mgr.dict()
|
||||
shared_flow_dict[flow_key]['is_detected'] = False
|
||||
shared_flow_dict[flow_key]['is_guessed'] = False
|
||||
shared_flow_dict[flow_key]['is_not_detected'] = False
|
||||
shared_flow_dict[flow_key]['is_midstream'] = False
|
||||
shared_flow_dict[flow_key]['is_risky'] = 0
|
||||
shared_flow_dict[flow_key]['total-l4-bytes'] = 0
|
||||
|
||||
shared_flow_dict[flow_key]['json'] = mgr.dict()
|
||||
|
||||
shared_flow_dict['total-flows'] += 1
|
||||
shared_flow_dict['current-flows'] += 1
|
||||
|
||||
if current_flow.flow_key != flow_key:
|
||||
return False
|
||||
|
||||
if 'flow_src_tot_l4_payload_len' in json_dict and 'flow_dst_tot_l4_payload_len' in json_dict:
|
||||
shared_flow_dict[flow_key]['total-l4-bytes'] = json_dict['flow_src_tot_l4_payload_len'] + \
|
||||
json_dict['flow_dst_tot_l4_payload_len']
|
||||
|
||||
if 'midstream' in json_dict and json_dict['midstream'] != 0:
|
||||
if shared_flow_dict[flow_key]['is_midstream'] is False:
|
||||
shared_flow_dict['total-midstream-flows'] += 1
|
||||
shared_flow_dict['current-midstream-flows'] += 1
|
||||
shared_flow_dict[flow_key]['is_midstream'] = True
|
||||
|
||||
if 'ndpi' in json_dict:
|
||||
shared_flow_dict[flow_key]['json']['ndpi'] = json_dict['ndpi']
|
||||
|
||||
if 'flow_risk' in json_dict['ndpi']:
|
||||
if shared_flow_dict[flow_key]['is_risky'] == 0:
|
||||
shared_flow_dict['total-risky-flows'] += 1
|
||||
shared_flow_dict['current-risky-flows'] += 1
|
||||
|
||||
severity = shared_flow_dict[flow_key]['is_risky']
|
||||
if severity == FLOW_RISK_LOW:
|
||||
shared_flow_dict['current-risky-flows-low'] -= 1
|
||||
elif severity == FLOW_RISK_MEDIUM:
|
||||
shared_flow_dict['current-risky-flows-medium'] -= 1
|
||||
elif severity == FLOW_RISK_HIGH:
|
||||
shared_flow_dict['current-risky-flows-high'] -= 1
|
||||
elif severity == FLOW_RISK_SEVERE:
|
||||
shared_flow_dict['current-risky-flows-severe'] -= 1
|
||||
|
||||
for key in json_dict['ndpi']['flow_risk']:
|
||||
if json_dict['ndpi']['flow_risk'][key]['severity'] == 'Low':
|
||||
severity = max(severity, FLOW_RISK_LOW)
|
||||
elif json_dict['ndpi']['flow_risk'][key]['severity'] == 'Medium':
|
||||
severity = max(severity, FLOW_RISK_MEDIUM)
|
||||
elif json_dict['ndpi']['flow_risk'][key]['severity'] == 'High':
|
||||
severity = max(severity, FLOW_RISK_HIGH)
|
||||
elif json_dict['ndpi']['flow_risk'][key]['severity'] == 'Severe':
|
||||
severity = max(severity, FLOW_RISK_SEVERE)
|
||||
else:
|
||||
raise RuntimeError('Invalid flow risk severity: {}'.format(
|
||||
json_dict['ndpi']['flow_risk'][key]['severity']))
|
||||
|
||||
shared_flow_dict[flow_key]['is_risky'] = severity
|
||||
if severity == FLOW_RISK_LOW:
|
||||
shared_flow_dict['current-risky-flows-low'] += 1
|
||||
elif severity == FLOW_RISK_MEDIUM:
|
||||
shared_flow_dict['current-risky-flows-medium'] += 1
|
||||
elif severity == FLOW_RISK_HIGH:
|
||||
shared_flow_dict['current-risky-flows-high'] += 1
|
||||
elif severity == FLOW_RISK_SEVERE:
|
||||
shared_flow_dict['current-risky-flows-severe'] += 1
|
||||
|
||||
if 'flow_event_name' not in json_dict:
|
||||
return True
|
||||
|
||||
if json_dict['flow_state'] == 'finished' and \
|
||||
json_dict['ndpi']['proto'] != 'Unknown' and \
|
||||
shared_flow_dict[flow_key]['is_detected'] is False:
|
||||
shared_flow_dict['total-detected-flows'] += 1
|
||||
shared_flow_dict['current-detected-flows'] += 1
|
||||
shared_flow_dict[flow_key]['is_detected'] = True
|
||||
|
||||
if json_dict['flow_event_name'] == 'new':
|
||||
|
||||
shared_flow_dict['total-flow-new-events'] += 1
|
||||
|
||||
elif json_dict['flow_event_name'] == 'update':
|
||||
|
||||
shared_flow_dict['total-flow-update-events'] += 1
|
||||
|
||||
elif json_dict['flow_event_name'] == 'analyse':
|
||||
|
||||
shared_flow_dict['total-flow-analyse-events'] += 1
|
||||
|
||||
elif json_dict['flow_event_name'] == 'end':
|
||||
|
||||
shared_flow_dict['total-flow-end-events'] += 1
|
||||
|
||||
elif json_dict['flow_event_name'] == 'idle':
|
||||
|
||||
shared_flow_dict['total-flow-idle-events'] += 1
|
||||
|
||||
elif json_dict['flow_event_name'] == 'guessed':
|
||||
|
||||
shared_flow_dict['total-flow-guessed-events'] += 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_guessed'] is False:
|
||||
shared_flow_dict['total-guessed-flows'] += 1
|
||||
shared_flow_dict['current-guessed-flows'] += 1
|
||||
shared_flow_dict[flow_key]['is_guessed'] = True
|
||||
|
||||
elif json_dict['flow_event_name'] == 'not-detected':
|
||||
|
||||
shared_flow_dict['total-flow-not-detected-events'] += 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_not_detected'] is False:
|
||||
shared_flow_dict['total-not-detected-flows'] += 1
|
||||
shared_flow_dict['current-not-detected-flows'] += 1
|
||||
shared_flow_dict[flow_key]['is_not_detected'] = True
|
||||
|
||||
elif json_dict['flow_event_name'] == 'detected' or \
|
||||
json_dict['flow_event_name'] == 'detection-update':
|
||||
|
||||
if json_dict['flow_event_name'] == 'detection-update':
|
||||
shared_flow_dict['total-flow-detection-update-events'] += 1
|
||||
else:
|
||||
shared_flow_dict['total-flow-detected-events'] += 1
|
||||
|
||||
if shared_flow_dict[flow_key]['is_detected'] is False:
|
||||
shared_flow_dict['total-detected-flows'] += 1
|
||||
shared_flow_dict['current-detected-flows'] += 1
|
||||
shared_flow_dict[flow_key]['is_detected'] = True
|
||||
|
||||
if shared_flow_dict[flow_key]['is_guessed'] is True:
|
||||
shared_flow_dict['total-guessed-flows'] -= 1
|
||||
shared_flow_dict['current-guessed-flows'] -= 1
|
||||
shared_flow_dict[flow_key]['is_guessed'] = False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def nDPIsrvd_worker(address, shared_flow_dict):
|
||||
sys.stderr.write('Recv buffer size: {}\n'
|
||||
.format(nDPIsrvd.NETWORK_BUFFER_MAX_SIZE))
|
||||
sys.stderr.write('Connecting to {} ..\n'
|
||||
.format(address[0]+':'+str(address[1])
|
||||
if type(address) is tuple else address))
|
||||
|
||||
try:
|
||||
while True:
|
||||
try:
|
||||
nsock = nDPIsrvdSocket()
|
||||
nsock.connect(address)
|
||||
nsock.loop(nDPIsrvd_worker_onJsonLineRecvd,
|
||||
nDPIsrvd_worker_onFlowCleanup,
|
||||
(nsock, shared_flow_dict))
|
||||
except nDPIsrvd.SocketConnectionBroken:
|
||||
sys.stderr.write('Lost connection to {} .. reconnecting\n'
|
||||
.format(address[0]+':'+str(address[1])
|
||||
if type(address) is tuple else address))
|
||||
time.sleep(1.0)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
argparser = nDPIsrvd.defaultArgumentParser()
|
||||
argparser.add_argument('--listen-address', type=str, default='127.0.0.1', help='Plotly listen address')
|
||||
argparser.add_argument('--listen-port', type=str, default=8050, help='Plotly listen port')
|
||||
args = argparser.parse_args()
|
||||
address = nDPIsrvd.validateAddress(args)
|
||||
|
||||
mgr = multiprocessing.Manager()
|
||||
shared_flow_dict = mgr.dict()
|
||||
|
||||
shared_flow_dict['total-events'] = 0
|
||||
shared_flow_dict['total-flow-new-events'] = 0
|
||||
shared_flow_dict['total-flow-update-events'] = 0
|
||||
shared_flow_dict['total-flow-analyse-events'] = 0
|
||||
shared_flow_dict['total-flow-end-events'] = 0
|
||||
shared_flow_dict['total-flow-idle-events'] = 0
|
||||
shared_flow_dict['total-flow-detected-events'] = 0
|
||||
shared_flow_dict['total-flow-detection-update-events'] = 0
|
||||
shared_flow_dict['total-flow-guessed-events'] = 0
|
||||
shared_flow_dict['total-flow-not-detected-events'] = 0
|
||||
shared_flow_dict['total-packet-events'] = 0
|
||||
shared_flow_dict['total-base-events'] = 0
|
||||
shared_flow_dict['total-daemon-events'] = 0
|
||||
|
||||
shared_flow_dict['total-json-bytes'] = 0
|
||||
shared_flow_dict['total-l4-bytes'] = 0
|
||||
shared_flow_dict['total-flows'] = 0
|
||||
shared_flow_dict['total-detected-flows'] = 0
|
||||
shared_flow_dict['total-risky-flows'] = 0
|
||||
shared_flow_dict['total-midstream-flows'] = 0
|
||||
shared_flow_dict['total-guessed-flows'] = 0
|
||||
shared_flow_dict['total-not-detected-flows'] = 0
|
||||
|
||||
shared_flow_dict['current-flows'] = 0
|
||||
shared_flow_dict['current-detected-flows'] = 0
|
||||
shared_flow_dict['current-midstream-flows'] = 0
|
||||
shared_flow_dict['current-guessed-flows'] = 0
|
||||
shared_flow_dict['current-not-detected-flows'] = 0
|
||||
|
||||
shared_flow_dict['current-risky-flows'] = 0
|
||||
shared_flow_dict['current-risky-flows-severe'] = 0
|
||||
shared_flow_dict['current-risky-flows-high'] = 0
|
||||
shared_flow_dict['current-risky-flows-medium'] = 0
|
||||
shared_flow_dict['current-risky-flows-low'] = 0
|
||||
|
||||
nDPIsrvd_job = multiprocessing.Process(target=nDPIsrvd_worker,
|
||||
args=(address, shared_flow_dict))
|
||||
nDPIsrvd_job.start()
|
||||
|
||||
web_job = multiprocessing.Process(target=plotly_dash.web_worker,
|
||||
args=(shared_flow_dict, args.listen_address, args.listen_port))
|
||||
web_job.start()
|
||||
|
||||
nDPIsrvd_job.join()
|
||||
web_job.terminate()
|
||||
web_job.join()
|
||||
@@ -1,415 +0,0 @@
|
||||
import math
|
||||
|
||||
import dash
|
||||
|
||||
try:
|
||||
from dash import dcc
|
||||
except ImportError:
|
||||
import dash_core_components as dcc
|
||||
|
||||
try:
|
||||
from dash import html
|
||||
except ImportError:
|
||||
import dash_html_components as html
|
||||
|
||||
try:
|
||||
from dash import dash_table as dt
|
||||
except ImportError:
|
||||
import dash_table as dt
|
||||
|
||||
from dash.dependencies import Input, Output, State
|
||||
|
||||
import dash_daq as daq
|
||||
|
||||
import plotly.graph_objects as go
|
||||
|
||||
global shared_flow_dict
|
||||
|
||||
app = dash.Dash(__name__)
|
||||
|
||||
def generate_box():
|
||||
return {
|
||||
'display': 'flex', 'flex-direction': 'row',
|
||||
'background-color': '#082255'
|
||||
}
|
||||
|
||||
def generate_led_display(div_id, label_name):
|
||||
return daq.LEDDisplay(
|
||||
id=div_id,
|
||||
label={'label': label_name, 'style': {'color': '#C4CDD5'}},
|
||||
labelPosition='bottom',
|
||||
value='0',
|
||||
backgroundColor='#082255',
|
||||
color='#C4CDD5',
|
||||
)
|
||||
|
||||
def generate_gauge(div_id, label_name, max_value=10):
|
||||
return daq.Gauge(
|
||||
id=div_id,
|
||||
value=0,
|
||||
label={'label': label_name, 'style': {'color': '#C4CDD5'}},
|
||||
max=max_value,
|
||||
min=0,
|
||||
)
|
||||
|
||||
def build_gauge(key, max_value=100):
|
||||
gauge_max = int(max(max_value,
|
||||
shared_flow_dict[key]))
|
||||
grad_green = [0, int(gauge_max * 1/3)]
|
||||
grad_yellow = [int(gauge_max * 1/3), int(gauge_max * 2/3)]
|
||||
grad_red = [int(gauge_max * 2/3), gauge_max]
|
||||
|
||||
grad_dict = {
|
||||
"gradient":True,
|
||||
"ranges":{
|
||||
"green":grad_green,
|
||||
"yellow":grad_yellow,
|
||||
"red":grad_red
|
||||
}
|
||||
}
|
||||
|
||||
return shared_flow_dict[key], gauge_max, grad_dict
|
||||
|
||||
def build_piechart(labels, values, color_map=None):
|
||||
lay = dict(
|
||||
plot_bgcolor = '#082255',
|
||||
paper_bgcolor = '#082255',
|
||||
font={"color": "#fff"},
|
||||
uirevision=True,
|
||||
autosize=True,
|
||||
height=250,
|
||||
margin = {'autoexpand': True, 'b': 0, 'l': 0, 'r': 0, 't': 0, 'pad': 0},
|
||||
width = 500,
|
||||
uniformtext_minsize = 12,
|
||||
uniformtext_mode = 'hide',
|
||||
)
|
||||
|
||||
return go.Figure(layout=lay, data=[go.Pie(labels=labels, values=values, sort=False, marker_colors=color_map, textinfo='percent', textposition='inside')])
|
||||
|
||||
COLOR_MAP = {
|
||||
'piechart-flows': ['rgb(153, 153, 255)', 'rgb(153, 204, 255)', 'rgb(255, 204, 153)', 'rgb(255, 255, 255)'],
|
||||
'piechart-midstream-flows': ['rgb(255, 255, 153)', 'rgb(153, 153, 255)'],
|
||||
'piechart-risky-flows': ['rgb(255, 0, 0)', 'rgb(255, 128, 0)', 'rgb(255, 255, 0)', 'rgb(128, 255, 0)', 'rgb(153, 153, 255)'],
|
||||
'graph-flows': {'Current Active Flows': {'color': 'rgb(153, 153, 255)', 'width': 1},
|
||||
'Current Risky Flows': {'color': 'rgb(255, 153, 153)', 'width': 3},
|
||||
'Current Midstream Flows': {'color': 'rgb(255, 255, 153)', 'width': 3},
|
||||
'Current Guessed Flows': {'color': 'rgb(153, 204, 255)', 'width': 1},
|
||||
'Current Not-Detected Flows': {'color': 'rgb(255, 204, 153)', 'width': 1},
|
||||
'Current Unclassified Flows': {'color': 'rgb(255, 255, 255)', 'width': 1},
|
||||
},
|
||||
}
|
||||
|
||||
def generate_tab_flow():
|
||||
return html.Div([
|
||||
html.Div(children=[
|
||||
dcc.Interval(id="tab-flow-default-interval", interval=1 * 2000, n_intervals=0),
|
||||
|
||||
html.Div(children=[
|
||||
|
||||
dt.DataTable(
|
||||
id='table-info',
|
||||
columns=[{'id': c.lower(), 'name': c, 'editable': False}
|
||||
for c in ['Name', 'Total']],
|
||||
style_header={
|
||||
'backgroundColor': '#082233',
|
||||
'color': 'white'
|
||||
},
|
||||
style_data={
|
||||
'backgroundColor': '#082244',
|
||||
'color': 'white'
|
||||
},
|
||||
)
|
||||
|
||||
], style={'display': 'flex', 'flex-direction': 'row'}),
|
||||
|
||||
html.Div(children=[
|
||||
dcc.Graph(
|
||||
id='piechart-flows',
|
||||
config={
|
||||
'displayModeBar': False,
|
||||
},
|
||||
figure=build_piechart(['Detected', 'Guessed', 'Not-Detected', 'Unclassified'],
|
||||
[0, 0, 0, 0], COLOR_MAP['piechart-flows']),
|
||||
),
|
||||
], style={'padding': 10, 'flex': 1}),
|
||||
|
||||
html.Div(children=[
|
||||
dcc.Graph(
|
||||
id='piechart-midstream-flows',
|
||||
config={
|
||||
'displayModeBar': False,
|
||||
},
|
||||
figure=build_piechart(['Midstream', 'Not Midstream'],
|
||||
[0, 0], COLOR_MAP['piechart-midstream-flows']),
|
||||
),
|
||||
], style={'padding': 10, 'flex': 1}),
|
||||
|
||||
html.Div(children=[
|
||||
dcc.Graph(
|
||||
id='piechart-risky-flows',
|
||||
config={
|
||||
'displayModeBar': False,
|
||||
},
|
||||
figure=build_piechart(['Severy Risk', 'High Risk', 'Medium Risk', 'Low Risk', 'No Risk'],
|
||||
[0, 0], COLOR_MAP['piechart-risky-flows']),
|
||||
),
|
||||
], style={'padding': 10, 'flex': 1}),
|
||||
], style=generate_box()),
|
||||
|
||||
html.Div(children=[
|
||||
dcc.Interval(id="tab-flow-graph-interval", interval=4 * 1000, n_intervals=0),
|
||||
dcc.Store(id="graph-traces"),
|
||||
|
||||
html.Div(children=[
|
||||
dcc.Graph(
|
||||
id="graph-flows",
|
||||
config={
|
||||
'displayModeBar': True,
|
||||
'displaylogo': False,
|
||||
},
|
||||
style={'height':'60vh'},
|
||||
),
|
||||
], style={'padding': 10, 'flex': 1})
|
||||
], style=generate_box())
|
||||
])
|
||||
|
||||
def generate_tab_other():
|
||||
return html.Div([
|
||||
html.Div(children=[
|
||||
dcc.Interval(id="tab-other-default-interval", interval=1 * 2000, n_intervals=0),
|
||||
|
||||
html.Div(children=[
|
||||
dcc.Graph(
|
||||
id='piechart-events',
|
||||
config={
|
||||
'displayModeBar': False,
|
||||
},
|
||||
),
|
||||
], style={'padding': 10, 'flex': 1}),
|
||||
], style=generate_box())
|
||||
])
|
||||
|
||||
TABS_STYLES = {
|
||||
'height': '34px'
|
||||
}
|
||||
TAB_STYLE = {
|
||||
'borderBottom': '1px solid #d6d6d6',
|
||||
'backgroundColor': '#385285',
|
||||
'padding': '6px',
|
||||
'fontWeight': 'bold',
|
||||
}
|
||||
TAB_SELECTED_STYLE = {
|
||||
'borderTop': '1px solid #d6d6d6',
|
||||
'borderBottom': '1px solid #d6d6d6',
|
||||
'backgroundColor': '#119DFF',
|
||||
'color': 'white',
|
||||
'padding': '6px'
|
||||
}
|
||||
|
||||
app.layout = html.Div([
|
||||
dcc.Tabs(id="tabs-flow-dash", value="tab-flows", children=[
|
||||
dcc.Tab(label="Flow", value="tab-flows", style=TAB_STYLE,
|
||||
selected_style=TAB_SELECTED_STYLE,
|
||||
children=generate_tab_flow()),
|
||||
dcc.Tab(label="Other", value="tab-other", style=TAB_STYLE,
|
||||
selected_style=TAB_SELECTED_STYLE,
|
||||
children=generate_tab_other()),
|
||||
], style=TABS_STYLES),
|
||||
html.Div(id="tabs-content")
|
||||
])
|
||||
|
||||
def prettifyBytes(bytes_received):
|
||||
size_names = ['B', 'KB', 'MB', 'GB', 'TB']
|
||||
if bytes_received == 0:
|
||||
i = 0
|
||||
else:
|
||||
i = min(int(math.floor(math.log(bytes_received, 1024))), len(size_names) - 1)
|
||||
p = math.pow(1024, i)
|
||||
s = round(bytes_received / p, 2)
|
||||
return '{:.2f} {}'.format(s, size_names[i])
|
||||
|
||||
@app.callback(output=[Output('table-info', 'data'),
|
||||
Output('piechart-flows', 'figure'),
|
||||
Output('piechart-midstream-flows', 'figure'),
|
||||
Output('piechart-risky-flows', 'figure')],
|
||||
|
||||
inputs=[Input('tab-flow-default-interval', 'n_intervals')])
|
||||
def tab_flow_update_components(n):
|
||||
return [[{'name': 'JSON Events', 'total': shared_flow_dict['total-events']},
|
||||
{'name': 'JSON Bytes', 'total': prettifyBytes(shared_flow_dict['total-json-bytes'])},
|
||||
{'name': 'Layer4 Bytes', 'total': prettifyBytes(shared_flow_dict['total-l4-bytes'])},
|
||||
{'name': 'Flows', 'total': shared_flow_dict['total-flows']},
|
||||
{'name': 'Risky Flows', 'total': shared_flow_dict['total-risky-flows']},
|
||||
{'name': 'Midstream Flows', 'total': shared_flow_dict['total-midstream-flows']},
|
||||
{'name': 'Guessed Flows', 'total': shared_flow_dict['total-guessed-flows']},
|
||||
{'name': 'Not Detected Flows', 'total': shared_flow_dict['total-not-detected-flows']}],
|
||||
build_piechart(['Detected', 'Guessed', 'Not-Detected', 'Unclassified'],
|
||||
[shared_flow_dict['current-detected-flows'],
|
||||
shared_flow_dict['current-guessed-flows'],
|
||||
shared_flow_dict['current-not-detected-flows'],
|
||||
shared_flow_dict['current-flows']
|
||||
- shared_flow_dict['current-detected-flows']
|
||||
- shared_flow_dict['current-guessed-flows']
|
||||
- shared_flow_dict['current-not-detected-flows']],
|
||||
COLOR_MAP['piechart-flows']),
|
||||
build_piechart(['Midstream', 'Not Midstream'],
|
||||
[shared_flow_dict['current-midstream-flows'],
|
||||
shared_flow_dict['current-flows'] -
|
||||
shared_flow_dict['current-midstream-flows']],
|
||||
COLOR_MAP['piechart-midstream-flows']),
|
||||
build_piechart(['Severe', 'High', 'Medium', 'Low', 'No Risk'],
|
||||
[shared_flow_dict['current-risky-flows-severe'],
|
||||
shared_flow_dict['current-risky-flows-high'],
|
||||
shared_flow_dict['current-risky-flows-medium'],
|
||||
shared_flow_dict['current-risky-flows-low'],
|
||||
shared_flow_dict['current-flows'] -
|
||||
shared_flow_dict['current-risky-flows']],
|
||||
COLOR_MAP['piechart-risky-flows'])]
|
||||
|
||||
@app.callback(output=[Output('graph-flows', 'figure'),
|
||||
Output('graph-traces', 'data')],
|
||||
inputs=[Input('tab-flow-graph-interval', 'n_intervals'),
|
||||
Input('tab-flow-graph-interval', 'interval')],
|
||||
state=[State('graph-traces', 'data')])
|
||||
def tab_flow_update_graph(n, i, traces):
|
||||
if traces is None:
|
||||
traces = ([], [], [], [], [], [])
|
||||
|
||||
max_bins = 75
|
||||
|
||||
traces[0].append(shared_flow_dict['current-flows'])
|
||||
traces[1].append(shared_flow_dict['current-risky-flows'])
|
||||
traces[2].append(shared_flow_dict['current-midstream-flows'])
|
||||
traces[3].append(shared_flow_dict['current-guessed-flows'])
|
||||
traces[4].append(shared_flow_dict['current-not-detected-flows'])
|
||||
traces[5].append(shared_flow_dict['current-flows']
|
||||
- shared_flow_dict['current-detected-flows']
|
||||
- shared_flow_dict['current-guessed-flows']
|
||||
- shared_flow_dict['current-not-detected-flows'])
|
||||
if len(traces[0]) > max_bins:
|
||||
traces[0] = traces[0][1:]
|
||||
traces[1] = traces[1][1:]
|
||||
traces[2] = traces[2][1:]
|
||||
traces[3] = traces[3][1:]
|
||||
traces[4] = traces[4][1:]
|
||||
traces[5] = traces[5][1:]
|
||||
|
||||
i /= 1000.0
|
||||
x = list(range(max(n - max_bins, 0) * int(i), n * int(i), max(int(i), 0)))
|
||||
if len(x) > 0 and x[0] > 60:
|
||||
x = [round(t / 60, 2) for t in x]
|
||||
x_div = 60
|
||||
x_axis_title = 'Time (min)'
|
||||
else:
|
||||
x_div = 1
|
||||
x_axis_title = 'Time (sec)'
|
||||
min_x = max(0, x[0] if len(x) >= max_bins else 0)
|
||||
max_x = max((max_bins * i) / x_div, x[max_bins - 1] if len(x) >= max_bins else 0)
|
||||
|
||||
lay = dict(
|
||||
plot_bgcolor = '#082255',
|
||||
paper_bgcolor = '#082255',
|
||||
font={"color": "#fff"},
|
||||
xaxis = {
|
||||
'title': x_axis_title,
|
||||
"showgrid": False,
|
||||
"showline": False,
|
||||
"fixedrange": True,
|
||||
"tickmode": 'linear',
|
||||
"tick0": round(max_bins / x_div, 2),
|
||||
"dtick": round(max_bins / x_div, 2),
|
||||
},
|
||||
yaxis = {
|
||||
'title': 'Flow Count',
|
||||
"showgrid": False,
|
||||
"showline": False,
|
||||
"zeroline": False,
|
||||
"fixedrange": True,
|
||||
"tickmode": 'linear',
|
||||
"dtick": 10,
|
||||
},
|
||||
uirevision=True,
|
||||
autosize=True,
|
||||
bargap=0.01,
|
||||
bargroupgap=0,
|
||||
hovermode="closest",
|
||||
margin = {'b': 0, 'l': 0, 'r': 0, 't': 30, 'pad': 0},
|
||||
legend = {'borderwidth': 0},
|
||||
)
|
||||
|
||||
fig = go.Figure(layout=lay)
|
||||
fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='#004D80', zeroline=True, zerolinewidth=1, range=[min_x, max_x])
|
||||
fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='#004D80', zeroline=True, zerolinewidth=1)
|
||||
fig.add_trace(go.Scatter(
|
||||
x=x,
|
||||
y=traces[0],
|
||||
name='Current Active Flows',
|
||||
mode='lines+markers',
|
||||
line=COLOR_MAP['graph-flows']['Current Active Flows'],
|
||||
))
|
||||
fig.add_trace(go.Scatter(
|
||||
x=x,
|
||||
y=traces[1],
|
||||
name='Current Risky Flows',
|
||||
mode='lines+markers',
|
||||
line=COLOR_MAP['graph-flows']['Current Risky Flows'],
|
||||
))
|
||||
fig.add_trace(go.Scatter(
|
||||
x=x,
|
||||
y=traces[2],
|
||||
name='Current Midstream Flows',
|
||||
mode='lines+markers',
|
||||
line=COLOR_MAP['graph-flows']['Current Midstream Flows'],
|
||||
))
|
||||
fig.add_trace(go.Scatter(
|
||||
x=x,
|
||||
y=traces[3],
|
||||
name='Current Guessed Flows',
|
||||
mode='lines+markers',
|
||||
line=COLOR_MAP['graph-flows']['Current Guessed Flows'],
|
||||
))
|
||||
fig.add_trace(go.Scatter(
|
||||
x=x,
|
||||
y=traces[4],
|
||||
name='Current Not-Detected Flows',
|
||||
mode='lines+markers',
|
||||
line=COLOR_MAP['graph-flows']['Current Not-Detected Flows'],
|
||||
))
|
||||
fig.add_trace(go.Scatter(
|
||||
x=x,
|
||||
y=traces[5],
|
||||
name='Current Unclassified Flows',
|
||||
mode='lines+markers',
|
||||
line=COLOR_MAP['graph-flows']['Current Unclassified Flows'],
|
||||
))
|
||||
|
||||
return [fig, traces]
|
||||
|
||||
@app.callback(output=[Output('piechart-events', 'figure')],
|
||||
inputs=[Input('tab-other-default-interval', 'n_intervals')])
|
||||
def tab_other_update_components(n):
|
||||
return [build_piechart(['Base', 'Daemon', 'Packet',
|
||||
'Flow New', 'Flow Update', 'Flow Analyse', 'Flow End', 'Flow Idle',
|
||||
'Flow Detection', 'Flow Detection-Updates', 'Flow Guessed', 'Flow Not-Detected'],
|
||||
[shared_flow_dict['total-base-events'],
|
||||
shared_flow_dict['total-daemon-events'],
|
||||
shared_flow_dict['total-packet-events'],
|
||||
shared_flow_dict['total-flow-new-events'],
|
||||
shared_flow_dict['total-flow-update-events'],
|
||||
shared_flow_dict['total-flow-analyse-events'],
|
||||
shared_flow_dict['total-flow-end-events'],
|
||||
shared_flow_dict['total-flow-idle-events'],
|
||||
shared_flow_dict['total-flow-detected-events'],
|
||||
shared_flow_dict['total-flow-detection-update-events'],
|
||||
shared_flow_dict['total-flow-guessed-events'],
|
||||
shared_flow_dict['total-flow-not-detected-events']])]
|
||||
|
||||
def web_worker(mp_shared_flow_dict, listen_host, listen_port):
|
||||
global shared_flow_dict
|
||||
|
||||
shared_flow_dict = mp_shared_flow_dict
|
||||
|
||||
try:
|
||||
app.run_server(debug=False, host=listen_host, port=listen_port)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
@@ -1,3 +0,0 @@
|
||||
dash
|
||||
dash_daq
|
||||
Werkzeug==2.0
|
||||
@@ -120,26 +120,51 @@ class Stats:
|
||||
flow_count += 1
|
||||
current_flow = instances[alias][source].flows[flow_id]
|
||||
|
||||
flow_tot_l4_payload_len += current_flow.flow_src_tot_l4_payload_len + current_flow.flow_dst_tot_l4_payload_len
|
||||
risky += 1 if len(current_flow.flow_risk) > 0 else 0
|
||||
midstream += 1 if current_flow.midstream != 0 else 0
|
||||
guessed += 1 if current_flow.guessed != 0 else 0
|
||||
not_detected = 1 if current_flow.not_detected != 0 else 0
|
||||
try:
|
||||
flow_src_tot_l4_payload_len = current_flow.flow_src_tot_l4_payload_len
|
||||
flow_dst_tot_l4_payload_len = current_flow.flow_dst_tot_l4_payload_len
|
||||
flow_risk = current_flow.flow_risk
|
||||
midstream = current_flow.midstream
|
||||
guessed = current_flow.guessed
|
||||
not_detected = current_flow.not_detected
|
||||
except AttributeError:
|
||||
flow_src_tot_l4_payload_len = 0
|
||||
flow_dst_tot_l4_payload_len = 0
|
||||
flow_risk = []
|
||||
midstream = 0
|
||||
guessed = 0
|
||||
not_detected = 0
|
||||
|
||||
flow_tot_l4_payload_len += flow_src_tot_l4_payload_len + flow_dst_tot_l4_payload_len
|
||||
risky += 1 if len(flow_risk) > 0 else 0
|
||||
midstream += 1 if midstream != 0 else 0
|
||||
guessed += 1 if guessed != 0 else 0
|
||||
not_detected = 1 if not_detected != 0 else 0
|
||||
|
||||
return alias_count, source_count, flow_count, \
|
||||
flow_tot_l4_payload_len, \
|
||||
risky, midstream, guessed, not_detected
|
||||
|
||||
@staticmethod
|
||||
def prettifyBytes(bytes_received):
|
||||
size_names = ['B', 'KB', 'MB', 'GB', 'TB']
|
||||
def prettifyBytes(bytes_received, is_byte_unit = True):
|
||||
if not is_byte_unit:
|
||||
size_names = ['', 'K', 'M', 'G', 'T']
|
||||
divisor = 1000
|
||||
else:
|
||||
size_names = ['B', 'KiB', 'MiB', 'GiB', 'TiB']
|
||||
divisor = 1024
|
||||
|
||||
if bytes_received == 0:
|
||||
i = 0
|
||||
else:
|
||||
i = min(int(math.floor(math.log(bytes_received, 1024))), len(size_names) - 1)
|
||||
p = math.pow(1024, i)
|
||||
i = min(int(math.floor(math.log(bytes_received, divisor))), len(size_names) - 1)
|
||||
p = math.pow(divisor, i)
|
||||
s = round(bytes_received / p, 2)
|
||||
return '{:.2f} {}'.format(s, size_names[i])
|
||||
|
||||
if not is_byte_unit:
|
||||
return '{:.0f}{}'.format(s, ' ' + size_names[i] if len(size_names[i]) > 0 else size_names[i])
|
||||
else:
|
||||
return '{:.2f} {}'.format(s, size_names[i])
|
||||
|
||||
def resetStatus(self):
|
||||
if self.statusbar_enabled is False:
|
||||
@@ -232,9 +257,19 @@ def onFlowCleanup(instance, current_flow, global_user_data):
|
||||
|
||||
return True
|
||||
|
||||
def limitFloatValue(value, fmt, limit):
|
||||
if float(value) < float(limit) and float(value) > 0.0:
|
||||
return '<' + str(fmt).format(limit)
|
||||
else:
|
||||
return ' ' + str(fmt).format(value)
|
||||
|
||||
def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
stats = global_user_data
|
||||
stats.update(json_dict, current_flow)
|
||||
|
||||
if 'packet_event_id' in json_dict:
|
||||
return True
|
||||
|
||||
stats.resetStatus()
|
||||
|
||||
instance_and_source = ''
|
||||
@@ -311,6 +346,8 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
next_lines = []
|
||||
|
||||
if 'ndpi' in json_dict:
|
||||
ndpi_proto_categ_breed += ' '
|
||||
|
||||
if 'proto' in json_dict['ndpi']:
|
||||
if args.ignore_protocol is not None:
|
||||
for proto in args.ignore_protocol:
|
||||
@@ -343,7 +380,7 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
return True
|
||||
ndpi_proto_categ_breed += '[' + str(json_dict['ndpi']['breed']) + ']'
|
||||
|
||||
if 'flow_risk' in json_dict['ndpi']:
|
||||
if 'flow_risk' in json_dict['ndpi'] and args.hide_risk_info == False:
|
||||
severity = 0
|
||||
cnt = 0
|
||||
|
||||
@@ -371,7 +408,10 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
else:
|
||||
color = ''
|
||||
|
||||
next_lines[0] = '{}{}{}: {}'.format(color, 'RISK', TermColor.END, next_lines[0][:-2])
|
||||
if severity >= args.min_risk_severity:
|
||||
next_lines[0] = '{}{}{}: {}'.format(color, 'RISK', TermColor.END, next_lines[0][:-2])
|
||||
else:
|
||||
del next_lines[0]
|
||||
|
||||
line_suffix = ''
|
||||
flow_event_name = ''
|
||||
@@ -386,24 +426,31 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
flow_event_name += '{}{:>16}{}'.format(TermColor.WARNING,
|
||||
json_dict['flow_event_name'], TermColor.END)
|
||||
if args.print_analyse_results is True:
|
||||
next_lines = [' {:>9}|{:>9}|{:>9}|{:>9}|{:>15}|{:>8}'.format(
|
||||
next_lines = [' {:>10}|{:>10}|{:>10}|{:>10}|{:>17}|{:>9}'.format(
|
||||
'min', 'max', 'avg', 'stddev', 'variance', 'entropy')]
|
||||
next_lines += ['[IAT.........: {:>9.3f}|{:>9.3f}|{:>9.3f}|{:>9.3f}|{:>15.3f}|{:>8.3f}]'.format(
|
||||
nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['min']),
|
||||
nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['max']),
|
||||
nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['avg']),
|
||||
nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['stddev']),
|
||||
nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['var']),
|
||||
json_dict['data_analysis']['iat']['ent']
|
||||
next_lines += ['[IAT.........: {}|{}|{}|{}|{}|{}]'.format(
|
||||
limitFloatValue(nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['min']),
|
||||
'{:>9.3f}', 0.001),
|
||||
limitFloatValue(nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['max']),
|
||||
'{:>9.3f}', 0.001),
|
||||
limitFloatValue(nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['avg']),
|
||||
'{:>9.3f}', 0.001),
|
||||
limitFloatValue(nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['stddev']),
|
||||
'{:>9.3f}', 0.001),
|
||||
limitFloatValue(nDPIsrvd.toSeconds(json_dict['data_analysis']['iat']['var']),
|
||||
'{:>16.3f}', 0.001),
|
||||
limitFloatValue(json_dict['data_analysis']['iat']['ent'],
|
||||
'{:>8.3f}', 0.001)
|
||||
)]
|
||||
next_lines += ['']
|
||||
next_lines[-1] += '[PKTLEN......: {:>9.3f}|{:>9.3f}|{:>9.3f}|{:>9.3f}|{:>15.3f}|{:>8.3f}]'.format(
|
||||
json_dict['data_analysis']['pktlen']['min'],
|
||||
json_dict['data_analysis']['pktlen']['max'],
|
||||
json_dict['data_analysis']['pktlen']['avg'],
|
||||
json_dict['data_analysis']['pktlen']['stddev'],
|
||||
json_dict['data_analysis']['pktlen']['var'],
|
||||
json_dict['data_analysis']['pktlen']['ent']
|
||||
next_lines[-1] += '[PKTLEN......: {}|{}|{}|{}|{}|{}]'.format(
|
||||
limitFloatValue(json_dict['data_analysis']['pktlen']['min'], '{:>9.3f}', 0.001),
|
||||
limitFloatValue(json_dict['data_analysis']['pktlen']['max'], '{:>9.3f}', 0.001),
|
||||
limitFloatValue(json_dict['data_analysis']['pktlen']['avg'], '{:>9.3f}', 0.001),
|
||||
limitFloatValue(json_dict['data_analysis']['pktlen']['stddev'],
|
||||
'{:>9.3f}', 0.001),
|
||||
limitFloatValue(json_dict['data_analysis']['pktlen']['var'], '{:>16.3f}', 0.001),
|
||||
limitFloatValue(json_dict['data_analysis']['pktlen']['ent'], '{:>8.3f}', 0.001)
|
||||
)
|
||||
next_lines += ['']
|
||||
next_lines[-1] += '[BINS(c->s)..: {}]'.format(','.join([str(n) for n in json_dict['data_analysis']['bins']['c_to_s']]))
|
||||
@@ -429,11 +476,11 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
if json_dict['flow_event_name'] == 'new':
|
||||
line_suffix = ''
|
||||
if json_dict['midstream'] != 0:
|
||||
line_suffix += '[{}] '.format(TermColor.WARNING + TermColor.BLINK + 'MIDSTREAM' + TermColor.END)
|
||||
line_suffix += ' [{}]'.format(TermColor.WARNING + TermColor.BLINK + 'MIDSTREAM' + TermColor.END)
|
||||
if args.ipwhois is True:
|
||||
src_whois = whois(json_dict['src_ip'].lower())
|
||||
dst_whois = whois(json_dict['dst_ip'].lower())
|
||||
line_suffix += '['
|
||||
line_suffix += ' ['
|
||||
if src_whois is not None:
|
||||
line_suffix += '{}'.format(src_whois)
|
||||
if dst_whois is not None:
|
||||
@@ -454,17 +501,45 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
if args.print_hostname is True:
|
||||
line_suffix += '[{}]'.format(json_dict['ndpi']['hostname'])
|
||||
|
||||
if args.skip_empty is True:
|
||||
if json_dict['flow_src_tot_l4_payload_len'] == 0 or json_dict['flow_dst_tot_l4_payload_len'] == 0:
|
||||
stats.printStatus()
|
||||
return True
|
||||
|
||||
if args.print_bytes is True:
|
||||
src_color = ''
|
||||
dst_color = ''
|
||||
tot_color = ''
|
||||
if json_dict['flow_src_tot_l4_payload_len'] >= 1 * 1024 * 1024:
|
||||
tot_color = src_color = TermColor.HINT
|
||||
if json_dict['flow_src_tot_l4_payload_len'] >= 1 * 1024 * 1024 * 1024:
|
||||
src_color += TermColor.BOLD + TermColor.BLINK
|
||||
if json_dict['flow_dst_tot_l4_payload_len'] >= 1 * 1024 * 1024:
|
||||
tot_color = dst_color = TermColor.HINT
|
||||
if json_dict['flow_dst_tot_l4_payload_len'] >= 1 * 1024 * 1024 * 1024:
|
||||
dst_color += TermColor.BOLD + TermColor.BLINK
|
||||
line_suffix += '[' + src_color + Stats.prettifyBytes(json_dict['flow_src_tot_l4_payload_len']) + TermColor.END + ']' \
|
||||
'[' + dst_color + Stats.prettifyBytes(json_dict['flow_dst_tot_l4_payload_len']) + TermColor.END +']' \
|
||||
'[' + tot_color + Stats.prettifyBytes(json_dict['flow_src_tot_l4_payload_len'] + \
|
||||
json_dict['flow_dst_tot_l4_payload_len']) + TermColor.END + ']'
|
||||
|
||||
if args.print_packets is True:
|
||||
line_suffix += '[' + Stats.prettifyBytes(json_dict['flow_src_packets_processed'], False) + ']' \
|
||||
'[' + Stats.prettifyBytes(json_dict['flow_dst_packets_processed'], False) + ']'
|
||||
|
||||
if json_dict['l3_proto'] == 'ip4':
|
||||
print('{}{}{}{}{}: [{:.>6}] [{}][{:.>5}] [{:.>15}]{} -> [{:.>15}]{} {}{}' \
|
||||
print('{}{}{}{}{}: [{:.>6}]{} [{}][{:.>5}] [{:.>15}]{} -> [{:.>15}]{}{}{}' \
|
||||
''.format(timestamp, first_seen, last_seen, instance_and_source, flow_event_name,
|
||||
json_dict['flow_id'], json_dict['l3_proto'], json_dict['l4_proto'],
|
||||
json_dict['flow_id'],
|
||||
'[{:.>4}]'.format(json_dict['vlan_id']) if 'vlan_id' in json_dict else '',
|
||||
json_dict['l3_proto'], json_dict['l4_proto'],
|
||||
json_dict['src_ip'].lower(),
|
||||
'[{:.>5}]'.format(json_dict['src_port']) if 'src_port' in json_dict else '',
|
||||
json_dict['dst_ip'].lower(),
|
||||
'[{:.>5}]'.format(json_dict['dst_port']) if 'dst_port' in json_dict else '',
|
||||
ndpi_proto_categ_breed, line_suffix))
|
||||
elif json_dict['l3_proto'] == 'ip6':
|
||||
print('{}{}{}{}{}: [{:.>6}] [{}][{:.>5}] [{:.>39}]{} -> [{:.>39}]{} {}{}' \
|
||||
print('{}{}{}{}{}: [{:.>6}] [{}][{:.>5}] [{:.>39}]{} -> [{:.>39}]{}{}{}' \
|
||||
''.format(timestamp, first_seen, last_seen, instance_and_source, flow_event_name,
|
||||
json_dict['flow_id'], json_dict['l3_proto'], json_dict['l4_proto'],
|
||||
json_dict['src_ip'].lower(),
|
||||
@@ -484,19 +559,29 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
return True
|
||||
|
||||
if __name__ == '__main__':
|
||||
argparser = nDPIsrvd.defaultArgumentParser('Prettify and print events using the nDPIsrvd Python interface.')
|
||||
argparser = nDPIsrvd.defaultArgumentParser('Prettify and print events using the nDPIsrvd Python interface.', True)
|
||||
argparser.add_argument('--no-color', action='store_true', default=False,
|
||||
help='Disable all terminal colors.')
|
||||
argparser.add_argument('--no-blink', action='store_true', default=False,
|
||||
help='Disable all blink effects.')
|
||||
argparser.add_argument('--no-statusbar', action='store_true', default=False,
|
||||
help='Disable informational status bar.')
|
||||
argparser.add_argument('--hide-instance-info', action='store_true', default=False,
|
||||
help='Hide instance Alias/Source prefixed every line.')
|
||||
argparser.add_argument('--hide-risk-info', action='store_true', default=False,
|
||||
help='Skip printing risks.')
|
||||
argparser.add_argument('--print-timestamp', action='store_true', default=False,
|
||||
help='Print received event timestamps.')
|
||||
argparser.add_argument('--print-first-seen', action='store_true', default=False,
|
||||
help='Print first seen flow time diff.')
|
||||
argparser.add_argument('--print-last-seen', action='store_true', default=False,
|
||||
help='Print last seen flow time diff.')
|
||||
argparser.add_argument('--print-bytes', action='store_true', default=False,
|
||||
help='Print received/transmitted source/dest bytes for every flow.')
|
||||
argparser.add_argument('--print-packets', action='store_true', default=False,
|
||||
help='Print received/transmitted source/dest packets for every flow.')
|
||||
argparser.add_argument('--skip-empty', action='store_true', default=False,
|
||||
help='Do not print flows that did not carry any layer7 payload.')
|
||||
argparser.add_argument('--guessed', action='store_true', default=False, help='Print only guessed flow events.')
|
||||
argparser.add_argument('--not-detected', action='store_true', default=False, help='Print only undetected flow events.')
|
||||
argparser.add_argument('--detected', action='store_true', default=False, help='Print only detected flow events.')
|
||||
@@ -518,11 +603,15 @@ if __name__ == '__main__':
|
||||
argparser.add_argument('--ignore-category', action='append', help='Ignore printing lines with a certain category.')
|
||||
argparser.add_argument('--ignore-breed', action='append', help='Ignore printing lines with a certain breed.')
|
||||
argparser.add_argument('--ignore-hostname', action='append', help='Ignore printing lines with a certain hostname.')
|
||||
argparser.add_argument('--min-risk-severity', action='store', type=int, default=0, help='Print only risks with a risk severity greater or equal to the given argument')
|
||||
args = argparser.parse_args()
|
||||
|
||||
if args.no_color is True:
|
||||
TermColor.disableColor()
|
||||
|
||||
if args.no_blink is True:
|
||||
TermColor.disableBlink()
|
||||
|
||||
if args.ipwhois is True:
|
||||
import dns, ipwhois
|
||||
whois_db = dict()
|
||||
@@ -538,6 +627,7 @@ if __name__ == '__main__':
|
||||
sys.stderr.write('Connecting to {} ..\n'.format(address[0]+':'+str(address[1]) if type(address) is tuple else address))
|
||||
|
||||
nsock = nDPIsrvdSocket()
|
||||
nDPIsrvd.prepareJsonFilter(args, nsock)
|
||||
nsock.connect(address)
|
||||
nsock.timeout(1.0)
|
||||
stats = Stats(nsock)
|
||||
|
||||
@@ -15,7 +15,7 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
return True
|
||||
|
||||
if __name__ == '__main__':
|
||||
argparser = nDPIsrvd.defaultArgumentParser()
|
||||
argparser = nDPIsrvd.defaultArgumentParser('Plain and simple nDPIsrvd JSON event printer with filter capabilities.', True)
|
||||
args = argparser.parse_args()
|
||||
address = nDPIsrvd.validateAddress(args)
|
||||
|
||||
@@ -23,5 +23,6 @@ if __name__ == '__main__':
|
||||
sys.stderr.write('Connecting to {} ..\n'.format(address[0]+':'+str(address[1]) if type(address) is tuple else address))
|
||||
|
||||
nsock = nDPIsrvdSocket()
|
||||
nDPIsrvd.prepareJsonFilter(args, nsock)
|
||||
nsock.connect(address)
|
||||
nsock.loop(onJsonLineRecvd, None, None)
|
||||
|
||||
384
examples/py-machine-learning/keras-autoencoder.py
Executable file
384
examples/py-machine-learning/keras-autoencoder.py
Executable file
@@ -0,0 +1,384 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import base64
|
||||
import binascii
|
||||
import datetime as dt
|
||||
import math
|
||||
import matplotlib.animation as ani
|
||||
import matplotlib.pyplot as plt
|
||||
import multiprocessing as mp
|
||||
import numpy as np
|
||||
import os
|
||||
import queue
|
||||
import sys
|
||||
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import models, layers, preprocessing
|
||||
from tensorflow.keras.layers import Embedding, Masking, Input, Dense
|
||||
from tensorflow.keras.models import Model
|
||||
from tensorflow.keras.utils import plot_model
|
||||
from tensorflow.keras.losses import MeanSquaredError, KLDivergence
|
||||
from tensorflow.keras.optimizers import Adam, SGD
|
||||
from tensorflow.keras.callbacks import TensorBoard, EarlyStopping
|
||||
|
||||
sys.path.append(os.path.dirname(sys.argv[0]) + '/../../dependencies')
|
||||
sys.path.append(os.path.dirname(sys.argv[0]) + '/../share/nDPId')
|
||||
sys.path.append(os.path.dirname(sys.argv[0]))
|
||||
sys.path.append(sys.base_prefix + '/share/nDPId')
|
||||
import nDPIsrvd
|
||||
from nDPIsrvd import nDPIsrvdSocket, TermColor
|
||||
|
||||
INPUT_SIZE = nDPIsrvd.nDPId_PACKETS_PLEN_MAX
|
||||
LATENT_SIZE = 8
|
||||
TRAINING_SIZE = 512
|
||||
EPOCH_COUNT = 3
|
||||
BATCH_SIZE = 16
|
||||
LEARNING_RATE = 0.000001
|
||||
ES_PATIENCE = 3
|
||||
PLOT = False
|
||||
PLOT_HISTORY = 100
|
||||
TENSORBOARD = False
|
||||
TB_LOGPATH = 'logs/' + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
|
||||
VAE_USE_KLDIV = False
|
||||
VAE_USE_SGD = False
|
||||
|
||||
def generate_autoencoder():
|
||||
# TODO: The current model does handle *each* packet separatly.
|
||||
# But in fact, depending on the nDPId settings (nDPId_PACKETS_PER_FLOW_TO_SEND), packets can be in relation to each other.
|
||||
# The accuracy may (or may not) improve significantly, but some of changes in the code are required.
|
||||
input_i = Input(shape=(), name='input_i')
|
||||
input_e = Embedding(input_dim=INPUT_SIZE, output_dim=INPUT_SIZE, mask_zero=True, name='input_e')(input_i)
|
||||
masked_e = Masking(mask_value=0.0, name='masked_e')(input_e)
|
||||
encoded_h1 = Dense(4096, activation='relu', name='encoded_h1')(masked_e)
|
||||
encoded_h2 = Dense(2048, activation='relu', name='encoded_h2')(encoded_h1)
|
||||
encoded_h3 = Dense(1024, activation='relu', name='encoded_h3')(encoded_h2)
|
||||
encoded_h4 = Dense(512, activation='relu', name='encoded_h4')(encoded_h3)
|
||||
encoded_h5 = Dense(128, activation='relu', name='encoded_h5')(encoded_h4)
|
||||
encoded_h6 = Dense(64, activation='relu', name='encoded_h6')(encoded_h5)
|
||||
encoded_h7 = Dense(32, activation='relu', name='encoded_h7')(encoded_h6)
|
||||
latent = Dense(LATENT_SIZE, activation='relu', name='latent')(encoded_h7)
|
||||
|
||||
input_l = Input(shape=(LATENT_SIZE), name='input_l')
|
||||
decoder_h1 = Dense(32, activation='relu', name='decoder_h1')(input_l)
|
||||
decoder_h2 = Dense(64, activation='relu', name='decoder_h2')(decoder_h1)
|
||||
decoder_h3 = Dense(128, activation='relu', name='decoder_h3')(decoder_h2)
|
||||
decoder_h4 = Dense(512, activation='relu', name='decoder_h4')(decoder_h3)
|
||||
decoder_h5 = Dense(1024, activation='relu', name='decoder_h5')(decoder_h4)
|
||||
decoder_h6 = Dense(2048, activation='relu', name='decoder_h6')(decoder_h5)
|
||||
decoder_h7 = Dense(4096, activation='relu', name='decoder_h7')(decoder_h6)
|
||||
output_i = Dense(INPUT_SIZE, activation='sigmoid', name='output_i')(decoder_h7)
|
||||
|
||||
encoder = Model(input_e, latent, name='encoder')
|
||||
decoder = Model(input_l, output_i, name='decoder')
|
||||
return KLDivergence() if VAE_USE_KLDIV else MeanSquaredError(), \
|
||||
SGD() if VAE_USE_SGD else Adam(learning_rate=LEARNING_RATE), \
|
||||
Model(input_e, decoder(encoder(input_e)), name='VAE')
|
||||
|
||||
def compile_autoencoder():
|
||||
loss, optimizer, autoencoder = generate_autoencoder()
|
||||
autoencoder.compile(loss=loss, optimizer=optimizer, metrics=[])
|
||||
return autoencoder
|
||||
|
||||
def get_autoencoder(load_from_file=None):
|
||||
if load_from_file is None:
|
||||
autoencoder = compile_autoencoder()
|
||||
else:
|
||||
autoencoder = models.load_model(load_from_file)
|
||||
|
||||
encoder_submodel = autoencoder.layers[1]
|
||||
decoder_submodel = autoencoder.layers[2]
|
||||
return encoder_submodel, decoder_submodel, autoencoder
|
||||
|
||||
def on_json_line(json_dict, instance, current_flow, global_user_data):
|
||||
if 'packet_event_name' not in json_dict:
|
||||
return True
|
||||
|
||||
if json_dict['packet_event_name'] != 'packet' and \
|
||||
json_dict['packet_event_name'] != 'packet-flow':
|
||||
return True
|
||||
|
||||
shutdown_event, training_event, padded_pkts, print_dots = global_user_data
|
||||
if shutdown_event.is_set():
|
||||
return False
|
||||
|
||||
try:
|
||||
buf = base64.b64decode(json_dict['pkt'], validate=True)
|
||||
except binascii.Error as err:
|
||||
sys.stderr.write('\nBase64 Exception: {}\n'.format(str(err)))
|
||||
sys.stderr.write('Affected JSON: {}\n'.format(str(json_dict)))
|
||||
sys.stderr.flush()
|
||||
return False
|
||||
|
||||
# Generate decimal byte buffer with valus from 0-255
|
||||
int_buf = []
|
||||
for v in buf:
|
||||
int_buf.append(int(v))
|
||||
|
||||
mat = np.array([int_buf], dtype='float64')
|
||||
|
||||
# Normalize the values
|
||||
mat = mat.astype('float64') / 255.0
|
||||
|
||||
# Mean removal
|
||||
matmean = np.mean(mat, dtype='float64')
|
||||
mat -= matmean
|
||||
|
||||
# Pad resulting matrice
|
||||
buf = preprocessing.sequence.pad_sequences(mat, padding="post", maxlen=INPUT_SIZE, truncating='post', dtype='float64')
|
||||
padded_pkts.put(buf[0])
|
||||
|
||||
#print(list(buf[0]))
|
||||
|
||||
if not training_event.is_set():
|
||||
sys.stdout.write('.' * print_dots)
|
||||
sys.stdout.flush()
|
||||
print_dots = 1
|
||||
else:
|
||||
print_dots += 1
|
||||
|
||||
return True
|
||||
|
||||
def ndpisrvd_worker(address, shared_shutdown_event, shared_training_event, shared_packet_list):
|
||||
nsock = nDPIsrvdSocket()
|
||||
|
||||
try:
|
||||
nsock.connect(address)
|
||||
print_dots = 1
|
||||
nsock.loop(on_json_line, None, (shared_shutdown_event, shared_training_event, shared_packet_list, print_dots))
|
||||
except nDPIsrvd.SocketConnectionBroken as err:
|
||||
sys.stderr.write('\nnDPIsrvd-Worker Socket Error: {}\n'.format(err))
|
||||
except KeyboardInterrupt:
|
||||
sys.stderr.write('\n')
|
||||
except Exception as err:
|
||||
sys.stderr.write('\nnDPIsrvd-Worker Exception: {}\n'.format(str(err)))
|
||||
sys.stderr.flush()
|
||||
|
||||
shared_shutdown_event.set()
|
||||
|
||||
def keras_worker(load_model, save_model, shared_shutdown_event, shared_training_event, shared_packet_queue, shared_plot_queue):
|
||||
shared_training_event.set()
|
||||
try:
|
||||
encoder, _, autoencoder = get_autoencoder(load_model)
|
||||
except Exception as err:
|
||||
sys.stderr.write('Could not load Keras model from file: {}\n'.format(str(err)))
|
||||
sys.stderr.flush()
|
||||
encoder, _, autoencoder = get_autoencoder()
|
||||
autoencoder.summary()
|
||||
additional_callbacks = []
|
||||
if TENSORBOARD is True:
|
||||
tensorboard = TensorBoard(log_dir=TB_LOGPATH, histogram_freq=1)
|
||||
additional_callbacks += [tensorboard]
|
||||
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=ES_PATIENCE, restore_best_weights=True, start_from_epoch=0, verbose=0, mode='auto')
|
||||
additional_callbacks += [early_stopping]
|
||||
shared_training_event.clear()
|
||||
|
||||
try:
|
||||
packets = list()
|
||||
while not shared_shutdown_event.is_set():
|
||||
try:
|
||||
packet = shared_packet_queue.get(timeout=1)
|
||||
except queue.Empty:
|
||||
packet = None
|
||||
|
||||
if packet is None:
|
||||
continue
|
||||
|
||||
packets.append(packet)
|
||||
if len(packets) % TRAINING_SIZE == 0:
|
||||
shared_training_event.set()
|
||||
print('\nGot {} packets, training..'.format(len(packets)))
|
||||
tmp = np.array(packets)
|
||||
history = autoencoder.fit(
|
||||
tmp, tmp, epochs=EPOCH_COUNT, batch_size=BATCH_SIZE,
|
||||
validation_split=0.2,
|
||||
shuffle=True,
|
||||
callbacks=[additional_callbacks]
|
||||
)
|
||||
reconstructed_data = autoencoder.predict(tmp)
|
||||
mse = np.mean(np.square(tmp - reconstructed_data))
|
||||
reconstruction_accuracy = (1.0 / mse)
|
||||
encoded_data = encoder.predict(tmp)
|
||||
latent_activations = encoder.predict(tmp)
|
||||
shared_plot_queue.put((reconstruction_accuracy, history.history['val_loss'], encoded_data[:, 0], encoded_data[:, 1], latent_activations))
|
||||
packets.clear()
|
||||
shared_training_event.clear()
|
||||
except KeyboardInterrupt:
|
||||
sys.stderr.write('\n')
|
||||
except Exception as err:
|
||||
if len(str(err)) == 0:
|
||||
err = 'Unknown'
|
||||
sys.stderr.write('\nKeras-Worker Exception: {}\n'.format(str(err)))
|
||||
sys.stderr.flush()
|
||||
|
||||
if save_model is not None:
|
||||
sys.stderr.write('Saving model to {}\n'.format(save_model))
|
||||
sys.stderr.flush()
|
||||
autoencoder.save(save_model)
|
||||
|
||||
try:
|
||||
shared_shutdown_event.set()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def plot_animate(i, shared_plot_queue, ax, xs, ys):
|
||||
if not shared_plot_queue.empty():
|
||||
accuracy, loss, encoded_data0, encoded_data1, latent_activations = shared_plot_queue.get(timeout=1)
|
||||
epochs = len(loss)
|
||||
loss_mean = sum(loss) / epochs
|
||||
else:
|
||||
return
|
||||
|
||||
(ax1, ax2, ax3, ax4) = ax
|
||||
(ys1, ys2, ys3, ys4) = ys
|
||||
|
||||
if len(xs) == 0:
|
||||
xs.append(epochs)
|
||||
else:
|
||||
xs.append(xs[-1] + epochs)
|
||||
ys1.append(accuracy)
|
||||
ys2.append(loss_mean)
|
||||
|
||||
xs = xs[-PLOT_HISTORY:]
|
||||
ys1 = ys1[-PLOT_HISTORY:]
|
||||
ys2 = ys2[-PLOT_HISTORY:]
|
||||
|
||||
ax1.clear()
|
||||
ax1.plot(xs, ys1, '-')
|
||||
ax2.clear()
|
||||
ax2.plot(xs, ys2, '-')
|
||||
ax3.clear()
|
||||
ax3.scatter(encoded_data0, encoded_data1, marker='.')
|
||||
ax4.clear()
|
||||
ax4.imshow(latent_activations, cmap='viridis', aspect='auto')
|
||||
|
||||
ax1.set_xlabel('Epoch Count')
|
||||
ax1.set_ylabel('Accuracy')
|
||||
ax2.set_xlabel('Epoch Count')
|
||||
ax2.set_ylabel('Validation Loss')
|
||||
ax3.set_title('Latent Space')
|
||||
ax4.set_title('Latent Space Heatmap')
|
||||
ax4.set_xlabel('Latent Dimensions')
|
||||
ax4.set_ylabel('Datapoints')
|
||||
|
||||
def plot_worker(shared_shutdown_event, shared_plot_queue):
|
||||
try:
|
||||
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
|
||||
fig.tight_layout()
|
||||
ax1.set_xlabel('Epoch Count')
|
||||
ax1.set_ylabel('Accuracy')
|
||||
ax2.set_xlabel('Epoch Count')
|
||||
ax2.set_ylabel('Validation Loss')
|
||||
ax3.set_title('Latent Space')
|
||||
ax4.set_title('Latent Space Heatmap')
|
||||
ax4.set_xlabel('Latent Dimensions')
|
||||
ax4.set_ylabel('Datapoints')
|
||||
xs = []
|
||||
ys1 = []
|
||||
ys2 = []
|
||||
ys3 = []
|
||||
ys4 = []
|
||||
ani.FuncAnimation(fig, plot_animate, fargs=(shared_plot_queue, (ax1, ax2, ax3, ax4), xs, (ys1, ys2, ys3, ys4)), interval=1000, cache_frame_data=False)
|
||||
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
|
||||
plt.margins(x=0, y=0)
|
||||
plt.show()
|
||||
except Exception as err:
|
||||
sys.stderr.write('\nPlot-Worker Exception: {}\n'.format(str(err)))
|
||||
sys.stderr.flush()
|
||||
shared_shutdown_event.set()
|
||||
return
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.stderr.write('\b\n***************\n')
|
||||
sys.stderr.write('*** WARNING ***\n')
|
||||
sys.stderr.write('***************\n')
|
||||
sys.stderr.write('\nThis is an unmature Autoencoder example.\n')
|
||||
sys.stderr.write('Please do not rely on any of it\'s output!\n\n')
|
||||
|
||||
argparser = nDPIsrvd.defaultArgumentParser()
|
||||
argparser.add_argument('--load-model', action='store',
|
||||
help='Load a pre-trained model file.')
|
||||
argparser.add_argument('--save-model', action='store',
|
||||
help='Save the trained model to a file.')
|
||||
argparser.add_argument('--training-size', action='store', type=int, default=TRAINING_SIZE,
|
||||
help='Set the amount of captured packets required to start the training phase.')
|
||||
argparser.add_argument('--batch-size', action='store', type=int, default=BATCH_SIZE,
|
||||
help='Set the batch size used for the training phase.')
|
||||
argparser.add_argument('--learning-rate', action='store', type=float, default=LEARNING_RATE,
|
||||
help='Set the (initial) learning rate for the optimizer.')
|
||||
argparser.add_argument('--plot', action='store_true', default=PLOT,
|
||||
help='Show some model metrics using pyplot.')
|
||||
argparser.add_argument('--plot-history', action='store', type=int, default=PLOT_HISTORY,
|
||||
help='Set the history size of Line plots. Requires --plot')
|
||||
argparser.add_argument('--tensorboard', action='store_true', default=TENSORBOARD,
|
||||
help='Enable TensorBoard compatible logging callback.')
|
||||
argparser.add_argument('--tensorboard-logpath', action='store', default=TB_LOGPATH,
|
||||
help='TensorBoard logging path.')
|
||||
argparser.add_argument('--use-sgd', action='store_true', default=VAE_USE_SGD,
|
||||
help='Use SGD optimizer instead of Adam.')
|
||||
argparser.add_argument('--use-kldiv', action='store_true', default=VAE_USE_KLDIV,
|
||||
help='Use Kullback-Leibler loss function instead of Mean-Squared-Error.')
|
||||
argparser.add_argument('--patience', action='store', type=int, default=ES_PATIENCE,
|
||||
help='Epoch value for EarlyStopping. This value forces VAE fitting to if no improvment achieved.')
|
||||
args = argparser.parse_args()
|
||||
address = nDPIsrvd.validateAddress(args)
|
||||
|
||||
LEARNING_RATE = args.learning_rate
|
||||
TRAINING_SIZE = args.training_size
|
||||
BATCH_SIZE = args.batch_size
|
||||
PLOT = args.plot
|
||||
PLOT_HISTORY = args.plot_history
|
||||
TENSORBOARD = args.tensorboard
|
||||
TB_LOGPATH = args.tensorboard_logpath if args.tensorboard_logpath is not None else TB_LOGPATH
|
||||
VAE_USE_SGD = args.use_sgd
|
||||
VAE_USE_KLDIV = args.use_kldiv
|
||||
ES_PATIENCE = args.patience
|
||||
|
||||
sys.stderr.write('Recv buffer size: {}\n'.format(nDPIsrvd.NETWORK_BUFFER_MAX_SIZE))
|
||||
sys.stderr.write('Connecting to {} ..\n'.format(address[0]+':'+str(address[1]) if type(address) is tuple else address))
|
||||
sys.stderr.write('PLOT={}, PLOT_HISTORY={}, LEARNING_RATE={}, TRAINING_SIZE={}, BATCH_SIZE={}\n\n'.format(PLOT, PLOT_HISTORY, LEARNING_RATE, TRAINING_SIZE, BATCH_SIZE))
|
||||
|
||||
mgr = mp.Manager()
|
||||
|
||||
shared_training_event = mgr.Event()
|
||||
shared_training_event.clear()
|
||||
|
||||
shared_shutdown_event = mgr.Event()
|
||||
shared_shutdown_event.clear()
|
||||
|
||||
shared_packet_queue = mgr.JoinableQueue()
|
||||
shared_plot_queue = mgr.JoinableQueue()
|
||||
|
||||
nDPIsrvd_job = mp.Process(target=ndpisrvd_worker, args=(
|
||||
address,
|
||||
shared_shutdown_event,
|
||||
shared_training_event,
|
||||
shared_packet_queue
|
||||
))
|
||||
nDPIsrvd_job.start()
|
||||
|
||||
keras_job = mp.Process(target=keras_worker, args=(
|
||||
args.load_model,
|
||||
args.save_model,
|
||||
shared_shutdown_event,
|
||||
shared_training_event,
|
||||
shared_packet_queue,
|
||||
shared_plot_queue
|
||||
))
|
||||
keras_job.start()
|
||||
|
||||
if PLOT is True:
|
||||
plot_job = mp.Process(target=plot_worker, args=(shared_shutdown_event, shared_plot_queue))
|
||||
plot_job.start()
|
||||
|
||||
try:
|
||||
shared_shutdown_event.wait()
|
||||
except KeyboardInterrupt:
|
||||
print('\nShutting down worker processess..')
|
||||
|
||||
if PLOT is True:
|
||||
plot_job.terminate()
|
||||
plot_job.join()
|
||||
nDPIsrvd_job.terminate()
|
||||
nDPIsrvd_job.join()
|
||||
keras_job.join(timeout=3)
|
||||
keras_job.terminate()
|
||||
@@ -28,6 +28,8 @@ ENABLE_FEATURE_PKTLEN = False
|
||||
ENABLE_FEATURE_DIRS = True
|
||||
ENABLE_FEATURE_BINS = True
|
||||
|
||||
PROTO_CLASSES = None
|
||||
|
||||
def getFeatures(json):
|
||||
return [json['flow_src_packets_processed'],
|
||||
json['flow_dst_packets_processed'],
|
||||
@@ -107,6 +109,18 @@ def plotPermutatedImportance(model, X, y):
|
||||
fig.tight_layout()
|
||||
matplotlib.pyplot.show()
|
||||
|
||||
def isProtoClass(proto_class, line):
|
||||
if type(proto_class) != list or type(line) != str:
|
||||
raise TypeError('Invalid type: {}/{}.'.format(type(proto_class), type(line)))
|
||||
|
||||
s = line.lower()
|
||||
|
||||
for x in range(len(proto_class)):
|
||||
if s.startswith(proto_class[x].lower()) is True:
|
||||
return x + 1
|
||||
|
||||
return 0
|
||||
|
||||
def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
if 'flow_event_name' not in json_dict:
|
||||
return True
|
||||
@@ -125,7 +139,6 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
try:
|
||||
X = getRelevantFeaturesJSON(json_dict)
|
||||
y = model.predict(X)
|
||||
s = model.score(X, y)
|
||||
p = model.predict_log_proba(X)
|
||||
|
||||
if y[0] <= 0:
|
||||
@@ -145,7 +158,7 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
pass
|
||||
else:
|
||||
pred_failed = True
|
||||
color_start = TermColor.FAIL + TermColor.BOLD + TermColor.BLINK
|
||||
color_start = TermColor.WARNING + TermColor.BOLD
|
||||
color_end = TermColor.END
|
||||
|
||||
probs = str()
|
||||
@@ -159,23 +172,24 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
probs += '{:>2.1f}, '.format(p[0][i])
|
||||
probs = probs[:-2]
|
||||
|
||||
print('DPI Engine detected: {}{:>24}{}, Predicted: {}{:>24}{}, Score: {}, Probabilities: {}'.format(
|
||||
print('DPI Engine detected: {}{:>24}{}, Predicted: {}{:>24}{}, Probabilities: {}'.format(
|
||||
color_start, json_dict['ndpi']['proto'].lower(), color_end,
|
||||
color_start, y_text, color_end, s, probs))
|
||||
color_start, y_text, color_end, probs))
|
||||
|
||||
if pred_failed is True:
|
||||
pclass = isProtoClass(args.proto_class, json_dict['ndpi']['proto'].lower())
|
||||
if pclass == 0:
|
||||
msg = 'false positive'
|
||||
else:
|
||||
msg = 'false negative'
|
||||
|
||||
print('{:>46} {}{}{}'.format('[-]', TermColor.FAIL + TermColor.BOLD + TermColor.BLINK, msg, TermColor.END))
|
||||
|
||||
except Exception as err:
|
||||
print('Got exception `{}\'\nfor json: {}'.format(err, json_dict))
|
||||
|
||||
return True
|
||||
|
||||
def isProtoClass(proto_class, line):
|
||||
s = line.lower()
|
||||
|
||||
for x in range(len(proto_class)):
|
||||
if s.startswith(proto_class[x].lower()) is True:
|
||||
return x + 1
|
||||
|
||||
return 0
|
||||
|
||||
if __name__ == '__main__':
|
||||
argparser = nDPIsrvd.defaultArgumentParser()
|
||||
argparser.add_argument('--load-model', action='store',
|
||||
@@ -203,7 +217,7 @@ if __name__ == '__main__':
|
||||
help='Number of sklearn processes during training.')
|
||||
argparser.add_argument('--sklearn-estimators', action='store', type=int, default=1000,
|
||||
help='Number of trees in the forest.')
|
||||
argparser.add_argument('--sklearn-min-samples-leaf', action='store', type=int, default=5,
|
||||
argparser.add_argument('--sklearn-min-samples-leaf', action='store', type=int, default=0.0001,
|
||||
help='The minimum number of samples required to be at a leaf node.')
|
||||
argparser.add_argument('--sklearn-class-weight', default='balanced', const='balanced', nargs='?',
|
||||
choices=['balanced', 'balanced_subsample'],
|
||||
@@ -211,6 +225,8 @@ if __name__ == '__main__':
|
||||
argparser.add_argument('--sklearn-max-features', default='sqrt', const='sqrt', nargs='?',
|
||||
choices=['sqrt', 'log2'],
|
||||
help='The number of features to consider when looking for the best split.')
|
||||
argparser.add_argument('--sklearn-max-depth', action='store', type=int, default=128,
|
||||
help='The maximum depth of a tree.')
|
||||
argparser.add_argument('--sklearn-verbosity', action='store', type=int, default=0,
|
||||
help='Controls the verbosity of sklearn\'s random forest classifier.')
|
||||
args = argparser.parse_args()
|
||||
@@ -233,6 +249,9 @@ if __name__ == '__main__':
|
||||
sys.exit(1)
|
||||
|
||||
if args.load_model is not None:
|
||||
sys.stderr.write('{}: You are loading an existing model file. ' \
|
||||
'Some --sklearn-* command line parameters won\'t have any effect!\n'.format(sys.argv[0]))
|
||||
|
||||
if args.enable_iat is not None:
|
||||
sys.stderr.write('{}: `--enable-iat` set, but you want to load an existing model.\n'.format(sys.argv[0]))
|
||||
sys.exit(1)
|
||||
@@ -250,6 +269,7 @@ if __name__ == '__main__':
|
||||
ENABLE_FEATURE_PKTLEN = args.enable_pktlen if args.enable_pktlen is not None else ENABLE_FEATURE_PKTLEN
|
||||
ENABLE_FEATURE_DIRS = args.disable_dirs if args.disable_dirs is not None else ENABLE_FEATURE_DIRS
|
||||
ENABLE_FEATURE_BINS = args.disable_bins if args.disable_bins is not None else ENABLE_FEATURE_BINS
|
||||
PROTO_CLASSES = args.proto_class
|
||||
|
||||
numpy.set_printoptions(formatter={'float_kind': "{:.1f}".format}, sign=' ')
|
||||
numpy.seterr(divide = 'ignore')
|
||||
@@ -278,9 +298,19 @@ if __name__ == '__main__':
|
||||
for line in reader:
|
||||
try:
|
||||
X += getRelevantFeaturesCSV(line)
|
||||
y += [isProtoClass(args.proto_class, line['proto'])]
|
||||
except RuntimeError as err:
|
||||
print('Error: `{}\'\non line: {}'.format(err, line))
|
||||
print('Runtime Error: `{}\'\non line {}: {}'.format(err, reader.line_num - 1, line))
|
||||
continue
|
||||
except TypeError as err:
|
||||
print('Type Error: `{}\'\non line {}: {}'.format(err, reader.line_num - 1, line))
|
||||
continue
|
||||
|
||||
try:
|
||||
y += [isProtoClass(args.proto_class, line['proto'])]
|
||||
except TypeError as err:
|
||||
X.pop()
|
||||
print('Type Error: `{}\'\non line {}: {}'.format(err, reader.line_num - 1, line))
|
||||
continue
|
||||
|
||||
sys.stderr.write('CSV data set contains {} entries.\n'.format(len(X)))
|
||||
|
||||
@@ -291,7 +321,8 @@ if __name__ == '__main__':
|
||||
n_estimators = args.sklearn_estimators,
|
||||
verbose = args.sklearn_verbosity,
|
||||
min_samples_leaf = args.sklearn_min_samples_leaf,
|
||||
max_features = args.sklearn_max_features
|
||||
max_features = args.sklearn_max_features,
|
||||
max_depth = args.sklearn_max_depth
|
||||
)
|
||||
options = (ENABLE_FEATURE_IAT, ENABLE_FEATURE_PKTLEN, ENABLE_FEATURE_DIRS, ENABLE_FEATURE_BINS, args.proto_class)
|
||||
sys.stderr.write('Training model..\n')
|
||||
|
||||
1
examples/py-schema-validation/requirements.txt
Normal file
1
examples/py-schema-validation/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
jsonschema
|
||||
@@ -86,8 +86,8 @@ def verifyFlows(nsock, instance):
|
||||
l4_proto = 'n/a'
|
||||
invalid_flows_str += '{} proto[{},{}] ts[{} + {} < {}] diff[{}], '.format(flow_id, l4_proto, flow.flow_idle_time,
|
||||
flow.flow_last_seen, flow.flow_idle_time,
|
||||
instance.most_recent_flow_time,
|
||||
instance.most_recent_flow_time -
|
||||
instance.getMostRecentFlowTime(flow.thread_id),
|
||||
instance.getMostRecentFlowTime(flow.thread_id) -
|
||||
(flow.flow_last_seen + flow.flow_idle_time))
|
||||
|
||||
raise SemanticValidationException(None, 'Flow Manager verification failed for: {}'.format(invalid_flows_str[:-2]))
|
||||
@@ -193,7 +193,7 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
if (flow_last_seen is not None and 'flow_idle_time' not in json_dict) or \
|
||||
(flow_last_seen is None and 'flow_idle_time' in json_dict):
|
||||
raise SemanticValidationException(current_flow,
|
||||
'Got a JSON string with only 2 of 3 keys, ' \
|
||||
'Got a JSON message with only 2 of 3 keys, ' \
|
||||
'required for timeout handling: flow_idle_time')
|
||||
|
||||
if 'thread_ts_usec' in json_dict:
|
||||
@@ -213,7 +213,7 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
try:
|
||||
if current_flow.flow_ended == True:
|
||||
raise SemanticValidationException(current_flow,
|
||||
'Received JSON string for a flow that already ended/idled.')
|
||||
'Received JSON message for a flow that already ended/idled.')
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
@@ -256,10 +256,25 @@ def onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
if current_flow.flow_finished == True and \
|
||||
json_dict['flow_event_name'] == 'detection-update':
|
||||
raise SemanticValidationException(current_flow,
|
||||
'Flow state already finished, but another detection-update received.')
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
if json_dict['flow_state'] == 'finished':
|
||||
current_flow.flow_finished = True
|
||||
elif json_dict['flow_state'] == 'info' and \
|
||||
current_flow.flow_finished is True:
|
||||
raise SemanticValidationException(current_flow,
|
||||
'Flow state already finished, but switched back to info state.')
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
try:
|
||||
if current_flow.flow_finished == True and \
|
||||
json_dict['flow_event_name'] != 'analyse' and \
|
||||
json_dict['flow_event_name'] != 'update' and \
|
||||
|
||||
21
examples/rs-simple/Cargo.toml
Normal file
21
examples/rs-simple/Cargo.toml
Normal file
@@ -0,0 +1,21 @@
|
||||
[package]
|
||||
name = "rs-simple"
|
||||
version = "0.1.0"
|
||||
authors = ["Toni Uhlig <toni@impl.cc>"]
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
argh = "0.1"
|
||||
bytes = "1"
|
||||
crossterm = "0.29.0"
|
||||
io = "0.0.2"
|
||||
moka = { version = "0.12.10", features = ["future"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tui = "0.19.0"
|
||||
|
||||
[profile.release]
|
||||
strip = true
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
860
examples/rs-simple/src/main.rs
Normal file
860
examples/rs-simple/src/main.rs
Normal file
@@ -0,0 +1,860 @@
|
||||
use argh::FromArgs;
|
||||
use bytes::BytesMut;
|
||||
use crossterm::{
|
||||
cursor,
|
||||
event::{self, KeyCode, KeyEvent},
|
||||
ExecutableCommand,
|
||||
terminal::{self, ClearType},
|
||||
};
|
||||
use moka::{future::Cache, Expiry};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fmt,
|
||||
hash::{Hash, Hasher},
|
||||
io::self,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant, SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
use tokio::io::AsyncReadExt;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::MutexGuard;
|
||||
use tokio::net::TcpStream;
|
||||
use tui::{
|
||||
backend::CrosstermBackend,
|
||||
layout::{Layout, Constraint, Direction},
|
||||
style::{Style, Color, Modifier},
|
||||
Terminal,
|
||||
widgets::{Block, Borders, List, ListItem, Row, Table, TableState},
|
||||
};
|
||||
|
||||
#[derive(FromArgs, Debug)]
|
||||
/// Simple Rust nDPIsrvd Client Example
|
||||
struct Args {
|
||||
/// nDPIsrvd host(s) to connect to
|
||||
#[argh(option)]
|
||||
host: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ParseError {
|
||||
Protocol(),
|
||||
Json(),
|
||||
Schema(),
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for ParseError {
|
||||
fn from(_: serde_json::Error) -> Self {
|
||||
ParseError::Json()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum EventName {
|
||||
Invalid, New, End, Idle, Update, Analyse,
|
||||
Guessed, Detected,
|
||||
#[serde(rename = "detection-update")]
|
||||
DetectionUpdate,
|
||||
#[serde(rename = "not-detected")]
|
||||
NotDetected,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
enum State {
|
||||
Unknown, Info, Finished,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct FlowEventNdpiFlowRisk {
|
||||
#[serde(rename = "risk")]
|
||||
risk: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct FlowEventNdpi {
|
||||
#[serde(rename = "proto")]
|
||||
proto: String,
|
||||
#[serde(rename = "flow_risk")]
|
||||
risks: Option<HashMap<String, FlowEventNdpiFlowRisk>>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct FlowEvent {
|
||||
#[serde(rename = "flow_event_name")]
|
||||
name: EventName,
|
||||
#[serde(rename = "flow_id")]
|
||||
id: u64,
|
||||
#[serde(rename = "alias")]
|
||||
alias: String,
|
||||
#[serde(rename = "source")]
|
||||
source: String,
|
||||
#[serde(rename = "thread_id")]
|
||||
thread_id: u64,
|
||||
#[serde(rename = "flow_state")]
|
||||
state: State,
|
||||
#[serde(rename = "flow_first_seen")]
|
||||
first_seen: u64,
|
||||
#[serde(rename = "flow_src_last_pkt_time")]
|
||||
src_last_pkt_time: u64,
|
||||
#[serde(rename = "flow_dst_last_pkt_time")]
|
||||
dst_last_pkt_time: u64,
|
||||
#[serde(rename = "flow_idle_time")]
|
||||
idle_time: u64,
|
||||
#[serde(rename = "flow_src_packets_processed")]
|
||||
src_packets_processed: u64,
|
||||
#[serde(rename = "flow_dst_packets_processed")]
|
||||
dst_packets_processed: u64,
|
||||
#[serde(rename = "flow_src_tot_l4_payload_len")]
|
||||
src_tot_l4_payload_len: u64,
|
||||
#[serde(rename = "flow_dst_tot_l4_payload_len")]
|
||||
dst_tot_l4_payload_len: u64,
|
||||
#[serde(rename = "l3_proto")]
|
||||
l3_proto: String,
|
||||
#[serde(rename = "l4_proto")]
|
||||
l4_proto: String,
|
||||
#[serde(rename = "ndpi")]
|
||||
ndpi: Option<FlowEventNdpi>,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug)]
|
||||
struct PacketEvent {
|
||||
pkt_datalink: u16,
|
||||
pkt_caplen: u64,
|
||||
pkt_len: u64,
|
||||
pkt_l4_len: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
struct DaemonEventStatus {
|
||||
#[serde(rename = "alias")]
|
||||
alias: String,
|
||||
#[serde(rename = "source")]
|
||||
source: String,
|
||||
#[serde(rename = "thread_id")]
|
||||
thread_id: u64,
|
||||
#[serde(rename = "packets-captured")]
|
||||
packets_captured: u64,
|
||||
#[serde(rename = "packets-processed")]
|
||||
packets_processed: u64,
|
||||
#[serde(rename = "total-skipped-flows")]
|
||||
total_skipped_flows: u64,
|
||||
#[serde(rename = "total-l4-payload-len")]
|
||||
total_l4_payload_len: u64,
|
||||
#[serde(rename = "total-not-detected-flows")]
|
||||
total_not_detected_flows: u64,
|
||||
#[serde(rename = "total-guessed-flows")]
|
||||
total_guessed_flows: u64,
|
||||
#[serde(rename = "total-detected-flows")]
|
||||
total_detected_flows: u64,
|
||||
#[serde(rename = "total-detection-updates")]
|
||||
total_detection_updates: u64,
|
||||
#[serde(rename = "total-updates")]
|
||||
total_updates: u64,
|
||||
#[serde(rename = "current-active-flows")]
|
||||
current_active_flows: u64,
|
||||
#[serde(rename = "total-active-flows")]
|
||||
total_active_flows: u64,
|
||||
#[serde(rename = "total-idle-flows")]
|
||||
total_idle_flows: u64,
|
||||
#[serde(rename = "total-compressions")]
|
||||
total_compressions: u64,
|
||||
#[serde(rename = "total-compression-diff")]
|
||||
total_compression_diff: u64,
|
||||
#[serde(rename = "current-compression-diff")]
|
||||
current_compression_diff: u64,
|
||||
#[serde(rename = "global-alloc-bytes")]
|
||||
global_alloc_bytes: u64,
|
||||
#[serde(rename = "global-alloc-count")]
|
||||
global_alloc_count: u64,
|
||||
#[serde(rename = "global-free-bytes")]
|
||||
global_free_bytes: u64,
|
||||
#[serde(rename = "global-free-count")]
|
||||
global_free_count: u64,
|
||||
#[serde(rename = "total-events-serialized")]
|
||||
total_events_serialized: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum EventType {
|
||||
Flow(FlowEvent),
|
||||
Packet(PacketEvent),
|
||||
DaemonStatus(DaemonEventStatus),
|
||||
Other(),
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct Stats {
|
||||
ui_updates: u64,
|
||||
flow_count: u64,
|
||||
parse_errors: u64,
|
||||
events: u64,
|
||||
flow_events: u64,
|
||||
packet_events: u64,
|
||||
daemon_events: u64,
|
||||
packet_events_total_caplen: u64,
|
||||
packet_events_total_len: u64,
|
||||
packet_events_total_l4_len: u64,
|
||||
packets_captured: u64,
|
||||
packets_processed: u64,
|
||||
flows_total_skipped: u64,
|
||||
flows_total_l4_payload_len: u64,
|
||||
flows_total_not_detected: u64,
|
||||
flows_total_guessed: u64,
|
||||
flows_current_active: u64,
|
||||
flows_total_compressions: u64,
|
||||
flows_total_compression_diff: u64,
|
||||
flows_current_compression_diff: u64,
|
||||
global_alloc_bytes: u64,
|
||||
global_alloc_count: u64,
|
||||
global_free_bytes: u64,
|
||||
global_free_count: u64,
|
||||
total_events_serialized: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
|
||||
enum FlowExpiration {
|
||||
IdleTime(u64),
|
||||
}
|
||||
|
||||
struct FlowExpiry;
|
||||
|
||||
#[derive(Clone, Eq, Default, Debug)]
|
||||
struct FlowKey {
|
||||
id: u64,
|
||||
alias: String,
|
||||
source: String,
|
||||
thread_id: u64,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct FlowValue {
|
||||
state: State,
|
||||
total_src_packets: u64,
|
||||
total_dst_packets: u64,
|
||||
total_src_bytes: u64,
|
||||
total_dst_bytes: u64,
|
||||
first_seen: std::time::SystemTime,
|
||||
last_seen: std::time::SystemTime,
|
||||
timeout_in: std::time::SystemTime,
|
||||
risks: usize,
|
||||
proto: String,
|
||||
app_proto: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Eq, Default, Debug)]
|
||||
struct DaemonKey {
|
||||
alias: String,
|
||||
source: String,
|
||||
thread_id: u64,
|
||||
}
|
||||
|
||||
impl Default for State {
|
||||
fn default() -> State {
|
||||
State::Unknown
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for State {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
State::Unknown => write!(f, "N/A"),
|
||||
State::Info => write!(f, "Info"),
|
||||
State::Finished => write!(f, "Finished"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FlowExpiration {
|
||||
fn as_duration(&self) -> Option<Duration> {
|
||||
match self {
|
||||
FlowExpiration::IdleTime(value) => Some(Duration::from_micros(*value)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for FlowExpiration {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self.as_duration() {
|
||||
Some(duration) => {
|
||||
let secs = duration.as_secs();
|
||||
write!(f, "{} s", secs)
|
||||
}
|
||||
None => write!(f, "N/A"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Expiry<FlowKey, (FlowExpiration, FlowValue)> for FlowExpiry {
|
||||
fn expire_after_create(
|
||||
&self,
|
||||
_key: &FlowKey,
|
||||
value: &(FlowExpiration, FlowValue),
|
||||
_current_time: Instant,
|
||||
) -> Option<Duration> {
|
||||
value.0.as_duration()
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for FlowKey {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.id.hash(state);
|
||||
self.alias.hash(state);
|
||||
self.source.hash(state);
|
||||
self.thread_id.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for FlowKey {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.id == other.id &&
|
||||
self.alias == other.alias &&
|
||||
self.source == other.source &&
|
||||
self.thread_id == other.thread_id
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for DaemonKey {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.alias.hash(state);
|
||||
self.source.hash(state);
|
||||
self.thread_id.hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for DaemonKey {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.alias == other.alias &&
|
||||
self.source == other.source &&
|
||||
self.thread_id == other.thread_id
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let args: Args = argh::from_env();
|
||||
if args.host.len() == 0 {
|
||||
eprintln!("At least one --host required");
|
||||
return;
|
||||
}
|
||||
|
||||
let mut connections: Vec<TcpStream> = Vec::new();
|
||||
for host in args.host {
|
||||
match TcpStream::connect(host.clone()).await {
|
||||
Ok(stream) => {
|
||||
connections.push(stream);
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Fehler bei Verbindung zu {}: {}", host, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = terminal::enable_raw_mode() {
|
||||
eprintln!("Could not enable terminal raw mode: {}", e);
|
||||
return;
|
||||
}
|
||||
let mut stdout = io::stdout();
|
||||
if let Err(e) = stdout.execute(terminal::Clear(ClearType::All)) {
|
||||
eprintln!("Could not clear your terminal: {}", e);
|
||||
return;
|
||||
}
|
||||
if let Err(e) = stdout.execute(cursor::Hide) {
|
||||
eprintln!("Could not hide your cursor: {}", e);
|
||||
return;
|
||||
}
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend);
|
||||
|
||||
let (tx, mut rx): (mpsc::Sender<String>, mpsc::Receiver<String>) = mpsc::channel(1024);
|
||||
let data = Arc::new(Mutex::new(Stats::default()));
|
||||
let data_tx = Arc::clone(&data);
|
||||
let data_rx = Arc::clone(&data);
|
||||
let flow_cache: Arc<Cache<FlowKey, (FlowExpiration, FlowValue)>> = Arc::new(Cache::builder()
|
||||
.expire_after(FlowExpiry)
|
||||
.build());
|
||||
let flow_cache_rx = Arc::clone(&flow_cache);
|
||||
let daemon_cache: Arc<Cache<DaemonKey, DaemonEventStatus>> = Arc::new(Cache::builder()
|
||||
.time_to_live(Duration::from_secs(1800))
|
||||
.build());
|
||||
|
||||
tokio::spawn(async move {
|
||||
while let Some(msg) = rx.recv().await {
|
||||
match parse_json(&msg) {
|
||||
Ok(message) => {
|
||||
let mut data_lock = data_tx.lock().await;
|
||||
data_lock.events += 1;
|
||||
update_stats(&message, &mut data_lock, &flow_cache, &daemon_cache).await;
|
||||
}
|
||||
Err(_message) => {
|
||||
let mut data_lock = data_tx.lock().await;
|
||||
data_lock.parse_errors += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
for mut stream in connections {
|
||||
let cloned_tx = tx.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut buffer = BytesMut::with_capacity(33792usize);
|
||||
|
||||
loop {
|
||||
let n = match stream.read_buf(&mut buffer).await {
|
||||
Ok(len) => len,
|
||||
Err(_) => {
|
||||
continue; // Versuche es erneut, wenn ein Fehler auftritt
|
||||
}
|
||||
};
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
while let Some(message) = parse_message(&mut buffer) {
|
||||
match cloned_tx.send(message).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => return
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let mut table_state = TableState::default();
|
||||
let mut old_selected: Option<FlowKey> = None;
|
||||
|
||||
loop {
|
||||
let flows: Vec<(FlowKey, (FlowExpiration, FlowValue))> = flow_cache_rx.iter().map(|(k, v)| (k.as_ref().clone(), v.clone()))
|
||||
.take(128)
|
||||
.collect();
|
||||
let mut table_selected = match table_state.selected() {
|
||||
Some(mut table_index) => {
|
||||
if table_index >= flows.len() {
|
||||
flows.len().saturating_sub(1)
|
||||
} else {
|
||||
if let Some(ref old_flow_key_selected) = old_selected {
|
||||
if let Some(old_index) = flows.iter().position(|x| x.0 == *old_flow_key_selected) {
|
||||
if old_index != table_index {
|
||||
table_index = old_index;
|
||||
}
|
||||
} else {
|
||||
old_selected = Some(flows.get(table_index).unwrap().0.clone());
|
||||
}
|
||||
}
|
||||
table_index
|
||||
}
|
||||
}
|
||||
None => 0,
|
||||
};
|
||||
|
||||
match read_keypress() {
|
||||
Some(KeyCode::Esc) => break,
|
||||
Some(KeyCode::Char('q')) => break,
|
||||
Some(KeyCode::Up) => {
|
||||
table_selected = match table_selected {
|
||||
i if i == 0 => flows.len().saturating_sub(1),
|
||||
i => i - 1,
|
||||
};
|
||||
if let Some(new_selected) = flows.get(table_selected) {
|
||||
old_selected = Some(new_selected.0.clone());
|
||||
}
|
||||
},
|
||||
Some(KeyCode::Down) => {
|
||||
table_selected = match table_selected {
|
||||
i if i >= flows.len().saturating_sub(1) => 0,
|
||||
i => i + 1,
|
||||
};
|
||||
if let Some(new_selected) = flows.get(table_selected) {
|
||||
old_selected = Some(new_selected.0.clone());
|
||||
}
|
||||
},
|
||||
Some(KeyCode::PageUp) => {
|
||||
table_selected = match table_selected {
|
||||
i if i == 0 => flows.len().saturating_sub(1),
|
||||
i if i < 25 => 0,
|
||||
i => i - 25,
|
||||
};
|
||||
if let Some(new_selected) = flows.get(table_selected) {
|
||||
old_selected = Some(new_selected.0.clone());
|
||||
}
|
||||
},
|
||||
Some(KeyCode::PageDown) => {
|
||||
table_selected = match table_selected {
|
||||
i if i >= flows.len().saturating_sub(1) => 0,
|
||||
i if i >= flows.len().saturating_sub(25) => flows.len().saturating_sub(1),
|
||||
i => i + 25,
|
||||
};
|
||||
if let Some(new_selected) = flows.get(table_selected) {
|
||||
old_selected = Some(new_selected.0.clone());
|
||||
}
|
||||
},
|
||||
Some(KeyCode::Home) => {
|
||||
table_selected = 0;
|
||||
if let Some(new_selected) = flows.get(table_selected) {
|
||||
old_selected = Some(new_selected.0.clone());
|
||||
}
|
||||
},
|
||||
Some(KeyCode::End) => {
|
||||
table_selected = match table_selected {
|
||||
_ => flows.len().saturating_sub(1),
|
||||
};
|
||||
if let Some(new_selected) = flows.get(table_selected) {
|
||||
old_selected = Some(new_selected.0.clone());
|
||||
}
|
||||
},
|
||||
Some(_) => (),
|
||||
None => ()
|
||||
};
|
||||
|
||||
let mut data_lock = data_rx.lock().await;
|
||||
data_lock.ui_updates += 1;
|
||||
draw_ui(terminal.as_mut().unwrap(), &mut table_state, table_selected, &data_lock, &flows);
|
||||
}
|
||||
|
||||
if let Err(e) = terminal.unwrap().backend_mut().execute(cursor::Show) {
|
||||
eprintln!("Could not show your cursor: {}", e);
|
||||
return;
|
||||
}
|
||||
let mut stdout = io::stdout();
|
||||
if let Err(e) = stdout.execute(terminal::Clear(ClearType::All)) {
|
||||
eprintln!("Could not clear your terminal: {}", e);
|
||||
return;
|
||||
}
|
||||
if let Err(e) = terminal::disable_raw_mode() {
|
||||
eprintln!("Could not disable raw mode: {}", e);
|
||||
return;
|
||||
}
|
||||
println!("\nDone.");
|
||||
}
|
||||
|
||||
fn read_keypress() -> Option<KeyCode> {
|
||||
if event::poll(Duration::from_millis(1000)).unwrap() {
|
||||
if let event::Event::Key(KeyEvent { code, .. }) = event::read().unwrap() {
|
||||
return Some(code);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn parse_message(buffer: &mut BytesMut) -> Option<String> {
|
||||
if let Some(pos) = buffer.iter().position(|&b| b == b'\n') {
|
||||
let message = buffer.split_to(pos + 1);
|
||||
return Some(String::from_utf8_lossy(&message).to_string());
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
fn parse_json(data: &str) -> Result<EventType, ParseError> {
|
||||
let first_non_digit = data.find(|c: char| !c.is_ascii_digit()).unwrap_or(0);
|
||||
let length_str = &data[0..first_non_digit];
|
||||
let length: usize = length_str.parse().unwrap_or(0);
|
||||
if length == 0 {
|
||||
return Err(ParseError::Protocol());
|
||||
}
|
||||
|
||||
let json_str = &data[first_non_digit..first_non_digit + length];
|
||||
let value: Value = serde_json::from_str(json_str).map_err(|_| ParseError::Json()).unwrap();
|
||||
if value.get("flow_event_name").is_some() {
|
||||
let flow_event: FlowEvent = serde_json::from_value(value)?;
|
||||
return Ok(EventType::Flow(flow_event));
|
||||
} else if value.get("packet_event_name").is_some() {
|
||||
let packet_event: PacketEvent = serde_json::from_value(value)?;
|
||||
return Ok(EventType::Packet(packet_event));
|
||||
} else if value.get("daemon_event_name").is_some() {
|
||||
if value.get("daemon_event_name").unwrap() == "status" ||
|
||||
value.get("daemon_event_name").unwrap() == "shutdown"
|
||||
{
|
||||
let daemon_status_event: DaemonEventStatus = serde_json::from_value(value)?;
|
||||
return Ok(EventType::DaemonStatus(daemon_status_event));
|
||||
}
|
||||
return Ok(EventType::Other());
|
||||
} else if value.get("error_event_name").is_some() {
|
||||
return Ok(EventType::Other());
|
||||
}
|
||||
|
||||
Err(ParseError::Schema())
|
||||
}
|
||||
|
||||
/// Fold a single parsed event into the shared statistics and the two caches.
///
/// * `event` - the event decoded by `parse_json`.
/// * `stats` - locked global counters updated in place.
/// * `cache` - per-flow cache keyed by (id, alias, source, thread); flows
///   are inserted/refreshed here and removed on end/idle events.
/// * `daemon_cache` - latest status event per daemon instance; daemon-wide
///   totals are recomputed from it on every status event.
async fn update_stats(event: &EventType, stats: &mut MutexGuard<'_, Stats>, cache: &Cache<FlowKey, (FlowExpiration, FlowValue)>, daemon_cache: &Cache<DaemonKey, DaemonEventStatus>) {
    match &event {
        EventType::Flow(flow_event) => {
            stats.flow_events += 1;
            stats.flow_count = cache.entry_count();
            // Cache key identifies the flow uniquely across daemon instances.
            let key = FlowKey { id: flow_event.id, alias: flow_event.alias.to_string(),
                                source: flow_event.source.to_string(), thread_id: flow_event.thread_id };

            // End/Idle terminate the flow: drop it from the cache and stop.
            if flow_event.name == EventName::End ||
                flow_event.name == EventName::Idle
            {
                cache.remove(&key).await;
                return;
            }

            // Timestamps arrive as microseconds since the UNIX epoch;
            // split into whole seconds plus nanosecond remainder.
            let first_seen_seconds = flow_event.first_seen / 1_000_000;
            let first_seen_nanos = (flow_event.first_seen % 1_000_000) * 1_000;
            let first_seen_epoch = std::time::Duration::new(first_seen_seconds, first_seen_nanos as u32);
            let first_seen_system = UNIX_EPOCH + first_seen_epoch;

            // "Last seen" is the newer of the two per-direction timestamps.
            let last_seen = std::cmp::max(flow_event.src_last_pkt_time,
                                          flow_event.dst_last_pkt_time);
            let last_seen_seconds = last_seen / 1_000_000;
            let last_seen_nanos = (last_seen % 1_000_000) * 1_000;
            let last_seen_epoch = std::time::Duration::new(last_seen_seconds, last_seen_nanos as u32);
            let last_seen_system = UNIX_EPOCH + last_seen_epoch;

            // The flow times out `idle_time` microseconds after last_seen.
            let timeout_seconds = (last_seen + flow_event.idle_time) / 1_000_000;
            let timeout_nanos = ((last_seen + flow_event.idle_time) % 1_000_000) * 1_000;
            let timeout_epoch = std::time::Duration::new(timeout_seconds, timeout_nanos as u32);
            let timeout_system = UNIX_EPOCH + timeout_epoch;

            // Risk count: zero when nDPI data or its risk list is absent.
            let risks = match &flow_event.ndpi {
                None => 0,
                Some(ndpi) => match &ndpi.risks {
                    None => 0,
                    Some(risks) => risks.len(),
                },
            };

            // Application protocol, or "-" while detection is pending.
            let app_proto = match &flow_event.ndpi {
                None => "-",
                Some(ndpi) => &ndpi.proto,
            };

            let value = FlowValue {
                state: flow_event.state,
                total_src_packets: flow_event.src_packets_processed,
                total_dst_packets: flow_event.dst_packets_processed,
                total_src_bytes: flow_event.src_tot_l4_payload_len,
                total_dst_bytes: flow_event.dst_tot_l4_payload_len,
                first_seen: first_seen_system,
                last_seen: last_seen_system,
                timeout_in: timeout_system,
                risks: risks,
                proto: flow_event.l3_proto.to_string() + "/" + &flow_event.l4_proto,
                app_proto: app_proto.to_string(),
            };
            // Insert (or refresh) with a per-flow idle-time based expiration.
            cache.insert(key, (FlowExpiration::IdleTime(flow_event.idle_time), value)).await;
        }
        EventType::Packet(packet_event) => {
            // Packet events only contribute to the aggregate length counters.
            stats.packet_events += 1;
            stats.packet_events_total_caplen += packet_event.pkt_caplen;
            stats.packet_events_total_len += packet_event.pkt_len;
            stats.packet_events_total_l4_len += packet_event.pkt_l4_len;
        }
        EventType::DaemonStatus(daemon_status_event) => {
            let key = DaemonKey { alias: daemon_status_event.alias.to_string(),
                                  source: daemon_status_event.source.to_string(),
                                  thread_id: daemon_status_event.thread_id };
            stats.daemon_events += 1;
            daemon_cache.insert(key, daemon_status_event.clone()).await;

            // Daemon-wide totals are absolute values per daemon, not deltas:
            // reset the aggregates and re-sum over every cached daemon.
            stats.packets_captured = 0;
            stats.packets_processed = 0;
            stats.flows_total_skipped = 0;
            stats.flows_total_l4_payload_len = 0;
            stats.flows_total_not_detected = 0;
            stats.flows_total_guessed = 0;
            stats.flows_current_active = 0;
            stats.flows_total_compressions = 0;
            stats.flows_total_compression_diff = 0;
            stats.flows_current_compression_diff = 0;
            stats.global_alloc_bytes = 0;
            stats.global_alloc_count = 0;
            stats.global_free_bytes = 0;
            stats.global_free_count = 0;
            stats.total_events_serialized = 0;
            let daemons: Vec<DaemonEventStatus> = daemon_cache.iter().map(|(_, v)| (v.clone())).collect();
            for daemon in daemons {
                stats.packets_captured += daemon.packets_captured;
                stats.packets_processed += daemon.packets_processed;
                stats.flows_total_skipped += daemon.total_skipped_flows;
                stats.flows_total_l4_payload_len += daemon.total_l4_payload_len;
                stats.flows_total_not_detected += daemon.total_not_detected_flows;
                stats.flows_total_guessed += daemon.total_guessed_flows;
                stats.flows_current_active += daemon.current_active_flows;
                stats.flows_total_compressions += daemon.total_compressions;
                stats.flows_total_compression_diff += daemon.total_compression_diff;
                stats.flows_current_compression_diff += daemon.current_compression_diff;
                stats.global_alloc_bytes += daemon.global_alloc_bytes;
                stats.global_alloc_count += daemon.global_alloc_count;
                stats.global_free_bytes += daemon.global_free_bytes;
                stats.global_free_count += daemon.global_free_count;
                stats.total_events_serialized += daemon.total_events_serialized;
            }
        }
        EventType::Other() => {}
    }
}
|
||||
|
||||
/// Render a byte count as a human-readable string using binary units
/// (B, kB, MB, GB), truncating the quotient toward zero.
fn format_bytes(bytes: u64) -> String {
    // Largest unit first; the first scale that fits wins.
    const UNITS: [(u64, &str); 3] = [(1 << 30, "GB"), (1 << 20, "MB"), (1 << 10, "kB")];

    for &(scale, suffix) in UNITS.iter() {
        if bytes >= scale {
            return format!("{} {}", bytes / scale, suffix);
        }
    }
    format!("{} B", bytes)
}
|
||||
|
||||
/// Render one frame of the TUI: three info lists on top (general, packet,
/// daemon statistics) and the flow table filling the rest of the screen.
///
/// * `table_selected` - zero-based index of the highlighted flow row.
/// * `data` - locked statistics snapshot used to fill the list widgets.
/// * `flows` - flow-cache snapshot rendered as table rows, in order.
fn draw_ui<B: tui::backend::Backend>(terminal: &mut Terminal<B>, table_state: &mut TableState, table_selected: usize, data: &MutexGuard<Stats>, flows: &Vec<(FlowKey, (FlowExpiration, FlowValue))>) {
    // Top-left list: client-side counters.
    let general_items = vec![
        ListItem::new("TUI Updates..: ".to_owned() + &data.ui_updates.to_string()),
        ListItem::new("Flows Cached.: ".to_owned() + &data.flow_count.to_string()),
        ListItem::new("Total Events.: ".to_owned() + &data.events.to_string()),
        ListItem::new("Parse Errors.: ".to_owned() + &data.parse_errors.to_string()),
        ListItem::new("Flow Events..: ".to_owned() + &data.flow_events.to_string()),
    ];
    // Top-middle list: packet-event aggregates.
    let packet_items = vec![
        ListItem::new("Total Events........: ".to_owned() + &data.packet_events.to_string()),
        ListItem::new("Total Capture Length: ".to_owned() + &format_bytes(data.packet_events_total_caplen)),
        ListItem::new("Total Length........: ".to_owned() + &format_bytes(data.packet_events_total_len)),
        ListItem::new("Total L4 Length.....: ".to_owned() + &format_bytes(data.packet_events_total_l4_len)),
    ];
    // Top-right list: daemon-wide aggregates (summed over all daemons).
    let daemon_items = vec![
        ListItem::new("Total Events.............: ".to_owned() + &data.daemon_events.to_string()),
        ListItem::new("Total Packets Captured...: ".to_owned() + &data.packets_captured.to_string()),
        ListItem::new("Total Packets Processed..: ".to_owned() + &data.packets_processed.to_string()),
        ListItem::new("Total Flows Skipped......: ".to_owned() + &data.flows_total_skipped.to_string()),
        ListItem::new("Total Flows Not-Detected.: ".to_owned() + &data.flows_total_not_detected.to_string()),
        ListItem::new("Total Compressions/Memory: ".to_owned() + &data.flows_total_compressions.to_string()
                      + " / " + &format_bytes(data.flows_total_compression_diff) + " deflate"),
        // NOTE(review): assumes alloc_bytes >= free_bytes; a daemon restart
        // could make this subtraction underflow — confirm upstream invariant.
        ListItem::new("Total Memory in Use......: ".to_owned() + &format_bytes(data.global_alloc_bytes - data.global_free_bytes)
                      + " (" + &format_bytes(data.flows_current_compression_diff) + " deflate)"),
        ListItem::new("Total Events Serialized..: ".to_owned() + &data.total_events_serialized.to_string()),
        ListItem::new("Current Flows Active.....: ".to_owned() + &data.flows_current_active.to_string()),
    ];
    // One table row per cached flow; timestamps are shown relative to now.
    let table_rows: Vec<Row> = flows
        .into_iter()
        .map(|(key, (_exp, val))| {
            // Humanize "first seen" as a coarse time-ago string.
            let first_seen_display = match val.first_seen.elapsed() {
                Ok(elapsed) => {
                    match elapsed.as_secs() {
                        t if t > (3_600 * 24) => format!("{} d ago", t / (3_600 * 24)),
                        t if t > 3_600 => format!("{} h ago", t / 3_600),
                        t if t > 60 => format!("{} min ago", t / 60),
                        t if t > 0 => format!("{} s ago", t),
                        t if t == 0 => "< 1 s ago".to_string(),
                        t => format!("INVALID: {}", t),
                    }
                }
                // elapsed() fails when the timestamp lies in the future
                // (e.g. clock skew between daemon and this client).
                Err(err) => format!("ERROR: {}", err)
            };

            let last_seen_display = match val.last_seen.elapsed() {
                Ok(elapsed) => {
                    match elapsed.as_secs() {
                        t if t > (3_600 * 24) => format!("{} d ago", t / (3_600 * 24)),
                        t if t > 3_600 => format!("{} h ago", t / 3_600),
                        t if t > 60 => format!("{} min ago", t / 60),
                        t if t > 0 => format!("{} s ago", t),
                        t if t == 0 => "< 1 s ago".to_string(),
                        t => format!("INVALID: {}", t),
                    }
                }
                Err(_err) => "ERROR".to_string()
            };

            // Countdown until the flow would time out; a past deadline
            // renders as EXPIRED.
            let timeout_display = match val.timeout_in.duration_since(SystemTime::now()) {
                Ok(elapsed) => {
                    match elapsed.as_secs() {
                        t if t > (3_600 * 24) => format!("in {} d", t / (3_600 * 24)),
                        t if t > 3_600 => format!("in {} h", t / 3_600),
                        t if t > 60 => format!("in {} min", t / 60),
                        t if t > 0 => format!("in {} s", t),
                        t if t == 0 => "in < 1 s".to_string(),
                        t => format!("INVALID: {}", t),
                    }
                }
                Err(_err) => "EXPIRED".to_string()
            };

            // Column order must match the header row built below.
            Row::new(vec![
                key.id.to_string(),
                val.state.to_string(),
                first_seen_display,
                last_seen_display,
                timeout_display,
                (val.total_src_packets + val.total_dst_packets).to_string(),
                format_bytes(val.total_src_bytes + val.total_dst_bytes),
                val.risks.to_string(),
                val.proto.to_string(),
                val.app_proto.to_string(),
            ])
        })
        .collect();

    terminal.draw(|f| {
        let size = f.size();

        // Vertical split: fixed-height stats strip on top, table below.
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints(
                [
                    Constraint::Length(11),
                    Constraint::Percentage(100),
                ].as_ref()
            )
            .split(size);

        // Horizontal split of the stats strip into the three lists.
        let top_chunks = Layout::default()
            .direction(Direction::Horizontal)
            .constraints(
                [
                    Constraint::Percentage(25),
                    Constraint::Percentage(30),
                    Constraint::Percentage(55),
                ].as_ref()
            )
            .split(chunks[0]);

        // 1-based selection shown in the title; 0 when the table is empty.
        let table_selected_abs = match table_selected {
            _ if flows.len() == 0 => 0,
            i => i + 1,
        };
        let table = Table::new(table_rows)
            .header(Row::new(vec!["Flow ID", "State", "First Seen", "Last Seen", "Timeout", "Total Packets", "Total Bytes", "Risks", "L3/L4", "L7"])
            .style(Style::default().fg(Color::Yellow).add_modifier(Modifier::BOLD)))
            .block(Block::default().title("Flow Table (selected: ".to_string() +
                                          &table_selected_abs.to_string() +
                                          "): " +
                                          &flows.len().to_string() +
                                          " item(s)").borders(Borders::ALL))
            .highlight_style(Style::default().bg(Color::Blue))
            .widths(&[
                Constraint::Length(10),
                Constraint::Length(10),
                Constraint::Length(12),
                Constraint::Length(12),
                Constraint::Length(10),
                Constraint::Length(13),
                Constraint::Length(12),
                Constraint::Length(6),
                Constraint::Length(12),
                Constraint::Length(15),
            ]);

        let general_list = List::new(general_items)
            .block(Block::default().title("General").borders(Borders::ALL));
        let packet_list = List::new(packet_items)
            .block(Block::default().title("Packet Events").borders(Borders::ALL));
        let daemon_list = List::new(daemon_items)
            .block(Block::default().title("Daemon Events").borders(Borders::ALL));

        table_state.select(Some(table_selected));
        f.render_widget(general_list, top_chunks[0]);
        f.render_widget(packet_list, top_chunks[1]);
        f.render_widget(daemon_list, top_chunks[2]);
        f.render_stateful_widget(table, chunks[1], table_state);
    }).unwrap();
}
|
||||
28
examples/yaml-filebeat/filebeat.yml
Normal file
28
examples/yaml-filebeat/filebeat.yml
Normal file
@@ -0,0 +1,28 @@
|
||||
filebeat.inputs:
|
||||
- type: unix
|
||||
id: "NDPId-logs" # replace this input id to your preference
|
||||
max_message_size: 100MiB
|
||||
index: "index-name" # Replace this with your desired index name in Elasticsearch
|
||||
enabled: true
|
||||
path: "/var/run/nDPId.sock" # point nDPId to this Unix Socket (Collector)
|
||||
processors:
|
||||
- script: # execute javascript to remove the first 5-digit-number and also the Newline at the end
|
||||
lang: javascript
|
||||
id: trim
|
||||
source: >
|
||||
function process(event) {
|
||||
event.Put("message", event.Get("message").trim().slice(5));
|
||||
}
|
||||
- decode_json_fields: # Decode the Json output
|
||||
fields: ["message"]
|
||||
process_array: true
|
||||
max_depth: 10
|
||||
target: ""
|
||||
overwrite_keys: true
|
||||
add_error_key: false
|
||||
- drop_fields: # Deletes the Message field, which is the undecoded json (You may comment this out if you need the original message)
|
||||
fields: ["message"]
|
||||
- rename:
|
||||
fields:
|
||||
- from: "source" # Prevents a conflict in Elasticsearch and renames the field
|
||||
to: "Source_Interface"
|
||||
2
libnDPI
2
libnDPI
Submodule libnDPI updated: a944514dde...75db1a8a66
906
nDPId-test.c
906
nDPId-test.c
File diff suppressed because it is too large
Load Diff
767
nDPIsrvd.c
767
nDPIsrvd.c
File diff suppressed because it is too large
Load Diff
207
ncrypt.c
Normal file
207
ncrypt.c
Normal file
@@ -0,0 +1,207 @@
|
||||
#include "ncrypt.h"
|
||||
|
||||
#include <endian.h>
|
||||
#include <openssl/conf.h>
|
||||
#include <openssl/core_names.h>
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/pem.h>
|
||||
#include <openssl/ssl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
int ncrypt_init(void)
|
||||
{
|
||||
SSL_load_error_strings();
|
||||
OpenSSL_add_all_algorithms();
|
||||
|
||||
return NCRYPT_SUCCESS;
|
||||
}
|
||||
|
||||
/* Allocate and configure the SSL_CTX inside `ctx` for the given method
 * (client or server).  The context is pinned to TLS 1.3 with two AEAD
 * cipher suites.  Fails if `ctx` already owns a context. */
static int ncrypt_init_ctx(struct ncrypt_ctx * const ctx, SSL_METHOD const * const meth)
{
    if (meth == NULL)
    {
        return NCRYPT_NULL_PTR;
    }
    if (ctx->ssl_ctx != NULL)
    {
        return NCRYPT_ALREADY_INITIALIZED;
    }

    SSL_CTX * const ssl_ctx = SSL_CTX_new(meth);
    if (ssl_ctx == NULL)
    {
        return NCRYPT_NOT_INITIALIZED;
    }
    ctx->ssl_ctx = ssl_ctx;

    /* TLS 1.3 only: min == max version. */
    SSL_CTX_set_min_proto_version(ssl_ctx, TLS1_3_VERSION);
    SSL_CTX_set_max_proto_version(ssl_ctx, TLS1_3_VERSION);
    SSL_CTX_set_ciphersuites(ssl_ctx, "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256");

    return NCRYPT_SUCCESS;
}
|
||||
|
||||
static int ncrypt_load_pems(struct ncrypt_ctx * const ctx,
|
||||
char const * const ca_path,
|
||||
char const * const privkey_pem_path,
|
||||
char const * const pubkey_pem_path)
|
||||
{
|
||||
if (SSL_CTX_use_certificate_file(ctx->ssl_ctx, pubkey_pem_path, SSL_FILETYPE_PEM) <= 0 ||
|
||||
SSL_CTX_use_PrivateKey_file(ctx->ssl_ctx, privkey_pem_path, SSL_FILETYPE_PEM) <= 0 ||
|
||||
SSL_CTX_load_verify_locations(ctx->ssl_ctx, ca_path, NULL) <= 0)
|
||||
{
|
||||
return NCRYPT_PEM_LOAD_FAILED;
|
||||
}
|
||||
|
||||
SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
|
||||
SSL_CTX_set_verify_depth(ctx->ssl_ctx, 4);
|
||||
return NCRYPT_SUCCESS;
|
||||
}
|
||||
|
||||
int ncrypt_init_client(struct ncrypt_ctx * const ctx,
|
||||
char const * const ca_path,
|
||||
char const * const privkey_pem_path,
|
||||
char const * const pubkey_pem_path)
|
||||
{
|
||||
if (ca_path == NULL || privkey_pem_path == NULL || pubkey_pem_path == NULL)
|
||||
{
|
||||
return NCRYPT_NULL_PTR;
|
||||
}
|
||||
|
||||
int rv = ncrypt_init_ctx(ctx, TLS_client_method());
|
||||
|
||||
if (rv != NCRYPT_SUCCESS)
|
||||
{
|
||||
return rv;
|
||||
}
|
||||
|
||||
return ncrypt_load_pems(ctx, ca_path, privkey_pem_path, pubkey_pem_path);
|
||||
}
|
||||
|
||||
int ncrypt_init_server(struct ncrypt_ctx * const ctx,
|
||||
char const * const ca_path,
|
||||
char const * const privkey_pem_path,
|
||||
char const * const pubkey_pem_path)
|
||||
{
|
||||
if (ca_path == NULL || privkey_pem_path == NULL || pubkey_pem_path == NULL)
|
||||
{
|
||||
return NCRYPT_NULL_PTR;
|
||||
}
|
||||
|
||||
int rv = ncrypt_init_ctx(ctx, TLS_server_method());
|
||||
|
||||
if (rv != NCRYPT_SUCCESS)
|
||||
{
|
||||
return rv;
|
||||
}
|
||||
|
||||
return ncrypt_load_pems(ctx, ca_path, privkey_pem_path, pubkey_pem_path);
|
||||
}
|
||||
|
||||
int ncrypt_on_connect(struct ncrypt_ctx * const ctx, int connect_fd, struct ncrypt_entity * const ent)
|
||||
{
|
||||
if (ent->ssl == NULL)
|
||||
{
|
||||
ent->ssl = SSL_new(ctx->ssl_ctx);
|
||||
if (ent->ssl == NULL)
|
||||
{
|
||||
return NCRYPT_NOT_INITIALIZED;
|
||||
}
|
||||
SSL_set_fd(ent->ssl, connect_fd);
|
||||
SSL_set_connect_state(ent->ssl);
|
||||
}
|
||||
|
||||
int rv = SSL_do_handshake(ent->ssl);
|
||||
if (rv != 1)
|
||||
{
|
||||
return SSL_get_error(ent->ssl, rv);
|
||||
}
|
||||
|
||||
return NCRYPT_SUCCESS;
|
||||
}
|
||||
|
||||
int ncrypt_on_accept(struct ncrypt_ctx * const ctx, int accept_fd, struct ncrypt_entity * const ent)
|
||||
{
|
||||
if (ent->ssl == NULL)
|
||||
{
|
||||
ent->ssl = SSL_new(ctx->ssl_ctx);
|
||||
if (ent->ssl == NULL)
|
||||
{
|
||||
return NCRYPT_NOT_INITIALIZED;
|
||||
}
|
||||
SSL_set_fd(ent->ssl, accept_fd);
|
||||
SSL_set_accept_state(ent->ssl);
|
||||
}
|
||||
|
||||
int rv = SSL_accept(ent->ssl);
|
||||
if (rv != 1)
|
||||
{
|
||||
return SSL_get_error(ent->ssl, rv);
|
||||
}
|
||||
|
||||
return NCRYPT_SUCCESS;
|
||||
}
|
||||
|
||||
/* Read up to `json_msg_len` bytes of plaintext from the TLS connection.
 *
 * Returns the number of bytes read (> 0), or -1 with errno set:
 *   EAGAIN - the TLS layer needs more I/O (retry later),
 *   EPROTO - no TLS session or a TLS protocol failure,
 *   other  - errno left as set by the failing syscall (SSL_ERROR_SYSCALL). */
ssize_t ncrypt_read(struct ncrypt_entity * const ent, char * const json_msg, size_t json_msg_len)
{
    if (ent->ssl == NULL)
    {
        errno = EPROTO;
        return -1;
    }

    int const rv = SSL_read(ent->ssl, json_msg, json_msg_len);
    if (rv > 0)
    {
        return rv;
    }

    switch (SSL_get_error(ent->ssl, rv))
    {
        case SSL_ERROR_WANT_READ:
        case SSL_ERROR_WANT_WRITE:
            errno = EAGAIN; /* retryable on non-blocking sockets */
            break;
        case SSL_ERROR_SYSCALL:
            /* errno was already set by the failed syscall; keep it. */
            break;
        default:
            errno = EPROTO; /* TLS protocol failure */
            break;
    }
    return -1;
}
|
||||
|
||||
/* Write `json_msg_len` bytes of plaintext to the TLS connection.
 *
 * Returns the number of bytes written (> 0), or -1 with errno set using
 * the same convention as ncrypt_read(): EAGAIN for retryable WANT_READ/
 * WANT_WRITE, EPROTO for missing session or protocol failure, and the
 * syscall's own errno for SSL_ERROR_SYSCALL. */
ssize_t ncrypt_write(struct ncrypt_entity * const ent, char const * const json_msg, size_t json_msg_len)
{
    if (ent->ssl == NULL)
    {
        errno = EPROTO;
        return -1;
    }

    int const rv = SSL_write(ent->ssl, json_msg, json_msg_len);
    if (rv > 0)
    {
        return rv;
    }

    switch (SSL_get_error(ent->ssl, rv))
    {
        case SSL_ERROR_WANT_READ:
        case SSL_ERROR_WANT_WRITE:
            errno = EAGAIN; /* retryable on non-blocking sockets */
            break;
        case SSL_ERROR_SYSCALL:
            /* errno was already set by the failed syscall; keep it. */
            break;
        default:
            errno = EPROTO; /* TLS protocol failure */
            break;
    }
    return -1;
}
|
||||
|
||||
void ncrypt_free_entity(struct ncrypt_entity * const ent)
|
||||
{
|
||||
SSL_free(ent->ssl);
|
||||
ent->ssl = NULL;
|
||||
}
|
||||
|
||||
void ncrypt_free_ctx(struct ncrypt_ctx * const ctx)
|
||||
{
|
||||
SSL_CTX_free(ctx->ssl_ctx);
|
||||
ctx->ssl_ctx = NULL;
|
||||
EVP_cleanup();
|
||||
}
|
||||
73
ncrypt.h
Normal file
73
ncrypt.h
Normal file
@@ -0,0 +1,73 @@
|
||||
#ifndef NCRYPT_H
#define NCRYPT_H 1

#include <stdlib.h>
/* Fix: ssize_t (used by ncrypt_read/ncrypt_write below) lives in
 * <sys/types.h>, which was not included — the header only compiled when
 * the includer happened to pull it in first. */
#include <sys/types.h>

/*
 * Initialisers for the handles below.  The do/while(0) wrappers make each
 * macro behave like a single statement.  Fix: the original ncrypt_ctx /
 * ncrypt_entity macros carried a trailing semicolon after `while (0)`,
 * which produced a double statement at call sites and breaks inside an
 * unbraced if/else; the semicolon is now supplied by the caller, matching
 * ncrypt_set_handshake / ncrypt_clear_handshake.
 */
#define ncrypt_ctx(x) \
    do \
    { \
        (x)->ssl_ctx = NULL; \
    } while (0)
#define ncrypt_entity(x) \
    do \
    { \
        (x)->ssl = NULL; \
        (x)->handshake_done = 0; \
    } while (0)
/* Non-zero once the TLS handshake for this entity has completed. */
#define ncrypt_handshake_done(x) ((x)->handshake_done)
#define ncrypt_set_handshake(x) \
    do \
    { \
        (x)->handshake_done = 1; \
    } while (0)
#define ncrypt_clear_handshake(x) \
    do \
    { \
        (x)->handshake_done = 0; \
    } while (0)

/* Return codes shared by all ncrypt_* functions: 0 on success, < 0 on error. */
enum
{
    NCRYPT_SUCCESS = 0,
    NCRYPT_NOT_INITIALIZED = -1,
    NCRYPT_ALREADY_INITIALIZED = -2,
    NCRYPT_NULL_PTR = -3,
    NCRYPT_PEM_LOAD_FAILED = -4
};

/* Holds the OpenSSL SSL_CTX as an opaque pointer, keeping OpenSSL headers
 * out of this public header. */
struct ncrypt_ctx
{
    void * ssl_ctx;
};

/* Per-connection state: an opaque OpenSSL SSL object plus a flag tracking
 * handshake completion (see the macros above). */
struct ncrypt_entity
{
    void * ssl;
    int handshake_done;
};

/* One-time global OpenSSL initialisation; call before anything else. */
int ncrypt_init(void);

/* Set up `ctx` as a TLS client/server and load certificate, private key
 * and CA store from the given PEM files. */
int ncrypt_init_client(struct ncrypt_ctx * const ctx,
                       char const * const ca_path,
                       char const * const privkey_pem_path,
                       char const * const pubkey_pem_path);

int ncrypt_init_server(struct ncrypt_ctx * const ctx,
                       char const * const ca_path,
                       char const * const privkey_pem_path,
                       char const * const pubkey_pem_path);

/* Drive the (possibly non-blocking) handshake on a connected/accepted fd. */
int ncrypt_on_connect(struct ncrypt_ctx * const ctx, int connect_fd, struct ncrypt_entity * const ent);

int ncrypt_on_accept(struct ncrypt_ctx * const ctx, int accept_fd, struct ncrypt_entity * const ent);

/* Plaintext I/O over the established TLS session; -1 with errno on error. */
ssize_t ncrypt_read(struct ncrypt_entity * const ent, char * const json_msg, size_t json_msg_len);

ssize_t ncrypt_write(struct ncrypt_entity * const ent, char const * const json_msg, size_t json_msg_len);

/* Destructors; both reset their handle to NULL. */
void ncrypt_free_entity(struct ncrypt_entity * const ent);

void ncrypt_free_ctx(struct ncrypt_ctx * const ctx);

#endif
|
||||
102
ndpid.conf.example
Normal file
102
ndpid.conf.example
Normal file
@@ -0,0 +1,102 @@
|
||||
[general]
|
||||
# Set the network interface from which packets are captured and processed.
|
||||
# Leave it empty to let nDPId choose the default network interface.
|
||||
#netif = eth0
|
||||
|
||||
# Set a Berkeley Packet Filter.
|
||||
# This will work for libpcap as well as with PF_RING.
|
||||
#bpf = udp or tcp
|
||||
|
||||
# Decapsulate Layer4 tunnel protocols.
|
||||
# Supported protocols: GRE
|
||||
#decode-tunnel = true
|
||||
|
||||
#pidfile = /tmp/ndpid.pid
|
||||
#user = nobody
|
||||
#group = daemon
|
||||
#riskdomains = /path/to/libnDPI/example/risky_domains.txt
|
||||
#protocols = /path/to/libnDPI/example/protos.txt
|
||||
#categories = /path/to/libnDPI/example/categories.txt
|
||||
#ja4 = /path/to/libnDPI/example/ja4_fingerprints.csv
|
||||
#sha1 = /path/to/libnDPI/example/sha1_fingerprints.csv
|
||||
|
||||
# Collector endpoint as UNIX socket (usually nDPIsrvd)
|
||||
#collector = /run/nDPIsrvd/collector
|
||||
# Collector endpoint as UDP socket (usually a custom application)
|
||||
#collector = 127.0.0.1:7777
|
||||
|
||||
# Set a name for this nDPId instance
|
||||
#alias = myhostname
|
||||
|
||||
# Set an optional UUID for this instance
|
||||
# If the value starts with a '/' or '.', it is interpreted as a path
|
||||
# from which the uuid is read from.
|
||||
#uuid = 00000000-dead-c0de-0000-123456789abc
|
||||
#uuid = ./path/to/some/file
|
||||
#uuid = /proc/sys/kernel/random/uuid
|
||||
#uuid = /sys/class/dmi/id/product_uuid
|
||||
|
||||
# Process only internal initial connections (src->dst)
|
||||
#internal = true
|
||||
|
||||
# Process only external initial connections (dst->src)
|
||||
#external = true
|
||||
|
||||
# Enable zLib compression of flow memory for long lasting flows
|
||||
compression = true
|
||||
|
||||
# Enable "analyse" events, which can be used for machine learning.
|
||||
# The daemon will generate some statistical values for every single flow.
|
||||
# An "analyse" event is thrown after "max-packets-per-flow-to-analyse".
|
||||
# Please note that the daemon will require a lot more heap memory for every flow.
|
||||
#analysis = true
|
||||
|
||||
# Force poll() on systems that support epoll() as well
|
||||
#poll = false
|
||||
|
||||
# Enable PF_RING packet capture instead of libpcap
|
||||
#pfring = false
|
||||
|
||||
[tuning]
|
||||
max-flows-per-thread = 2048
|
||||
max-idle-flows-per-thread = 64
|
||||
max-reader-threads = 10
|
||||
daemon-status-interval = 600000000
|
||||
#memory-profiling-log-interval = 5
|
||||
compression-scan-interval = 20000000
|
||||
compression-flow-inactivity = 30000000
|
||||
flow-scan-interval = 10000000
|
||||
generic-max-idle-time = 600000000
|
||||
icmp-max-idle-time = 120000000
|
||||
tcp-max-idle-time = 180000000
|
||||
udp-max-idle-time = 7440000000
|
||||
tcp-max-post-end-flow-time = 120000000
|
||||
max-packets-per-flow-to-send = 15
|
||||
max-packets-per-flow-to-process = 32
|
||||
max-packets-per-flow-to-analyse = 32
|
||||
error-event-threshold-n = 16
|
||||
error-event-threshold-time = 10000000
|
||||
|
||||
# Please note that the following options are libnDPI related and can only be set via config file,
|
||||
# not as command line parameters.
|
||||
# See libnDPI/doc/configuration_parameters.md for detailed information.
|
||||
|
||||
[ndpi]
|
||||
packets_limit_per_flow = 32
|
||||
flow.direction_detection = enable
|
||||
flow.track_payload = disable
|
||||
tcp_ack_payload_heuristic = disable
|
||||
fully_encrypted_heuristic = enable
|
||||
libgcrypt.init = 1
|
||||
dpi.compute_entropy = 1
|
||||
fpc = disable
|
||||
dpi.guess_on_giveup = 0x03
|
||||
flow_risk_lists.load = 1
|
||||
# Currently broken (upstream)
|
||||
#flow_risk.crawler_bot.list.load = 1
|
||||
log.level = 0
|
||||
|
||||
[protos]
|
||||
tls.certificate_expiration_threshold = 7
|
||||
tls.application_blocks_tracking = enable
|
||||
stun.max_packets_extra_dissection = 8
|
||||
31
ndpisrvd.conf.example
Normal file
31
ndpisrvd.conf.example
Normal file
@@ -0,0 +1,31 @@
|
||||
[general]
|
||||
#pidfile = /tmp/ndpisrvd.pid
|
||||
#user = nobody
|
||||
#group = nogroup
|
||||
|
||||
# Collector listener as UNIX socket
|
||||
#collector = /run/nDPIsrvd/collector
|
||||
|
||||
# Distributor listener as UNIX socket
|
||||
#distributor-unix = /run/nDPIsrvd/distributor
|
||||
|
||||
# Distributor listener as IP socket
|
||||
#distributor-in = 127.0.0.1:7000
|
||||
|
||||
# Change group of the collector socket
|
||||
#collector-group = daemon
|
||||
|
||||
# Change group of the distributor socket
|
||||
#distirbutor-group = staff
|
||||
|
||||
# Max (distributor) clients allowed to connect to nDPIsrvd
|
||||
max-remote-descriptors = 128
|
||||
|
||||
# Additional output buffers, useful if a distributor sink's speed is unstable
|
||||
max-write-buffers = 1024
|
||||
|
||||
# Fallback to blocking I/O if output buffers full
|
||||
blocking-io-fallback = true
|
||||
|
||||
# Force poll() on systems that support epoll() as well
|
||||
#poll = false
|
||||
417
nio.c
Normal file
417
nio.c
Normal file
@@ -0,0 +1,417 @@
|
||||
#include "nio.h"
|
||||
|
||||
#include <errno.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#ifdef ENABLE_EPOLL
|
||||
#include <sys/epoll.h>
|
||||
#endif
|
||||
#include <unistd.h>
|
||||
|
||||
void nio_init(struct nio * io)
|
||||
{
|
||||
io->nready = -1;
|
||||
io->poll_max_fds = 0;
|
||||
io->poll_fds = NULL;
|
||||
io->poll_ptrs = NULL;
|
||||
io->poll_fds_set = NULL;
|
||||
io->epoll_fd = -1;
|
||||
io->max_events = 0;
|
||||
io->events = NULL;
|
||||
}
|
||||
|
||||
int nio_use_poll(struct nio * io, nfds_t max_fds)
|
||||
{
|
||||
if (io->epoll_fd != -1 || io->poll_max_fds != 0 || max_fds <= 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
io->poll_max_fds = max_fds;
|
||||
io->poll_fds = (struct pollfd *)calloc(max_fds, sizeof(*io->poll_fds));
|
||||
io->poll_ptrs = calloc(max_fds, sizeof(*io->poll_ptrs));
|
||||
io->poll_fds_set = calloc(max_fds, sizeof(*io->poll_fds_set));
|
||||
|
||||
for (size_t i = 0; i < max_fds; ++i)
|
||||
{
|
||||
io->poll_fds[i].fd = -1;
|
||||
}
|
||||
|
||||
return io->poll_fds == NULL || io->poll_ptrs == NULL || io->poll_fds_set == NULL; // return NIO_ERROR_INTERNAL on
|
||||
// error
|
||||
}
|
||||
|
||||
int nio_use_epoll(struct nio * io, int max_events)
|
||||
{
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd != -1 || io->poll_max_fds != 0 || max_events == 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
io->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
|
||||
io->max_events = max_events;
|
||||
io->events = calloc(max_events, sizeof(struct epoll_event));
|
||||
|
||||
return io->events == NULL || io->epoll_fd < 0; // return NIO_ERROR_INTERNAL on error
|
||||
#else
|
||||
(void)io;
|
||||
(void)max_events;
|
||||
|
||||
return NIO_ERROR_INTERNAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Register `fd` with the active backend for the requested events.
 *
 * event_flags: bitwise OR of NIO_EVENT_INPUT / NIO_EVENT_OUTPUT (at least
 * one required).  `ptr` is an opaque cookie returned with readiness
 * events; when NULL, the epoll backend stores the fd itself instead.
 *
 * Returns NIO_SUCCESS, NIO_ERROR_INTERNAL on bad arguments / no backend /
 * no free poll slot, or (epoll backend) epoll_ctl()'s return value. */
int nio_add_fd(struct nio * io, int fd, int event_flags, void * ptr)
{
    if (fd < 0)
        return NIO_ERROR_INTERNAL;

#ifdef ENABLE_EPOLL
    if (io->epoll_fd >= 0)
    {
        int rv;
        struct epoll_event event = {};

        /* epoll_data is a union: either the raw fd or the user cookie. */
        if (ptr == NULL)
        {
            event.data.fd = fd;
        }
        else
        {
            event.data.ptr = ptr;
        }

        if ((event_flags & NIO_EVENT_INPUT) != 0)
            event.events |= EPOLLIN;
        if ((event_flags & NIO_EVENT_OUTPUT) != 0)
            event.events |= EPOLLOUT;
        /* Refuse a registration that could never fire. */
        if (event.events == 0)
            return NIO_ERROR_INTERNAL;

        while ((rv = epoll_ctl(io->epoll_fd, EPOLL_CTL_ADD, fd, &event)) != 0 && errno == EINTR)
        {
            /* If epoll_ctl() was interrupted by the system, repeat. */
        }
        return rv;
    }
    else
#endif
    if (io->poll_max_fds > 0)
    {
        struct pollfd * unused_pollfd = NULL;
        void ** unused_ptr = NULL;

        /* Find the first free slot (fd < 0 marks a slot as unused). */
        for (size_t i = 0; i < io->poll_max_fds; ++i)
        {
            if (io->poll_fds[i].fd < 0)
            {
                unused_pollfd = &io->poll_fds[i];
                unused_ptr = &io->poll_ptrs[i];
                break;
            }
        }
        if (unused_pollfd == NULL)
            return NIO_ERROR_INTERNAL;

        unused_pollfd->events = 0;
        if ((event_flags & NIO_EVENT_INPUT) != 0)
            unused_pollfd->events |= POLLIN;
        if ((event_flags & NIO_EVENT_OUTPUT) != 0)
            unused_pollfd->events |= POLLOUT;
        if (unused_pollfd->events == 0)
            return NIO_ERROR_INTERNAL;

        /* Commit the fd last, so the slot only becomes "used" once the
         * event mask was validated. */
        unused_pollfd->fd = fd;
        *unused_ptr = ptr;

        return NIO_SUCCESS;
    }

    /* Neither backend was initialised via nio_use_poll()/nio_use_epoll(). */
    return NIO_ERROR_INTERNAL;
}
|
||||
|
||||
/* Change the registered event mask / user cookie of an already-added fd.
 *
 * Mirrors nio_add_fd(), but issues EPOLL_CTL_MOD (epoll backend) or
 * rewrites the existing slot matching `fd` (poll backend).
 *
 * Returns NIO_SUCCESS, NIO_ERROR_INTERNAL on bad arguments / unknown fd /
 * no backend, or (epoll backend) epoll_ctl()'s return value. */
int nio_mod_fd(struct nio * io, int fd, int event_flags, void * ptr)
{
    if (fd < 0)
        return NIO_ERROR_INTERNAL;

#ifdef ENABLE_EPOLL
    if (io->epoll_fd >= 0)
    {
        int rv;
        struct epoll_event event = {};

        /* epoll_data is a union: either the raw fd or the user cookie. */
        if (ptr == NULL)
        {
            event.data.fd = fd;
        }
        else
        {
            event.data.ptr = ptr;
        }

        if ((event_flags & NIO_EVENT_INPUT) != 0)
            event.events |= EPOLLIN;
        if ((event_flags & NIO_EVENT_OUTPUT) != 0)
            event.events |= EPOLLOUT;
        /* Refuse a modification that would disable all events. */
        if (event.events == 0)
            return NIO_ERROR_INTERNAL;

        while ((rv = epoll_ctl(io->epoll_fd, EPOLL_CTL_MOD, fd, &event)) != 0 && errno == EINTR)
        {
            /* If epoll_ctl() was interrupted by the system, repeat. */
        }
        return rv;
    }
    else
#endif
    if (io->poll_max_fds > 0)
    {
        struct pollfd * used_pollfd = NULL;
        void ** used_ptr = NULL;

        /* Locate the slot currently registered for this fd. */
        for (size_t i = 0; i < io->poll_max_fds; ++i)
        {
            if (io->poll_fds[i].fd == fd)
            {
                used_pollfd = &io->poll_fds[i];
                used_ptr = &io->poll_ptrs[i];
                break;
            }
        }
        if (used_pollfd == NULL)
            return NIO_ERROR_INTERNAL;

        used_pollfd->events = 0;
        if ((event_flags & NIO_EVENT_INPUT) != 0)
            used_pollfd->events |= POLLIN;
        if ((event_flags & NIO_EVENT_OUTPUT) != 0)
            used_pollfd->events |= POLLOUT;
        if (used_pollfd->events == 0)
            return NIO_ERROR_INTERNAL;

        used_pollfd->fd = fd;
        *used_ptr = ptr;

        return NIO_SUCCESS;
    }

    /* Neither backend was initialised via nio_use_poll()/nio_use_epoll(). */
    return NIO_ERROR_INTERNAL;
}
|
||||
|
||||
int nio_del_fd(struct nio * io, int fd)
|
||||
{
|
||||
if (fd < 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
int rv;
|
||||
|
||||
while ((rv = epoll_ctl(io->epoll_fd, EPOLL_CTL_DEL, fd, NULL)) != 0 && errno == EINTR)
|
||||
{
|
||||
/* If epoll_ctl() was interrupted by the system, repeat. */
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
if (io->poll_max_fds > 0)
|
||||
{
|
||||
struct pollfd * used_pollfd = NULL;
|
||||
void ** used_ptr = NULL;
|
||||
|
||||
for (size_t i = 0; i < io->poll_max_fds; ++i)
|
||||
{
|
||||
if (io->poll_fds[i].fd == fd)
|
||||
{
|
||||
used_pollfd = &io->poll_fds[i];
|
||||
used_ptr = &io->poll_ptrs[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (used_pollfd == NULL)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
used_pollfd->fd = -1;
|
||||
*used_ptr = NULL;
|
||||
|
||||
return NIO_SUCCESS;
|
||||
}
|
||||
|
||||
return NIO_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
int nio_run(struct nio * io, int timeout)
|
||||
{
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
do
|
||||
{
|
||||
io->nready = epoll_wait(io->epoll_fd, io->events, io->max_events, timeout);
|
||||
} while (io->nready < 0 && errno == EINTR);
|
||||
|
||||
if (io->nready < 0)
|
||||
return NIO_ERROR_SYSTEM;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
if (io->poll_max_fds > 0)
|
||||
{
|
||||
do
|
||||
{
|
||||
io->nready = poll(io->poll_fds, io->poll_max_fds, timeout);
|
||||
} while (io->nready < 0 && errno == EINTR);
|
||||
|
||||
if (io->nready < 0)
|
||||
return NIO_ERROR_SYSTEM;
|
||||
|
||||
if (io->nready > 0)
|
||||
{
|
||||
for (nfds_t i = 0, j = 0; i < io->poll_max_fds; ++i)
|
||||
{
|
||||
if (io->poll_fds[i].fd >= 0 && io->poll_fds[i].revents != 0)
|
||||
{
|
||||
io->poll_fds_set[j++] = i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return NIO_SUCCESS;
|
||||
}
|
||||
|
||||
int nio_check(struct nio * io, int index, int event_flags)
|
||||
{
|
||||
if (nio_is_valid(io, index) != NIO_SUCCESS)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
uint32_t epoll_events = 0;
|
||||
|
||||
if ((event_flags & NIO_EVENT_INPUT) != 0)
|
||||
epoll_events |= EPOLLIN;
|
||||
if ((event_flags & NIO_EVENT_OUTPUT) != 0)
|
||||
epoll_events |= EPOLLOUT;
|
||||
if ((event_flags & NIO_EVENT_ERROR) != 0)
|
||||
epoll_events |= EPOLLERR | EPOLLHUP;
|
||||
if (epoll_events == 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
struct epoll_event const * const events = (struct epoll_event *)io->events;
|
||||
if ((events[index].events & epoll_events) == 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
return NIO_SUCCESS;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
if (io->poll_max_fds > 0)
|
||||
{
|
||||
short int poll_events = 0;
|
||||
|
||||
if ((event_flags & NIO_EVENT_INPUT) != 0)
|
||||
poll_events |= POLLIN;
|
||||
if ((event_flags & NIO_EVENT_OUTPUT) != 0)
|
||||
poll_events |= POLLOUT;
|
||||
if ((event_flags & NIO_EVENT_ERROR) != 0)
|
||||
poll_events |= POLLERR | POLLHUP;
|
||||
if (poll_events == 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
if ((io->poll_fds[io->poll_fds_set[index]].revents & poll_events) == 0)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
return NIO_SUCCESS;
|
||||
}
|
||||
|
||||
return NIO_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
int nio_is_valid(struct nio const * const io, int index)
|
||||
{
|
||||
if (index < 0 || index >= io->nready)
|
||||
return NIO_ERROR_INTERNAL;
|
||||
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
return NIO_SUCCESS;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
if (io->poll_max_fds > 0 && io->poll_fds[io->poll_fds_set[index]].fd >= 0)
|
||||
{
|
||||
return NIO_SUCCESS;
|
||||
}
|
||||
|
||||
return NIO_ERROR_INTERNAL;
|
||||
}
|
||||
|
||||
int nio_get_fd(struct nio * io, int index)
|
||||
{
|
||||
if (nio_is_valid(io, index) != NIO_SUCCESS)
|
||||
return -1;
|
||||
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
struct epoll_event const * const events = (struct epoll_event *)io->events;
|
||||
|
||||
return events[index].data.fd;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
if (io->poll_max_fds > 0)
|
||||
{
|
||||
return io->poll_fds[io->poll_fds_set[index]].fd;
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
void * nio_get_ptr(struct nio * io, int index)
|
||||
{
|
||||
if (nio_is_valid(io, index) != NIO_SUCCESS)
|
||||
return NULL;
|
||||
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
struct epoll_event * const events = (struct epoll_event *)io->events;
|
||||
|
||||
return events[index].data.ptr;
|
||||
}
|
||||
else
|
||||
#endif
|
||||
if (io->poll_max_fds > 0)
|
||||
{
|
||||
return io->poll_ptrs[io->poll_fds_set[index]];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void nio_free(struct nio * io)
|
||||
{
|
||||
for (size_t i = 0; i < io->poll_max_fds; ++i)
|
||||
{
|
||||
if (io->poll_fds[i].fd >= 0)
|
||||
{
|
||||
close(io->poll_fds[i].fd);
|
||||
io->poll_fds[i].fd = -1;
|
||||
}
|
||||
}
|
||||
#ifdef ENABLE_EPOLL
|
||||
if (io->epoll_fd >= 0)
|
||||
{
|
||||
close(io->epoll_fd);
|
||||
io->epoll_fd = -1;
|
||||
}
|
||||
#endif
|
||||
free(io->poll_fds);
|
||||
free(io->poll_ptrs);
|
||||
free(io->poll_fds_set);
|
||||
free(io->events);
|
||||
}
|
||||
95
nio.h
Normal file
95
nio.h
Normal file
@@ -0,0 +1,95 @@
|
||||
#ifndef NIO_H
|
||||
#define NIO_H 1
|
||||
|
||||
#include <poll.h>
|
||||
|
||||
#define WARN_UNUSED __attribute__((__warn_unused_result__))
|
||||
|
||||
enum
|
||||
{
|
||||
NIO_SUCCESS = 0,
|
||||
NIO_ERROR_INTERNAL = 1,
|
||||
NIO_ERROR_SYSTEM = -1
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
NIO_EVENT_INVALID = 0,
|
||||
NIO_EVENT_INPUT = 1,
|
||||
NIO_EVENT_OUTPUT = 2,
|
||||
NIO_EVENT_ERROR = 4,
|
||||
};
|
||||
|
||||
struct nio
|
||||
{
|
||||
int nready;
|
||||
|
||||
nfds_t poll_max_fds;
|
||||
struct pollfd * poll_fds;
|
||||
void ** poll_ptrs;
|
||||
nfds_t * poll_fds_set;
|
||||
|
||||
int epoll_fd;
|
||||
int max_events;
|
||||
void * events;
|
||||
};
|
||||
|
||||
void nio_init(struct nio * io);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_use_poll(struct nio * io, nfds_t max_fds);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_use_epoll(struct nio * io, int max_events);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_add_fd(struct nio * io, int fd, int event_flags, void * ptr);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_mod_fd(struct nio * io, int fd, int event_flags, void * ptr);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_del_fd(struct nio * io, int fd);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_run(struct nio * io, int timeout);
|
||||
|
||||
WARN_UNUSED
|
||||
static inline int nio_get_nready(struct nio const * const io)
|
||||
{
|
||||
return io->nready;
|
||||
}
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_check(struct nio * io, int index, int events);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_is_valid(struct nio const * const io, int index);
|
||||
|
||||
WARN_UNUSED
|
||||
int nio_get_fd(struct nio * io, int index);
|
||||
|
||||
WARN_UNUSED
|
||||
void * nio_get_ptr(struct nio * io, int index);
|
||||
|
||||
WARN_UNUSED
|
||||
static inline int nio_has_input(struct nio * io, int index)
|
||||
{
|
||||
return nio_check(io, index, NIO_EVENT_INPUT);
|
||||
}
|
||||
|
||||
WARN_UNUSED
|
||||
static inline int nio_can_output(struct nio * io, int index)
|
||||
{
|
||||
return nio_check(io, index, NIO_EVENT_OUTPUT);
|
||||
}
|
||||
|
||||
WARN_UNUSED
|
||||
static inline int nio_has_error(struct nio * io, int index)
|
||||
{
|
||||
return nio_check(io, index, NIO_EVENT_ERROR);
|
||||
}
|
||||
|
||||
void nio_free(struct nio * io);
|
||||
|
||||
#endif
|
||||
149
npfring.c
Normal file
149
npfring.c
Normal file
@@ -0,0 +1,149 @@
|
||||
#include <pfring.h>
|
||||
#include <sched.h>
|
||||
|
||||
#include "npfring.h"
|
||||
#include "utils.h"
|
||||
|
||||
void npfring_print_version(FILE * const out)
|
||||
{
|
||||
uint32_t pfring_version;
|
||||
|
||||
pfring_version_noring(&pfring_version);
|
||||
fprintf(out,
|
||||
"PF_RING version: %d.%d.%d\n",
|
||||
(pfring_version & 0xFFFF0000) >> 16,
|
||||
(pfring_version & 0x0000FF00) >> 8,
|
||||
(pfring_version & 0x000000FF));
|
||||
}
|
||||
|
||||
int npfring_init(char const * device_name, uint32_t caplen, struct npfring * result)
|
||||
{
|
||||
pfring * pd = pfring_open(device_name, caplen, PF_RING_REENTRANT | PF_RING_PROMISC);
|
||||
|
||||
if (pd == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
pfring_set_application_name(pd, "nDPId");
|
||||
logger_early(0, "PF_RING RX channels: %d", pfring_get_num_rx_channels(pd));
|
||||
result->pfring_desc = pd;
|
||||
|
||||
int rc;
|
||||
if ((rc = pfring_set_socket_mode(pd, recv_only_mode)) != 0)
|
||||
{
|
||||
logger_early(1, "pfring_set_sock_moode returned: %d", rc);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void npfring_close(struct npfring * npf)
|
||||
{
|
||||
if (npf->pfring_desc != NULL)
|
||||
{
|
||||
pfring_close(npf->pfring_desc);
|
||||
npf->pfring_desc = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int npfring_set_bpf(struct npfring * npf, char const * bpf_filter)
|
||||
{
|
||||
char buf[BUFSIZ];
|
||||
|
||||
if (npf->pfring_desc == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
// pfring_set_bpf_filter expects a char*
|
||||
snprintf(buf, sizeof(buf), "%s", bpf_filter);
|
||||
return pfring_set_bpf_filter(npf->pfring_desc, buf);
|
||||
}
|
||||
|
||||
int npfring_datalink(struct npfring * npf)
|
||||
{
|
||||
if (npf->pfring_desc != NULL)
|
||||
{
|
||||
return pfring_get_link_type(npf->pfring_desc);
|
||||
}
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
int npfring_enable(struct npfring * npf)
|
||||
{
|
||||
if (npf->pfring_desc == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return pfring_enable_ring(npf->pfring_desc);
|
||||
}
|
||||
|
||||
int npfring_get_selectable_fd(struct npfring * npf)
|
||||
{
|
||||
if (npf->pfring_desc == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
return pfring_get_selectable_fd(npf->pfring_desc);
|
||||
}
|
||||
|
||||
int npfring_recv(struct npfring * npf, struct pcap_pkthdr * pcap_hdr)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (npf->pfring_desc == NULL || pcap_hdr == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
unsigned char * buf = &npf->pfring_buffer[0];
|
||||
struct pfring_pkthdr pfring_pkthdr;
|
||||
rc = pfring_recv(npf->pfring_desc, &buf, PFRING_BUFFER_SIZE, &pfring_pkthdr, 0);
|
||||
if (rc > 0)
|
||||
{
|
||||
pcap_hdr->ts = pfring_pkthdr.ts;
|
||||
pcap_hdr->caplen = pfring_pkthdr.caplen;
|
||||
pcap_hdr->len = pfring_pkthdr.len;
|
||||
}
|
||||
else
|
||||
{
|
||||
pcap_hdr->ts.tv_sec = 0;
|
||||
pcap_hdr->ts.tv_usec = 0;
|
||||
pcap_hdr->caplen = 0;
|
||||
pcap_hdr->len = 0;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int npfring_stats(struct npfring * npf, struct npfring_stats * stats)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (npf->pfring_desc == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
pfring_stat pstats;
|
||||
rc = pfring_stats(npf->pfring_desc, &pstats);
|
||||
if (rc == 0)
|
||||
{
|
||||
stats->recv = pstats.recv;
|
||||
stats->drop = pstats.drop;
|
||||
stats->shunt = pstats.shunt;
|
||||
}
|
||||
else
|
||||
{
|
||||
stats->drop = 0;
|
||||
stats->recv = 0;
|
||||
stats->shunt = 0;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
41
npfring.h
Normal file
41
npfring.h
Normal file
@@ -0,0 +1,41 @@
|
||||
#ifndef PFRING_H
|
||||
#define PFRING_H 1
|
||||
|
||||
#include <pcap/pcap.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "config.h"
|
||||
|
||||
struct npfring
|
||||
{
|
||||
void * pfring_desc;
|
||||
uint8_t pfring_buffer[PFRING_BUFFER_SIZE];
|
||||
};
|
||||
|
||||
struct npfring_stats
|
||||
{
|
||||
uint64_t recv;
|
||||
uint64_t drop;
|
||||
uint64_t shunt;
|
||||
};
|
||||
|
||||
void npfring_print_version(FILE * const out);
|
||||
|
||||
int npfring_init(char const * device_name, uint32_t caplen, struct npfring * result);
|
||||
|
||||
void npfring_close(struct npfring * npf);
|
||||
|
||||
int npfring_set_bpf(struct npfring * npf, char const * bpf_filter);
|
||||
|
||||
int npfring_datalink(struct npfring * npf);
|
||||
|
||||
int npfring_enable(struct npfring * npf);
|
||||
|
||||
int npfring_get_selectable_fd(struct npfring * npf);
|
||||
|
||||
int npfring_recv(struct npfring * npf, struct pcap_pkthdr * pf_hdr);
|
||||
|
||||
int npfring_stats(struct npfring * npf, struct npfring_stats * stats);
|
||||
|
||||
#endif
|
||||
@@ -1,12 +1,14 @@
|
||||
# Maintainer: Toni Uhlig <toni@impl.cc>
|
||||
|
||||
pkgname=nDPId-testing
|
||||
pkgname=ndpid-testing
|
||||
pkgver=1.0
|
||||
pkgrel=1
|
||||
pkgdesc="Tiny nDPI based deep packet inspection daemons / toolkit."
|
||||
arch=('i686' 'x86_64')
|
||||
url="https://github.com/utoni/nDPId"
|
||||
license=('GPL3')
|
||||
depends=('libpcap')
|
||||
makedepends=('cmake' 'make' 'gcc' 'wget' 'unzip')
|
||||
options=()
|
||||
|
||||
build() {
|
||||
|
||||
11
packages/debian/postrm
Executable file
11
packages/debian/postrm
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ "$1" = "remove" -o "$1" = "purge" ]; then
|
||||
rm -rf /run/nDPId /run/nDPIsrvd
|
||||
|
||||
if [ "$1" = "purge" ]; then
|
||||
deluser ndpid || true
|
||||
deluser ndpisrvd || true
|
||||
delgroup ndpisrvd-distributor || true
|
||||
fi
|
||||
fi
|
||||
17
packages/debian/preinst
Executable file
17
packages/debian/preinst
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/sh
|
||||
|
||||
addgroup --system ndpisrvd-distributor
|
||||
adduser --system --no-create-home --shell=/bin/false --group ndpisrvd
|
||||
adduser --system --no-create-home --shell=/bin/false --group ndpid
|
||||
|
||||
cat <<EOF
|
||||
****************************************************************************
|
||||
* The user whom may want to access DPI data needs access to: *
|
||||
* /run/nDPIsrvd/distributor *
|
||||
* *
|
||||
* To make it accessible to [USER], type: *
|
||||
* sudo usermod --append --groups ndpisrvd-distributor [USER] *
|
||||
* *
|
||||
* Please note that you might need to re-login to make changes take effect. *
|
||||
****************************************************************************
|
||||
EOF
|
||||
5
packages/debian/prerm
Executable file
5
packages/debian/prerm
Executable file
@@ -0,0 +1,5 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ "$1" = "remove" -o "$1" = "purge" ]; then
|
||||
systemctl stop ndpisrvd.service
|
||||
fi
|
||||
@@ -1,17 +1,32 @@
|
||||
From dd4b50f34008f636e31e0a0c6c05df6791767231 Mon Sep 17 00:00:00 2001
|
||||
From: Toni Uhlig <matzeton@googlemail.com>
|
||||
Date: Mon, 9 Dec 2024 16:26:09 +0100
|
||||
Subject: [PATCH] Allow in-source builds required for OpenWrt toolchain.
|
||||
|
||||
Signed-off-by: Toni Uhlig <matzeton@googlemail.com>
|
||||
---
|
||||
CMakeLists.txt | 8 --------
|
||||
1 file changed, 8 deletions(-)
|
||||
|
||||
diff --git a/CMakeLists.txt b/CMakeLists.txt
|
||||
index 9045237..83f72b3 100644
|
||||
index 14a0ec829..5d7d45073 100644
|
||||
--- a/CMakeLists.txt
|
||||
+++ b/CMakeLists.txt
|
||||
@@ -1,12 +1,5 @@
|
||||
cmake_minimum_required(VERSION 3.12.4)
|
||||
project(nDPId C)
|
||||
@@ -10,14 +10,6 @@ if(CMAKE_COMPILER_IS_GNUCXX)
|
||||
endif(CMAKE_COMPILER_IS_GNUCXX)
|
||||
set(CMAKE_C_STANDARD 11)
|
||||
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_DEFAULT_SOURCE=1 -D_GNU_SOURCE=1")
|
||||
-if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
|
||||
- message(FATAL_ERROR "In-source builds are not allowed.\n"
|
||||
- "Please remove ${PROJECT_SOURCE_DIR}/CMakeCache.txt\n"
|
||||
- "and\n"
|
||||
- "${PROJECT_SOURCE_DIR}/CMakeFiles\n"
|
||||
- "Create a build directory somewhere and run CMake again.")
|
||||
- "Create a build directory somewhere and run CMake again.\n"
|
||||
- "Or run: 'cmake -S ${PROJECT_SOURCE_DIR} -B ./your-custom-build-dir [CMAKE-OPTIONS]'")
|
||||
-endif()
|
||||
set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake)
|
||||
find_package(PkgConfig REQUIRED)
|
||||
|
||||
--
|
||||
2.39.5
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=nDPId-testing
|
||||
PKG_VERSION:=1.0
|
||||
PKG_RELEASE:=$(AUTORELEASE)
|
||||
PKG_RELEASE:=1
|
||||
|
||||
ifneq ($(wildcard /artifacts),)
|
||||
PKG_DIRECTORY:=/artifacts
|
||||
@@ -18,6 +18,7 @@ PKG_LICENSE_FILES:=COPYING
|
||||
|
||||
CMAKE_INSTALL:=1
|
||||
|
||||
include $(INCLUDE_DIR)/kernel.mk
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
include $(INCLUDE_DIR)/cmake.mk
|
||||
|
||||
@@ -25,13 +26,13 @@ define Package/nDPId-testing
|
||||
TITLE:=nDPId is a tiny nDPI based daemons / toolkit (nDPId source repository)
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
DEPENDS:=@!SMALL_FLASH @!LOW_MEMORY_FOOTPRINT +libpcap +zlib +LIBNDPI_GCRYPT:libgcrypt
|
||||
DEPENDS:=@!SMALL_FLASH @!LOW_MEMORY_FOOTPRINT +libpcap +zlib +LIBNDPI_GCRYPT:libgcrypt +NDPID_TESTING_INFLUXDB:libcurl +NDPID_TESTING_PFRING:libpfring
|
||||
URL:=http://github.com/lnslbrty/nDPId
|
||||
endef
|
||||
|
||||
define Package/nDPId-testing/description
|
||||
nDPId is a set of daemons and tools to capture, process and classify network flows.
|
||||
It's only dependencies (besides a half-way modern c library and POSIX threads) are libnDPI (>= 3.6.0 or current github dev branch) and libpcap.
|
||||
It's only dependencies (besides a half-way modern c library and POSIX threads) are libnDPI and libpcap.
|
||||
endef
|
||||
|
||||
define Package/nDPId-testing/config
|
||||
@@ -42,6 +43,30 @@ config NDPID_TESTING_COLLECTD_SUPPORT
|
||||
help
|
||||
This option enables collectd to gather nDPId statistics via plugin-exec.
|
||||
Disabled by default.
|
||||
|
||||
config NDPID_TESTING_LIBNDPI_COMMIT_HASH
|
||||
string "libnDPI commit hash"
|
||||
depends on PACKAGE_nDPId-testing
|
||||
default ""
|
||||
help
|
||||
Set the desired libnDPI git commit hash you want to link nDPId against.
|
||||
Leave empty to use the dev branch.
|
||||
Disabled by default.
|
||||
|
||||
config NDPID_TESTING_INFLUXDB
|
||||
bool "nDPIsrvd-influxdb"
|
||||
depends on PACKAGE_nDPId-testing
|
||||
default n
|
||||
help
|
||||
An InfluxDB push daemon. It aggregates various statistics gathered from nDPId.
|
||||
The results are sent to a specified InfluxDB endpoint.
|
||||
|
||||
config NDPID_TESTING_PFRING
|
||||
bool "PF_RING support"
|
||||
depends on PACKAGE_nDPId-testing
|
||||
default n
|
||||
help
|
||||
Enable PF_RING support for faster packet capture.
|
||||
endef
|
||||
|
||||
CMAKE_OPTIONS += -DBUILD_EXAMPLES=ON
|
||||
@@ -56,34 +81,45 @@ CMAKE_OPTIONS += -DSTATIC_LIBNDPI_INSTALLDIR="$(PKG_BUILD_DIR)/libnDPI/install"
|
||||
TARGET_CFLAGS += -DLIBNDPI_STATIC=1
|
||||
TARGET_CFLAGS += -Werror
|
||||
|
||||
ifneq ($(CONFIG_NDPID_TESTING_PFRING),)
|
||||
# FIXME: PFRING kernel include directory is hardcoded (not installed to linux header directory).
|
||||
CMAKE_OPTIONS += -DENABLE_PFRING=ON \
|
||||
-DPFRING_KERNEL_INC="$(KERNEL_BUILD_DIR)/PF_RING-8.4.0/kernel" \
|
||||
-DPFRING_INSTALLDIR="$(STAGING_DIR)/usr" \
|
||||
-DPFRING_LINK_STATIC=OFF
|
||||
endif
|
||||
|
||||
ifneq ($(CONFIG_NDPID_TESTING_INFLUXDB),)
|
||||
CMAKE_OPTIONS += -DENABLE_CURL=ON
|
||||
endif
|
||||
|
||||
ifneq ($(CONFIG_LIBNDPI_GCRYPT),)
|
||||
CMAKE_OPTIONS += -DNDPI_WIDTH_GCRYPT=ON
|
||||
endif
|
||||
|
||||
ifdef NDPID_TESTING_COLLECTD_SUPPORT
|
||||
ifdef CONFIG_NDPID_TESTING_COLLECTD_SUPPORT
|
||||
define Package/nDPId-testing/install-collectd-files
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nDPIsrvd-collectd $(1)/usr/bin/nDPIsrvd-testing-collectd
|
||||
endef
|
||||
endif
|
||||
|
||||
define Build/Prepare
|
||||
@echo 'tar: $(DL_DIR)/$(PKG_SOURCE)'
|
||||
@echo 'PKG_DIRECTORY=$(PKG_DIRECTORY)'
|
||||
@echo 'PKG_SOURCE_SUBDIR=$(PKG_SOURCE_SUBDIR)'
|
||||
tar \
|
||||
--exclude-tag-all='gcov.css' \
|
||||
--exclude-tag-all='cmake_install.cmake' \
|
||||
--exclude='libnDPI-*.tar' \
|
||||
--exclude='nDPId-*.tar.bz2' \
|
||||
--exclude='libnDPI' \
|
||||
--exclude='test' \
|
||||
--exclude-vcs \
|
||||
--exclude-vcs-ignores \
|
||||
--transform 's,^,$(PKG_SOURCE_SUBDIR)/,' \
|
||||
-czf "$(DL_DIR)/$(PKG_SOURCE)" -C "$(PKG_DIRECTORY)" .
|
||||
tar -tzf "$(DL_DIR)/$(PKG_SOURCE)"
|
||||
$(PKG_UNPACK)
|
||||
cd '$(PKG_BUILD_DIR)' && patch -p1 < $(PKG_BUILD_DIR)/packages/openwrt/net/nDPId-testing/001-enable-in-source-build.patch
|
||||
ifdef CONFIG_NDPID_TESTING_LIBNDPI_COMMIT_HASH
|
||||
define Package/nDPId-testing/get-and-build-libndpi
|
||||
@echo 'Using commit hash: $(CONFIG_NDPID_TESTING_LIBNDPI_COMMIT_HASH)'
|
||||
env \
|
||||
CC="$(TARGET_CC)" \
|
||||
AR="$(TARGET_AR)" \
|
||||
RANLIB="$(TARGET_RANLIB)" \
|
||||
CFLAGS="$(TARGET_CFLAGS)" \
|
||||
CPPFLAGS="$(TARGET_CPPFLAGS)" \
|
||||
LDFLAGS="$(TARGET_LDFLAGS)" \
|
||||
MAKE_PROGRAM="$(MAKE)" \
|
||||
NDPI_COMMIT_HASH=$(CONFIG_NDPID_TESTING_LIBNDPI_COMMIT_HASH) \
|
||||
$(PKG_BUILD_DIR)/scripts/get-and-build-libndpi.sh
|
||||
endef
|
||||
else
|
||||
define Package/nDPId-testing/get-and-build-libndpi
|
||||
@echo 'Using dev branch.'
|
||||
env \
|
||||
CC="$(TARGET_CC)" \
|
||||
AR="$(TARGET_AR)" \
|
||||
@@ -94,6 +130,21 @@ define Build/Prepare
|
||||
MAKE_PROGRAM="$(MAKE)" \
|
||||
$(PKG_BUILD_DIR)/scripts/get-and-build-libndpi.sh
|
||||
endef
|
||||
endif
|
||||
|
||||
define Build/Prepare
|
||||
@rm -f '$(DL_DIR)/$(PKG_SOURCE)'
|
||||
@rm -rf '$(PKG_BUILD_DIR)/*'
|
||||
@echo 'tar: $(DL_DIR)/$(PKG_SOURCE)'
|
||||
@echo 'pwd: $(shell pwd)'
|
||||
@echo 'PKG_DIRECTORY=$(PKG_DIRECTORY)'
|
||||
@echo 'PKG_SOURCE_SUBDIR=$(PKG_SOURCE_SUBDIR)'
|
||||
cd '$(PKG_DIRECTORY)' && git archive --prefix '$(PKG_SOURCE_SUBDIR)/' -o '$(DL_DIR)/new_$(PKG_SOURCE)' HEAD
|
||||
mv '$(DL_DIR)/new_$(PKG_SOURCE)' '$(DL_DIR)/$(PKG_SOURCE)'; \
|
||||
$(PKG_UNPACK)
|
||||
cd '$(PKG_BUILD_DIR)' && patch -p1 < $(PKG_BUILD_DIR)/packages/openwrt/net/nDPId-testing/001-enable-in-source-build.patch
|
||||
$(call Package/nDPId-testing/get-and-build-libndpi)
|
||||
endef
|
||||
|
||||
define Package/nDPId-testing/install
|
||||
$(INSTALL_DIR) $(1)/usr/sbin
|
||||
@@ -105,14 +156,13 @@ define Package/nDPId-testing/install
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nDPIsrvd-analysed $(1)/usr/bin/nDPIsrvd-testing-analysed
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nDPIsrvd-captured $(1)/usr/bin/nDPIsrvd-testing-captured
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nDPIsrvd-collectd $(1)/usr/bin/nDPIsrvd-testing-collectd
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nDPIsrvd-json-dump $(1)/usr/bin/nDPIsrvd-testing-json-dump
|
||||
|
||||
$(INSTALL_DIR) $(1)/etc/init.d/
|
||||
$(INSTALL_BIN) $(PKG_NAME).init $(1)/etc/init.d/$(PKG_NAME)
|
||||
$(INSTALL_DIR) $(1)/etc/config
|
||||
$(INSTALL_CONF) $(PKG_NAME).config $(1)/etc/config/$(PKG_NAME)
|
||||
|
||||
$(call Package/nDPId-testing/install-collectd-files)
|
||||
$(call Package/nDPId-testing/install-collectd-files,$(1))
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,nDPId-testing))
|
||||
|
||||
@@ -33,7 +33,7 @@ config nDPId
|
||||
#option udp_connect '127.0.0.1:31337'
|
||||
#option proto_file ''
|
||||
#option cat_file ''
|
||||
#option ja3_file ''
|
||||
#option ja4_file ''
|
||||
#option ssl_file ''
|
||||
#option alias ''
|
||||
#option analysis 0
|
||||
|
||||
@@ -64,12 +64,14 @@ start_ndpid_instance() {
|
||||
fi
|
||||
|
||||
args="$(print_arg_str "$cfg" 'interface' '-i')"
|
||||
args="$args$(print_arg_bool "$cfg" 'use_pfring' '-r')"
|
||||
args="$args$(print_arg_bool "$cfg" 'internal_only' '-I')"
|
||||
args="$args$(print_arg_bool "$cfg" 'external_only' '-E')"
|
||||
args="$args$(print_arg_str "$cfg" 'bpf_filter' '-B')"
|
||||
args="$args$(print_arg_bool "$cfg" 'use_poll' '-e')"
|
||||
args="$args$(print_arg_str "$cfg" 'proto_file' '-P')"
|
||||
args="$args$(print_arg_str "$cfg" 'cat_file' '-C')"
|
||||
args="$args$(print_arg_str "$cfg" 'ja3_file' '-J')"
|
||||
args="$args$(print_arg_str "$cfg" 'ja4_file' '-J')"
|
||||
args="$args$(print_arg_str "$cfg" 'ssl_file' '-S')"
|
||||
args="$args$(print_arg_str "$cfg" 'alias' '-a')"
|
||||
args="$args$(print_arg_bool "$cfg" 'analysis' '-A')"
|
||||
@@ -116,7 +118,7 @@ validate_ndpid_section() {
|
||||
'udp_connect:string' \
|
||||
'proto_file:string' \
|
||||
'cat_file:string' \
|
||||
'ja3_file:string' \
|
||||
'ja4_file:string' \
|
||||
'ssl_file:string' \
|
||||
'alias:string' \
|
||||
'analysis:bool:0' \
|
||||
|
||||
8
packages/redhat/post_uninstall
Normal file
8
packages/redhat/post_uninstall
Normal file
@@ -0,0 +1,8 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ $1 == 0 ]; then
|
||||
rm -rf /run/nDPId /run/nDPIsrvd
|
||||
userdel ndpid || true
|
||||
userdel ndpisrvd || true
|
||||
groupdel ndpisrvd-distributor || true
|
||||
fi
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user