nDPId/examples/py-flow-multiprocess/py-flow-multiprocess.py
Toni Uhlig 9e07a57566 Major nDPId extension. Sorry for the huge commit.
 - nDPId: fixed invalid IP4/IP6 tuple compare
 - nDPIsrvd: fixed caching issue (finally)
 - added a tiny C example (can be used to check flow manager sanity)
 - c-captured: use flow_last_seen timestamp from `struct nDPIsrvd_flow`
 - README.md update: added example JSON sequence
 - nDPId: added new flow event `update` necessary for correct
   timeout handling (and other future use-cases)
 - nDPIsrvd.h and nDPIsrvd.py: switched to an instance
   (consists of an alias/source tuple) based flow manager
 - every flow-related event **must** now serialize `alias`, `source`,
   `flow_id`, `flow_last_seen` and `flow_idle_time` to make the timeout
   handling and verification process work correctly (see the illustrative
   sketch after this commit message)
 - nDPIsrvd.h: ability to profile any dynamic memory (de-)allocation
 - nDPIsrvd.py: removed PcapPacket class (unused)
 - py-flow-dashboard and py-flow-multiprocess: fixed race condition
 - py-flow-info: print a status bar with probably useful information
 - nDPId/nDPIsrvd.h: switched from packet-flow only timestamps (`pkt_*sec`)
   to a generic flow event timestamp `ts_msec`
 - nDPId-test: added additional checks
 - nDPId: increased ICMP flow timeout
 - nDPId: use event-based I/O when capturing packets from a device
 - nDPIsrvd: fixed memory leak on shutdown if remote descriptors
   were still connected

Signed-off-by: Toni Uhlig <matzeton@googlemail.com>
2022-01-20 00:50:38 +01:00
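
To make the serialization requirement flagged above concrete, here is a minimal, illustrative sketch of such a flow event as a Python dict. The field names `alias`, `source`, `flow_id`, `flow_last_seen`, `flow_idle_time` and `ts_msec` come from the commit message; the `flow_event_name` key and all concrete values are assumptions for illustration only, not actual nDPId output.

# Illustrative sketch only -- not part of the commit or the file below.
# Field names are taken from the commit message above; flow_event_name and
# every value are assumed for demonstration purposes.
example_flow_event = {
    'alias': 'probe-1',               # assumed per-daemon alias
    'source': 'eth0',                 # assumed capture source (device or pcap)
    'flow_id': 42,
    'flow_event_name': 'update',      # the new periodic `update` event (key name assumed)
    'flow_last_seen': 1642633838000,  # last flow activity in msec (illustrative)
    'flow_idle_time': 180000,         # idle budget in msec before timeout (illustrative)
    'ts_msec': 1642633838123,         # generic event timestamp replacing pkt_*sec
}

# A consumer could derive the earliest point at which the flow may be
# considered timed out from the two timeout-related fields:
flow_times_out_at_msec = example_flow_event['flow_last_seen'] + example_flow_event['flow_idle_time']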


#!/usr/bin/env python3

import multiprocessing
import os
import sys

# Make the distributed nDPIsrvd.py module importable from the install
# locations below, falling back to the in-tree dependencies directory.
sys.path.append(os.path.dirname(sys.argv[0]) + '/../share/nDPId')
sys.path.append(os.path.dirname(sys.argv[0]) + '/../usr/share/nDPId')
try:
    import nDPIsrvd
    from nDPIsrvd import nDPIsrvdSocket
except ImportError:
    sys.path.append(os.path.dirname(sys.argv[0]) + '/../../dependencies')
    import nDPIsrvd
    from nDPIsrvd import nDPIsrvdSocket


def mp_worker(unused, shared_flow_dict):
    # Periodically print the ids of all flows currently tracked in the shared dict.
    import time

    while True:
        s = str()
        n = int()
        for key in shared_flow_dict.keys():
            try:
                flow = shared_flow_dict[key]
            except KeyError:
                # Another process may remove the key between keys() and the
                # lookup (see the standalone sketch after this listing).
                continue
            s += '{}, '.format(str(flow.flow_id))
            n += 1

        if len(s) == 0:
            s = '-'
        else:
            s = s[:-2]

        print('Flows({}): {}'.format(n, s))
        time.sleep(1)


def nDPIsrvd_worker_onFlowCleanup(instance, current_flow, global_user_data):
    # Flow timed out or ended: drop it from the shared dict.
    shared_flow_dict = global_user_data
    del shared_flow_dict[current_flow.flow_id]
    return True


def nDPIsrvd_worker_onJsonLineRecvd(json_dict, instance, current_flow, global_user_data):
    # Track every flow-related event in the shared dict; ignore non-flow events.
    shared_flow_dict = global_user_data

    if 'flow_id' not in json_dict:
        return True

    shared_flow_dict[current_flow.flow_id] = current_flow
    return True


def nDPIsrvd_worker(address, shared_flow_dict):
    sys.stderr.write('Recv buffer size: {}\n'.format(nDPIsrvd.NETWORK_BUFFER_MAX_SIZE))
    sys.stderr.write('Connecting to {} ..\n'.format(
        address[0] + ':' + str(address[1]) if type(address) is tuple else address))

    nsock = nDPIsrvdSocket()
    nsock.connect(address)
    nsock.loop(nDPIsrvd_worker_onJsonLineRecvd,
               nDPIsrvd_worker_onFlowCleanup,
               shared_flow_dict)


if __name__ == '__main__':
    argparser = nDPIsrvd.defaultArgumentParser()
    args = argparser.parse_args()
    address = nDPIsrvd.validateAddress(args)

    # Manager-backed dict shared between the receiving and the printing process.
    mgr = multiprocessing.Manager()
    shared_flow_dict = mgr.dict()

    nDPIsrvd_job = multiprocessing.Process(target=nDPIsrvd_worker,
                                           args=(address, shared_flow_dict))
    nDPIsrvd_job.start()

    mp_job = multiprocessing.Process(target=mp_worker,
                                     args=(None, shared_flow_dict))
    mp_job.start()

    nDPIsrvd_job.join()
    mp_job.terminate()
    mp_job.join()
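
A minimal, standalone sketch of the race that the `try/except KeyError` in `mp_worker` guards against; nothing here (the `remover` helper, the key names) is part of the example above. With a `multiprocessing.Manager()` dict, another process can delete a key between the `keys()` snapshot and the subsequent indexed access:

#!/usr/bin/env python3
# Illustrative sketch only: shows why the reader loop tolerates KeyError.
import multiprocessing


def remover(shared_dict):
    # Simulates the cleanup callback removing a flow while the printer iterates.
    shared_dict.pop('gone', None)


if __name__ == '__main__':
    mgr = multiprocessing.Manager()
    shared_dict = mgr.dict({'gone': 1, 'kept': 2})

    job = multiprocessing.Process(target=remover, args=(shared_dict,))
    job.start()

    for key in shared_dict.keys():
        try:
            value = shared_dict[key]  # may raise KeyError if the key vanished meanwhile
        except KeyError:
            continue
        print(key, value)

    job.join()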