Mirror of https://github.com/Telecominfraproject/wlan-lanforge-scripts.git, synced 2025-11-02 19:58:03 +00:00
new report logic: uses pandas instead of xlsxwriter; deletes a few in-function imports and moves imports to the top of the file
py-json/realm.py (104 changed lines)
@@ -19,6 +19,8 @@ import datetime
 import base64
 import xlsxwriter
+import pandas as pd
 import requests
+import ast


 def wpa_ent_list():
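
With pandas imported at module level, the xlsx path no longer has to drive xlsxwriter cell by cell; DataFrame.to_excel covers it. A minimal sketch, not from this commit, with made-up rows and file name:

    import pandas as pd

    # Hypothetical endpoint rows, shaped like the dicts monitor() collects.
    rows = [
        {'Timestamp': '2021-04-01 12:00:00', 'name': 'sta0000-A', 'rx rate': 256000},
        {'Timestamp': '2021-04-01 12:00:01', 'name': 'sta0000-A', 'rx rate': 255000},
    ]
    df = pd.DataFrame(rows)
    # to_excel needs an engine installed (openpyxl or xlsxwriter),
    # but the caller no longer manages workbooks or worksheets.
    df.to_excel('report.xlsx', index=False)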
@@ -1164,7 +1166,7 @@ class L3CXProfile(BaseProfile):
         else:
             output_format = report_file.split('.')[-1]
 
-        # Step 1, column names . what is this for?
+        # Step 1, column names
         fields=None
         if col_names is not None and len(col_names) > 0:
             fields = ",".join(col_names)
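
The joined string becomes the fields query parameter that the next hunk hands to json_get. A small sketch of the URL it produces, with illustrative column names:

    # Assumed inputs; the real values come from monitor()'s arguments.
    col_names = ['Name', 'Rx Rate', 'Rx PDUs']
    created_cx = 'cx1-A,cx1-B'

    fields = ','.join(col_names)
    url = '/endp/%s?fields=%s' % (created_cx, fields)
    print(url)  # /endp/cx1-A,cx1-B?fields=Name,Rx Rate,Rx PDUs
    # Column names containing spaces may need URL-encoding,
    # depending on how json_get builds the request.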
@@ -1186,11 +1188,6 @@ class L3CXProfile(BaseProfile):
             response = self.json_get("/endp/all")
         else:
             response = self.json_get("/endp/%s?fields=%s" % (created_cx, fields))
-        print("created cx.........")
-        print(created_cx)
-        print(fields)
-        print("response...........................")
-        print(response)
         if "endpoint" not in response:
             print(response)
             raise ValueError("no endpoint?")
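
The guard above assumes every reply carries an "endpoint" key; when several endpoints are returned, its value is a list of single-key dicts, one per endpoint. That shape is what the pandas logic in the next hunk flattens into rows. A sketch under that assumption, with invented field values:

    import pandas as pd

    # Assumed shape of one /endp reply; names and numbers are illustrative.
    response = {
        'endpoint': [
            {'sta0000-A': {'name': 'sta0000-A', 'rx rate': 256000}},
            {'sta0000-B': {'name': 'sta0000-B', 'rx rate': 248000}},
        ]
    }

    rows = []
    for datum in response['endpoint']:
        for endpoint_data in datum.values():
            rows.append(endpoint_data)

    # pd.DataFrame on a list of dicts derives the columns from the keys.
    print(pd.DataFrame(rows))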
@@ -1215,62 +1212,47 @@ class L3CXProfile(BaseProfile):
             old_cx_rx_values = new_cx_rx_values
             time.sleep(monitor_interval)
 
-        #step 3 organize data
-        endpoints=list()
-        for endpoint in value_map.values():
-            endpoints.append(endpoint['endpoint'])
-        endpoints2 = []
-        for y in range(0, len(endpoints)):
-            for x in range(0, len(endpoints[0])):
-                endpoints2.append(list(list(endpoints[y][x].values())[0].values()))
-        import itertools
-        timestamps2 = list(
-            itertools.chain.from_iterable(itertools.repeat(x, len(created_cx.split(','))) for x in timestamps))
-        for point in range(0, len(endpoints2)):
-            endpoints2[point].insert(0, timestamps2[point])
-        # step 4 save and close
+        # step 3 organize data
+        header_row.insert(0, 'Timestamp')
+        print(header_row)
+
+        full_test_data_list = []
+        for test_timestamp, data in value_map.items():
+            #reduce the endpoint data to single dictionary of dictionaries
+            for datum in data["endpoint"]:
+                for endpoint_data in datum.values():
+                    if self.debug:
+                        print(endpoint_data)
+                    endpoint_data["Timestamp"] = test_timestamp
+                    full_test_data_list.append(endpoint_data)
+
+        df = pd.DataFrame(full_test_data_list)
+
+        try:
+            systeminfo = ast.literal_eval(requests.get('http://'+str(self.lfclient_host)+':'+str(self.lfclient_port)).text)
+        except:
+            systeminfo = ast.literal_eval(requests.get('http://'+str(self.lfclient_host)+':'+str(self.lfclient_port)).text)
+
+        df['LFGUI Release'] = systeminfo['VersionInfo']['BuildVersion']
+        df['Script Name'] = script_name
+        df['Arguments'] = arguments
+
+        for x in ['LFGUI Release', 'Script Name', 'Arguments']:
+            df[x][1:] = ''
+        if output_format == 'hdf':
+            df.to_hdf(report_file, 'table', append=True)
+        if output_format == 'parquet':
+            df.to_parquet(report_file, engine='pyarrow')
+        if output_format == 'png':
+            fig = df.plot().get_figure()
+            fig.savefig(report_file)
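
A note on the systeminfo fetch in the lines added above: the bare except simply repeats the identical request once, so a second failure still raises, and the retry can mask unrelated errors. A bounded, explicit retry, sketched as an alternative rather than what the commit ships (the helper name is invented):

    import ast
    import requests

    def fetch_systeminfo(host, port, attempts=2):
        # The added code treats the GUI root page as a Python-literal
        # dict, hence ast.literal_eval rather than json.loads.
        last_error = None
        for _ in range(attempts):
            try:
                text = requests.get('http://%s:%s' % (host, port)).text
                return ast.literal_eval(text)
            except (requests.RequestException, ValueError, SyntaxError) as error:
                last_error = error
        raise last_error

Separately, df[x][1:] = '' relies on chained indexing, which pandas may apply to a temporary copy; df.loc[1:, x] = '' is the unambiguous spelling.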
-        if output_format.lower() in ['excel', 'xlsx'] or report_file.split('.')[-1] == 'xlsx':
-            workbook = xlsxwriter.Workbook(report_file)
-            worksheet = workbook.add_worksheet()
-            for col_num, data in enumerate(header_row):
-                worksheet.write(0, col_num, data)
-            row_num = 1
-            for x in endpoints2:
-                for col_num, data in enumerate(x):
-                    worksheet.write(row_num, col_num, str(data))
-                row_num += 1
-            workbook.close()
-        else:
-            df = pd.DataFrame(endpoints2)
-            df.columns = header_row
-            import requests
-            import ast
-            try:
-                systeminfo = ast.literal_eval(requests.get('http://'+str(self.lfclient_host)+':'+str(self.lfclient_port)).text)
-            except:
-                systeminfo = ast.literal_eval(requests.get('http://'+str(self.lfclient_host)+':'+str(self.lfclient_port)).text)
-            df['LFGUI Release'] = systeminfo['VersionInfo']['BuildVersion']
-            df['Script Name'] = script_name
-            df['Arguments'] = arguments
-            for x in ['LFGUI Release', 'Script Name', 'Arguments']:
-                df[x][1:] = ''
-            if output_format == 'hdf':
-                df.to_hdf(report_file, 'table', append=True)
-            if output_format == 'parquet':
-                df.to_parquet(report_file, engine='pyarrow')
-            if output_format == 'png':
-                fig = df.plot().get_figure()
-                fig.savefig(report_file)
-            if output_format == 'df':
-                return df
-            supported_formats = ['csv', 'json', 'stata', 'pickle','html']
-            for x in supported_formats:
-                if output_format.lower() == x or report_file.split('.')[-1] == x:
-                    exec('df.to_' + x + '("' + report_file + '")')
-                else:
-                    pass
-            df.to_excel(report_file)
+        if output_format == 'df':
+            return df
+        supported_formats = ['csv', 'json', 'stata', 'pickle','html']
+
+        for x in supported_formats:
+            if output_format.lower() == x or report_file.split('.')[-1] == x:
+                exec('df.to_' + x + '("' + report_file + '")')
 
 
     def refresh_cx(self):
         for cx_name in self.created_cx.keys():
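
The supported-formats loop added above builds source text for exec, which breaks if report_file ever contains a quote and is hard to audit. Every writer involved (to_csv, to_json, to_stata, to_pickle, to_html) is a DataFrame method, so a getattr lookup does the same dispatch; a sketch of an alternative, not what the commit ships:

    import pandas as pd

    def write_report(df: pd.DataFrame, report_file: str, output_format: str) -> None:
        supported_formats = ['csv', 'json', 'stata', 'pickle', 'html']
        for fmt in supported_formats:
            if output_format.lower() == fmt or report_file.split('.')[-1] == fmt:
                # getattr(df, 'to_csv') etc. returns the bound writer method.
                getattr(df, 'to_' + fmt)(report_file)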
@@ -193,7 +193,7 @@ python3 ./test_ipv4_variable_time.py
     optional_args.add_argument('--a_min', help='--a_min bps rate minimum for side_a', default=256000)
     optional_args.add_argument('--b_min', help='--b_min bps rate minimum for side_b', default=256000)
     optional_args.add_argument('--test_duration', help='--test_duration sets the duration of the test', default="2m")
-    optional_args.add_argument('--col_names', help='Which columns you want to monitor', default=['Name','Rx Rate','Rx PDUs'])
+    optional_args.add_argument('--col_names', help='Which columns you want to monitor', default=['Dropped', 'Name', 'Rx Bytes', 'Tx Bytes'])
     args = parser.parse_args()
 
     num_sta = 2
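
For reference, the new default takes effect whenever --col_names is not supplied; a hypothetical run that leans on it, with the other flags shown at their documented defaults:

    python3 ./test_ipv4_variable_time.py --test_duration 2m --a_min 256000 --b_min 256000
    # monitors the Dropped, Name, Rx Bytes, and Tx Bytes columns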