diff --git a/py-scripts/throughput_qos.py b/py-scripts/throughput_qos.py
index 3738f473..321fec8b 100644
--- a/py-scripts/throughput_qos.py
+++ b/py-scripts/throughput_qos.py
@@ -139,37 +139,41 @@ class ThroughputQOS(Realm):
         self.station_profile.set_command_flag("set_port", "rpt_timer", 1)
         self.station_profile.create(radio=self.radio, sta_names_=self.sta_list, debug=self.debug)
         self._pass("PASS: Station build finished")
-
-        # self.cx_profile.create(endp_type=self.traffic_type, side_a=self.upstream,
-        #                        side_b=self.sta_list,
-        #                        sleep_time=0, tos=BE)
         self.create_cx()
         print("cx build finished")
 
     def create_cx(self):
-        _tos = "BE,BK,VI,VO"
+        _tos = "BK,BE,VI,VO"
         self.tos = _tos.split(",")
         print("tos: {}".format(self.tos))
         for ip_tos in self.tos:
             print("## ip_tos: {}".format(ip_tos))
             print("Creating connections for endpoint type: %s TOS: %s cx-count: %s" % (
-                self.traffic_type, ip_tos, self.cx_profile.get_cx_count()))
+                  self.traffic_type, ip_tos, self.cx_profile.get_cx_count()))
             self.cx_profile.create(endp_type=self.traffic_type, side_a=self.sta_list,
                                    side_b=self.upstream,
                                    sleep_time=0, tos=ip_tos)
-        print("cross connections with TOS type created.")
 
     def evaluate_throughput(self):
-        test_bps_rx_a, test_bps_rx_b = [], []
-        for sta in self.cx_profile.created_cx.keys():
-            if self.cx_profile.side_a_min_bps != '0':
-                test_bps_rx_a.append(float(
-                    f"{list((self.json_get('/cx/%s?fields=bps+rx+a' % (sta))).values())[2]['bps rx a'] / 10000:.2f}"))
-            if self.cx_profile.side_b_min_bps != '0':
-                test_bps_rx_b.append(float(
-                    f"{list((self.json_get('/cx/%s?fields=bps+rx+b' % (sta))).values())[2]['bps rx b'] / 10000:.2f}"))
-        return test_bps_rx_a, test_bps_rx_b
+        tos_upload, tos_download = {}, {}
+        tos_video, tos_voice, tos_bk, tos_be = [], [], [], []
+        if self.cx_profile.get_cx_count() > 0:
+            for sta in self.cx_profile.created_cx.keys():
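+                # NOTE (assumption about CX naming): created_cx keys are expected to
+                # follow the pattern "TOS-sta0000-<n>" (the "TOS-" name prefix, the
+                # station name, then a running connection index), so sta[12:] strips
+                # the 12-character "TOS-sta0000-" part and keeps only the index.
+                # That index modulo 4 selects the TOS bucket below, e.g.
+                # "TOS-sta0000-5"[12:] == "5" and 5 % 4 == 1 files that CX under tos_be.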
+                temp = int(sta[12:])
+                if temp % 4 == 0:
+                    tos_bk.append(list(self.json_get('/cx/%s?fields=bps+rx+a,bps+rx+b,name' % sta).values())[2])
+                elif temp % 4 == 1:
+                    tos_be.append(list(self.json_get('/cx/%s?fields=bps+rx+a,bps+rx+b,name' % sta).values())[2])
+                elif temp % 4 == 2:
+                    tos_voice.append(list(self.json_get('/cx/%s?fields=bps+rx+a,bps+rx+b,name' % sta).values())[2])
+                elif temp % 4 == 3:
+                    tos_video.append(list(self.json_get('/cx/%s?fields=bps+rx+a,bps+rx+b,name' % sta).values())[2])
+        else:
+            print("no connections available to evaluate QOS")
+
+        print(tos_bk, tos_be, tos_voice, tos_video)
+        return tos_upload, tos_download
 
 
 def main():
@@ -220,7 +224,7 @@ python3 ./throughput_QOS.py --debug
 
 python3 ./throughput_QOS.py
-    --upstream_port eth1 (upstream POrt)
+    --upstream_port eth1 (upstream Port)
     --traffic_type lf_udp (traffic type, lf_udp | lf_tcp)
     --test_duration 5m (duration to run traffic 5m --> 5 Minutes)
     --create_sta False (False, means it will not create stations and use the sta_names specified below)
@@ -238,7 +242,7 @@ python3 ./throughput_QOS.py
     parser.add_argument('--create_sta', help='Used to force a connection to a particular AP', default=True)
     parser.add_argument('--sta_names', help='Used to force a connection to a particular AP', default="sta0000")
     parser.add_argument('--tos', help='used to provide different ToS settings: BK | BE | VI | VO | numeric',
-                        default="BE")
+                        default="Best Effort")
     args = parser.parse_args()
     create_sta = True
     if args.create_sta == "False":
@@ -253,12 +257,12 @@ python3 ./throughput_QOS.py
             radio=args.radio)
     else:
         station_list = args.sta_names.split(",")
-    throughput_qos_test = ThroughputQOS(host=args.mgr,
+    throughput_qos = ThroughputQOS(host=args.mgr,
                                         port=args.mgr_port,
                                         number_template="0000",
                                         sta_list=station_list,
                                         create_sta=create_sta,
-                                        name_prefix="TP",
+                                        name_prefix="TOS-",
                                         upstream=args.upstream_port,
                                         ssid=args.ssid,
                                         password=args.passwd,
@@ -274,30 +278,34 @@ python3 ./throughput_QOS.py
                                         tos=args.tos,
                                         _debug_on=args.debug)
 
-    throughput_qos_test.pre_cleanup()
-    throughput_qos_test.build()
+    throughput_qos.pre_cleanup()
+
+    throughput_qos.build()
     # exit()
     if create_sta:
-        if not throughput_qos_test.passes():
-            print(throughput_qos_test.get_fail_message())
-            throughput_qos_test.exit_fail()
+
+        if not throughput_qos.passes():
+            print(throughput_qos.get_fail_message())
+            throughput_qos.exit_fail()
     try:
-        layer3connections = ','.join([[*x.keys()][0] for x in throughput_qos_test.json_get('endp')['endpoint']])
+        layer3connections = ','.join([[*x.keys()][0] for x in throughput_qos.json_get('endp')['endpoint']])
     except:
         raise ValueError('Try setting the upstream port flag if your device does not have an eth1 port')
 
-    throughput_qos_test.start(False, False)
-    # throughput_qos_test.stop()
+    throughput_qos.start(False, False)
+    time.sleep(30)
+    throughput_qos.stop()
+    throughput_qos.evaluate_throughput()
 
     if create_sta:
-        if not throughput_qos_test.passes():
-            print(throughput_qos_test.get_fail_message())
-            throughput_qos_test.exit_fail()
+        if not throughput_qos.passes():
+            print(throughput_qos.get_fail_message())
+            throughput_qos.exit_fail()
         LFUtils.wait_until_ports_admin_up(port_list=station_list)
 
-        if throughput_qos_test.passes():
-            throughput_qos_test.success()
-    # throughput_qos_test.cleanup()
+        if throughput_qos.passes():
+            throughput_qos.success()
+        throughput_qos.cleanup()
 
 
 if __name__ == "__main__":
diff --git a/py-scripts/throughput_qos_report.py b/py-scripts/throughput_qos_report.py
new file mode 100644
index 00000000..a06868fb
--- /dev/null
+++ b/py-scripts/throughput_qos_report.py
@@ -0,0 +1,142 @@
+'''
+------------------------------------------------------------------------------------
+Throughput QOS report evaluates the throughput for a number of clients which are
+running traffic with a particular type of service: Video | Voice | BE | BK
+------------------------------------------------------------------------------------
+'''
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import numpy as np
+import pandas as pd
+import pdfkit
+from lf_report import lf_report
+from lf_graph import lf_bar_graph
+
+
+def table(report, title, data):
+    # creating table
+    report.set_table_title(title)
+    report.build_table_title()
+    report.set_table_dataframe(data)
+    report.build_table()
+
+
+def grph(report, data_set=None, xaxis_name="stations", yaxis_name="Throughput 2 (Mbps)",
+         xaxis_categories=None, label=None, graph_image_name=""):
+    # creating bar graph
+    report.set_graph_title(graph_image_name)
+    report.build_graph_title()
+    graph = lf_bar_graph(_data_set=data_set,
+                         _xaxis_name=xaxis_name,
+                         _yaxis_name=yaxis_name,
+                         _xaxis_categories=xaxis_categories,
+                         _graph_image_name=graph_image_name,
+                         _label=label,
+                         _color=None,
+                         _color_edge='red')
+    graph_png = graph.build_bar_graph()
+    print("graph name {}".format(graph_png))
+    report.set_graph_image(graph_png)
+    report.move_graph_image()
+    report.build_graph()
+
+
+def tos_report(util, sta_num, bps_rx_a, bps_rx_b, tbl_title, grp_title, upload=1000000, download=1000000):
+    # report generation main function
+    rx_a = []
+    rx_b = []
+    pas_fail_up = []
+    pas_fail_down = []
+    thrp_b = upload * len(sta_num)  # get overall upload values
+    thrp_a = download * len(sta_num)  # get overall download values
+    print(f"given upload--{thrp_b} and download--{thrp_a} values")
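+    # Pass/fail criterion used in the loop below: for each channel-utilization step
+    # the aggregate measured rate must reach the offered load reduced by that
+    # utilization.  Illustrative numbers (not from a real run): with
+    # download=1000000 bps per station, 10 stations and util[index] = 20, the
+    # threshold is (10000000 / 100) * (100 - 20) = 8000000 bps, so the step is a
+    # PASS when sum(a) >= 8000000.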
+    index = -1
+    for a in bps_rx_a:
+        index += 1
+        if len(a):
+            rx_a.append(f'min: {min(a)} | max: {max(a)} | avg: {sum(a) / len(a)}')
+            if thrp_a:
+                print(
+                    f"getting overall download values '{index}'----- {sum(a)} \n {(thrp_a / 100) * (100 - int(util[index]))}")
+                if (thrp_a / 100) * (100 - int(util[index])) <= sum(a):
+                    pas_fail_down.append("PASS")
+                else:
+                    pas_fail_down.append("FAIL")
+        else:
+            pas_fail_down.append("NA")
+            rx_a.append(0)
+
+        if len(bps_rx_b[index]):
+            rx_b.append(f'min: {min(bps_rx_b[index])} | max: {max(bps_rx_b[index])} | '
+                        f'avg: {(sum(bps_rx_b[index]) / len(bps_rx_b[index])):.2f}')
+
+            if thrp_b:
+                print(
+                    f"getting overall upload values '{index}'----- {sum(bps_rx_b[index])} \n {(thrp_b / 100) * (100 - int(util[index]))}")
+                if (thrp_b / 100) * (100 - int(util[index])) <= sum(bps_rx_b[index]):
+                    pas_fail_up.append("PASS")
+                else:
+                    pas_fail_up.append("FAIL")
+        else:
+            pas_fail_up.append("NA")
+            rx_b.append(0)
+
+        util[index] = f'{util[index]}%'  # append % to the util values
+
+    overall_tab = pd.DataFrame({
+        'Channel Utilization (%)': util, "No.of.clients": [len(sta_num)] * len(util),
+        'Speed (mbps)': [f'upload: {upload} | download: {download}'] * len(util),
+        'Upload (mbps)': rx_b, 'Download (mbps)': rx_a
+    })
+    print(f"overall table \n{overall_tab}")
+
+    pasfail_tab = pd.DataFrame({
+        'Channel Utilization (%)': util,
+        'Upload': pas_fail_up,
+        'Download': pas_fail_down
+    })
+    print(f"pass-fail table \n {pasfail_tab}")
+
+    report = lf_report()
+    report_path = report.get_path()
+    report_path_date_time = report.get_path_date_time()
+    print("path: {}".format(report_path))
+    print("path_date_time: {}".format(report_path_date_time))
+    report.set_title(tbl_title)
+    report.build_banner()
+
+    # objective title and description
+    report.set_obj_html(_obj_title="Objective",
+                        _obj="Through this test we can evaluate the throughput for a given number of clients "
+                             "which are running traffic with a particular TOS, i.e. BK, BE, VI, VO")
+    report.build_objective()
+
+    table(report, "Overall throughput", overall_tab)
+    table(report, "Throughput Pass/Fail", pasfail_tab)
+
+    if download:
+        grph(report,
+             data_set=[[min(i) for i in bps_rx_a], [max(i) for i in bps_rx_a], [sum(i) / len(i) for i in bps_rx_a]],
+             xaxis_name="Utilizations", yaxis_name="Throughput (Mbps)",
+             xaxis_categories=util, label=["min", "max", 'avg'], graph_image_name="Throughput_download")
+    if upload:
+        grph(report,
+             data_set=[[min(i) for i in bps_rx_b], [max(i) for i in bps_rx_b], [sum(i) / len(i) for i in bps_rx_b]],
+             xaxis_name="Utilizations", yaxis_name="Throughput (Mbps)",
+             xaxis_categories=util, label=["min", "max", 'avg'], graph_image_name="Throughput_upload")
+
+    for i in range(len(util)):
+        if download:
+            grph(report, data_set=[bps_rx_a[i]], xaxis_name="stations",
+                 yaxis_name="Throughput (Mbps)", xaxis_categories=range(0, len(sta_num)),
+                 label=[util[i]], graph_image_name=f"client-Throughput-download_{i}")
+        if upload:
+            grph(report, data_set=[bps_rx_b[i]], xaxis_name="stations",
+                 yaxis_name="Throughput (Mbps)", xaxis_categories=range(0, len(sta_num)),
+                 label=[util[i]], graph_image_name=f"client-Throughput-upload_{i}")
+
+    html_file = report.write_html()
+    print("returned file {}".format(html_file))
+    report.write_pdf()
+
+    # report.generate_report()
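For reference, a minimal usage sketch (not part of the patch) of how the new tos_report() helper could be driven once throughput_qos.py collects per-TOS results; the sample utilization steps, station names and throughput numbers below are illustrative assumptions, and the direct import assumes the script is run from py-scripts/.

# illustrative driver for tos_report(); all data here is made up
from throughput_qos_report import tos_report

util = [0, 20, 40]                # channel-utilization steps, in percent
sta_num = ["sta0000", "sta0001"]  # stations that carried traffic
# one list of per-station download rates (bps rx a) per utilization step
bps_rx_a = [[980000, 990000], [770000, 800000], [580000, 600000]]
# one list of per-station upload rates (bps rx b) per utilization step
bps_rx_b = [[960000, 975000], [760000, 790000], [570000, 610000]]

# builds the overall and pass/fail tables, the bar graphs and the HTML/PDF report
tos_report(util, sta_num, bps_rx_a, bps_rx_b,
           tbl_title="Throughput QOS", grp_title="Throughput QOS",
           upload=1000000, download=1000000)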